extern void __VERIFIER_error() __attribute__ ((__noreturn__)); /* Generated by CIL v. 1.5.1 */ /* print_CIL_Input is false */ typedef unsigned char __u8; typedef short __s16; typedef unsigned short __u16; typedef int __s32; typedef unsigned int __u32; typedef unsigned long long __u64; typedef signed char s8; typedef unsigned char u8; typedef unsigned short u16; typedef int s32; typedef unsigned int u32; typedef long long s64; typedef unsigned long long u64; typedef long __kernel_long_t; typedef unsigned long __kernel_ulong_t; typedef int __kernel_pid_t; typedef unsigned int __kernel_uid32_t; typedef unsigned int __kernel_gid32_t; typedef __kernel_ulong_t __kernel_size_t; typedef __kernel_long_t __kernel_ssize_t; typedef long long __kernel_loff_t; typedef __kernel_long_t __kernel_time_t; typedef __kernel_long_t __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef __u16 __le16; typedef __u16 __be16; typedef __u32 __le32; typedef __u32 __be32; typedef __u32 __wsum; struct kernel_symbol { unsigned long value ; char const *name ; }; struct module; typedef __u32 __kernel_dev_t; typedef __kernel_dev_t dev_t; typedef unsigned short umode_t; typedef __kernel_pid_t pid_t; typedef __kernel_clockid_t clockid_t; typedef _Bool bool; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_loff_t loff_t; typedef __kernel_size_t size_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_time_t time_t; typedef unsigned int uint; typedef unsigned long ulong; typedef __s32 int32_t; typedef __u8 uint8_t; typedef __u16 uint16_t; typedef __u32 uint32_t; typedef __u64 uint64_t; typedef unsigned long sector_t; typedef unsigned long blkcnt_t; typedef u64 dma_addr_t; typedef unsigned int gfp_t; typedef unsigned int fmode_t; typedef unsigned int oom_flags_t; typedef u64 phys_addr_t; typedef phys_addr_t resource_size_t; struct __anonstruct_atomic_t_6 { int counter ; }; typedef struct __anonstruct_atomic_t_6 atomic_t; struct __anonstruct_atomic64_t_7 { long counter ; }; typedef struct __anonstruct_atomic64_t_7 atomic64_t; struct list_head { struct list_head *next ; struct list_head *prev ; }; struct hlist_node; struct hlist_head { struct hlist_node *first ; }; struct hlist_node { struct hlist_node *next ; struct hlist_node **pprev ; }; struct callback_head { struct callback_head *next ; void (*func)(struct callback_head * ) ; }; struct pt_regs { unsigned long r15 ; unsigned long r14 ; unsigned long r13 ; unsigned long r12 ; unsigned long bp ; unsigned long bx ; unsigned long r11 ; unsigned long r10 ; unsigned long r9 ; unsigned long r8 ; unsigned long ax ; unsigned long cx ; unsigned long dx ; unsigned long si ; unsigned long di ; unsigned long orig_ax ; unsigned long ip ; unsigned long cs ; unsigned long flags ; unsigned long sp ; unsigned long ss ; }; struct __anonstruct____missing_field_name_9 { unsigned int a ; unsigned int b ; }; struct __anonstruct____missing_field_name_10 { u16 limit0 ; u16 base0 ; unsigned char base1 ; unsigned char type : 4 ; unsigned char s : 1 ; unsigned char dpl : 2 ; unsigned char p : 1 ; unsigned char limit : 4 ; unsigned char avl : 1 ; unsigned char l : 1 ; unsigned char d : 1 ; unsigned char g : 1 ; unsigned char base2 ; }; union __anonunion____missing_field_name_8 { struct __anonstruct____missing_field_name_9 __annonCompField4 ; struct __anonstruct____missing_field_name_10 __annonCompField5 ; }; struct desc_struct { union __anonunion____missing_field_name_8 __annonCompField6 ; }; typedef unsigned long pteval_t; typedef 
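/* Paging typedefs (pgd_t, pte_t, pgprot_t), arch spinlock/rwlock primitives, restart_block, kernel_vm86_regs, cpumask, and x86 FPU register-state structures. */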
unsigned long pgdval_t; typedef unsigned long pgprotval_t; struct __anonstruct_pte_t_11 { pteval_t pte ; }; typedef struct __anonstruct_pte_t_11 pte_t; struct pgprot { pgprotval_t pgprot ; }; typedef struct pgprot pgprot_t; struct __anonstruct_pgd_t_12 { pgdval_t pgd ; }; typedef struct __anonstruct_pgd_t_12 pgd_t; struct page; typedef struct page *pgtable_t; struct file; struct seq_file; struct thread_struct; struct mm_struct; struct task_struct; struct cpumask; struct qspinlock { atomic_t val ; }; typedef struct qspinlock arch_spinlock_t; struct qrwlock { atomic_t cnts ; arch_spinlock_t lock ; }; typedef struct qrwlock arch_rwlock_t; typedef void (*ctor_fn_t)(void); struct device; struct net_device; struct file_operations; struct completion; struct bug_entry { int bug_addr_disp ; int file_disp ; unsigned short line ; unsigned short flags ; }; struct timespec; struct compat_timespec; struct __anonstruct_futex_16 { u32 *uaddr ; u32 val ; u32 flags ; u32 bitset ; u64 time ; u32 *uaddr2 ; }; struct __anonstruct_nanosleep_17 { clockid_t clockid ; struct timespec *rmtp ; struct compat_timespec *compat_rmtp ; u64 expires ; }; struct pollfd; struct __anonstruct_poll_18 { struct pollfd *ufds ; int nfds ; int has_timeout ; unsigned long tv_sec ; unsigned long tv_nsec ; }; union __anonunion____missing_field_name_15 { struct __anonstruct_futex_16 futex ; struct __anonstruct_nanosleep_17 nanosleep ; struct __anonstruct_poll_18 poll ; }; struct restart_block { long (*fn)(struct restart_block * ) ; union __anonunion____missing_field_name_15 __annonCompField7 ; }; struct kernel_vm86_regs { struct pt_regs pt ; unsigned short es ; unsigned short __esh ; unsigned short ds ; unsigned short __dsh ; unsigned short fs ; unsigned short __fsh ; unsigned short gs ; unsigned short __gsh ; }; union __anonunion____missing_field_name_19 { struct pt_regs *regs ; struct kernel_vm86_regs *vm86 ; }; struct math_emu_info { long ___orig_eip ; union __anonunion____missing_field_name_19 __annonCompField8 ; }; struct cpumask { unsigned long bits[128U] ; }; typedef struct cpumask cpumask_t; typedef struct cpumask *cpumask_var_t; struct fregs_state { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u32 status ; }; struct __anonstruct____missing_field_name_29 { u64 rip ; u64 rdp ; }; struct __anonstruct____missing_field_name_30 { u32 fip ; u32 fcs ; u32 foo ; u32 fos ; }; union __anonunion____missing_field_name_28 { struct __anonstruct____missing_field_name_29 __annonCompField12 ; struct __anonstruct____missing_field_name_30 __annonCompField13 ; }; union __anonunion____missing_field_name_31 { u32 padding1[12U] ; u32 sw_reserved[12U] ; }; struct fxregs_state { u16 cwd ; u16 swd ; u16 twd ; u16 fop ; union __anonunion____missing_field_name_28 __annonCompField14 ; u32 mxcsr ; u32 mxcsr_mask ; u32 st_space[32U] ; u32 xmm_space[64U] ; u32 padding[12U] ; union __anonunion____missing_field_name_31 __annonCompField15 ; }; struct swregs_state { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u8 ftop ; u8 changed ; u8 lookahead ; u8 no_update ; u8 rm ; u8 alimit ; struct math_emu_info *info ; u32 entry_eip ; }; struct xstate_header { u64 xfeatures ; u64 xcomp_bv ; u64 reserved[6U] ; }; struct xregs_state { struct fxregs_state i387 ; struct xstate_header header ; u8 __reserved[464U] ; }; union fpregs_state { struct fregs_state fsave ; struct fxregs_state fxsave ; struct swregs_state soft ; struct xregs_state xsave ; }; struct fpu { union fpregs_state state ; 
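/* Remainder of struct fpu, thread_struct, lockdep types (lock_class, lockdep_map, held_lock), spinlock/rwlock/seqlock wrappers, timespec, kuid_t/kgid_t, and kstat. */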
unsigned int last_cpu ; unsigned char fpstate_active ; unsigned char fpregs_active ; unsigned char counter ; }; struct seq_operations; struct perf_event; struct thread_struct { struct desc_struct tls_array[3U] ; unsigned long sp0 ; unsigned long sp ; unsigned short es ; unsigned short ds ; unsigned short fsindex ; unsigned short gsindex ; unsigned long fs ; unsigned long gs ; struct fpu fpu ; struct perf_event *ptrace_bps[4U] ; unsigned long debugreg6 ; unsigned long ptrace_dr7 ; unsigned long cr2 ; unsigned long trap_nr ; unsigned long error_code ; unsigned long *io_bitmap_ptr ; unsigned long iopl ; unsigned int io_bitmap_max ; }; typedef atomic64_t atomic_long_t; struct lockdep_map; struct stack_trace { unsigned int nr_entries ; unsigned int max_entries ; unsigned long *entries ; int skip ; }; struct lockdep_subclass_key { char __one_byte ; }; struct lock_class_key { struct lockdep_subclass_key subkeys[8U] ; }; struct lock_class { struct list_head hash_entry ; struct list_head lock_entry ; struct lockdep_subclass_key *key ; unsigned int subclass ; unsigned int dep_gen_id ; unsigned long usage_mask ; struct stack_trace usage_traces[13U] ; struct list_head locks_after ; struct list_head locks_before ; unsigned int version ; unsigned long ops ; char const *name ; int name_version ; unsigned long contention_point[4U] ; unsigned long contending_point[4U] ; }; struct lockdep_map { struct lock_class_key *key ; struct lock_class *class_cache[2U] ; char const *name ; int cpu ; unsigned long ip ; }; struct held_lock { u64 prev_chain_key ; unsigned long acquire_ip ; struct lockdep_map *instance ; struct lockdep_map *nest_lock ; u64 waittime_stamp ; u64 holdtime_stamp ; unsigned short class_idx : 13 ; unsigned char irq_context : 2 ; unsigned char trylock : 1 ; unsigned char read : 2 ; unsigned char check : 1 ; unsigned char hardirqs_off : 1 ; unsigned short references : 12 ; unsigned int pin_count ; }; struct raw_spinlock { arch_spinlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct raw_spinlock raw_spinlock_t; struct __anonstruct____missing_field_name_35 { u8 __padding[24U] ; struct lockdep_map dep_map ; }; union __anonunion____missing_field_name_34 { struct raw_spinlock rlock ; struct __anonstruct____missing_field_name_35 __annonCompField17 ; }; struct spinlock { union __anonunion____missing_field_name_34 __annonCompField18 ; }; typedef struct spinlock spinlock_t; struct __anonstruct_rwlock_t_36 { arch_rwlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_rwlock_t_36 rwlock_t; struct seqcount { unsigned int sequence ; struct lockdep_map dep_map ; }; typedef struct seqcount seqcount_t; struct __anonstruct_seqlock_t_45 { struct seqcount seqcount ; spinlock_t lock ; }; typedef struct __anonstruct_seqlock_t_45 seqlock_t; struct timespec { __kernel_time_t tv_sec ; long tv_nsec ; }; struct user_namespace; struct __anonstruct_kuid_t_46 { uid_t val ; }; typedef struct __anonstruct_kuid_t_46 kuid_t; struct __anonstruct_kgid_t_47 { gid_t val ; }; typedef struct __anonstruct_kgid_t_47 kgid_t; struct kstat { u64 ino ; dev_t dev ; umode_t mode ; unsigned int nlink ; kuid_t uid ; kgid_t gid ; dev_t rdev ; loff_t size ; struct timespec atime ; struct timespec mtime ; struct timespec ctime ; unsigned long blksize ; unsigned long long blocks ; }; struct vm_area_struct; struct __wait_queue_head { spinlock_t lock ; struct list_head task_list ; }; typedef 
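/* Wait queues, nodemask_t, mutex, rw_semaphore, completion, ktime_t, timer_list, red-black trees, sysctl tables, workqueue types, notifier_block, struct resource, and the start of dev_pm_ops. */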
struct __wait_queue_head wait_queue_head_t; struct __anonstruct_nodemask_t_48 { unsigned long bits[16U] ; }; typedef struct __anonstruct_nodemask_t_48 nodemask_t; struct optimistic_spin_queue { atomic_t tail ; }; struct mutex { atomic_t count ; spinlock_t wait_lock ; struct list_head wait_list ; struct task_struct *owner ; void *magic ; struct lockdep_map dep_map ; }; struct mutex_waiter { struct list_head list ; struct task_struct *task ; void *magic ; }; struct rw_semaphore; struct rw_semaphore { long count ; struct list_head wait_list ; raw_spinlock_t wait_lock ; struct optimistic_spin_queue osq ; struct task_struct *owner ; struct lockdep_map dep_map ; }; struct completion { unsigned int done ; wait_queue_head_t wait ; }; union ktime { s64 tv64 ; }; typedef union ktime ktime_t; struct notifier_block; struct timer_list { struct hlist_node entry ; unsigned long expires ; void (*function)(unsigned long ) ; unsigned long data ; u32 flags ; int slack ; int start_pid ; void *start_site ; char start_comm[16U] ; struct lockdep_map lockdep_map ; }; struct hrtimer; enum hrtimer_restart; struct rb_node { unsigned long __rb_parent_color ; struct rb_node *rb_right ; struct rb_node *rb_left ; }; struct rb_root { struct rb_node *rb_node ; }; struct ctl_table; struct nsproxy; struct ctl_table_root; struct ctl_table_header; struct ctl_dir; typedef int proc_handler(struct ctl_table * , int , void * , size_t * , loff_t * ); struct ctl_table_poll { atomic_t event ; wait_queue_head_t wait ; }; struct ctl_table { char const *procname ; void *data ; int maxlen ; umode_t mode ; struct ctl_table *child ; proc_handler *proc_handler ; struct ctl_table_poll *poll ; void *extra1 ; void *extra2 ; }; struct ctl_node { struct rb_node node ; struct ctl_table_header *header ; }; struct __anonstruct____missing_field_name_50 { struct ctl_table *ctl_table ; int used ; int count ; int nreg ; }; union __anonunion____missing_field_name_49 { struct __anonstruct____missing_field_name_50 __annonCompField19 ; struct callback_head rcu ; }; struct ctl_table_set; struct ctl_table_header { union __anonunion____missing_field_name_49 __annonCompField20 ; struct completion *unregistering ; struct ctl_table *ctl_table_arg ; struct ctl_table_root *root ; struct ctl_table_set *set ; struct ctl_dir *parent ; struct ctl_node *node ; }; struct ctl_dir { struct ctl_table_header header ; struct rb_root root ; }; struct ctl_table_set { int (*is_seen)(struct ctl_table_set * ) ; struct ctl_dir dir ; }; struct ctl_table_root { struct ctl_table_set default_set ; struct ctl_table_set *(*lookup)(struct ctl_table_root * , struct nsproxy * ) ; int (*permissions)(struct ctl_table_header * , struct ctl_table * ) ; }; struct workqueue_struct; struct work_struct; struct work_struct { atomic_long_t data ; struct list_head entry ; void (*func)(struct work_struct * ) ; struct lockdep_map lockdep_map ; }; struct delayed_work { struct work_struct work ; struct timer_list timer ; struct workqueue_struct *wq ; int cpu ; }; struct execute_work { struct work_struct work ; }; struct notifier_block { int (*notifier_call)(struct notifier_block * , unsigned long , void * ) ; struct notifier_block *next ; int priority ; }; struct resource { resource_size_t start ; resource_size_t end ; char const *name ; unsigned long flags ; struct resource *parent ; struct resource *sibling ; struct resource *child ; }; struct pci_dev; struct pm_message { int event ; }; typedef struct pm_message pm_message_t; struct dev_pm_ops { int (*prepare)(struct device * ) ; void 
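/* Remaining dev_pm_ops callbacks, runtime-PM state enums, pm_subsys_data, dev_pm_info, dev_pm_domain, mm_context_t, llist_node, call_single_data, and uprobe task state. */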
(*complete)(struct device * ) ; int (*suspend)(struct device * ) ; int (*resume)(struct device * ) ; int (*freeze)(struct device * ) ; int (*thaw)(struct device * ) ; int (*poweroff)(struct device * ) ; int (*restore)(struct device * ) ; int (*suspend_late)(struct device * ) ; int (*resume_early)(struct device * ) ; int (*freeze_late)(struct device * ) ; int (*thaw_early)(struct device * ) ; int (*poweroff_late)(struct device * ) ; int (*restore_early)(struct device * ) ; int (*suspend_noirq)(struct device * ) ; int (*resume_noirq)(struct device * ) ; int (*freeze_noirq)(struct device * ) ; int (*thaw_noirq)(struct device * ) ; int (*poweroff_noirq)(struct device * ) ; int (*restore_noirq)(struct device * ) ; int (*runtime_suspend)(struct device * ) ; int (*runtime_resume)(struct device * ) ; int (*runtime_idle)(struct device * ) ; }; enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; struct wakeup_source; struct wake_irq; struct pm_subsys_data { spinlock_t lock ; unsigned int refcount ; struct list_head clock_list ; }; struct dev_pm_qos; struct dev_pm_info { pm_message_t power_state ; unsigned char can_wakeup : 1 ; unsigned char async_suspend : 1 ; bool is_prepared ; bool is_suspended ; bool is_noirq_suspended ; bool is_late_suspended ; bool ignore_children ; bool early_init ; bool direct_complete ; spinlock_t lock ; struct list_head entry ; struct completion completion ; struct wakeup_source *wakeup ; bool wakeup_path ; bool syscore ; struct timer_list suspend_timer ; unsigned long timer_expires ; struct work_struct work ; wait_queue_head_t wait_queue ; struct wake_irq *wakeirq ; atomic_t usage_count ; atomic_t child_count ; unsigned char disable_depth : 3 ; unsigned char idle_notification : 1 ; unsigned char request_pending : 1 ; unsigned char deferred_resume : 1 ; unsigned char run_wake : 1 ; unsigned char runtime_auto : 1 ; unsigned char no_callbacks : 1 ; unsigned char irq_safe : 1 ; unsigned char use_autosuspend : 1 ; unsigned char timer_autosuspends : 1 ; unsigned char memalloc_noio : 1 ; enum rpm_request request ; enum rpm_status runtime_status ; int runtime_error ; int autosuspend_delay ; unsigned long last_busy ; unsigned long active_jiffies ; unsigned long suspended_jiffies ; unsigned long accounting_timestamp ; struct pm_subsys_data *subsys_data ; void (*set_latency_tolerance)(struct device * , s32 ) ; struct dev_pm_qos *qos ; }; struct dev_pm_domain { struct dev_pm_ops ops ; void (*detach)(struct device * , bool ) ; int (*activate)(struct device * ) ; void (*sync)(struct device * ) ; void (*dismiss)(struct device * ) ; }; struct pci_bus; struct __anonstruct_mm_context_t_115 { void *ldt ; int size ; unsigned short ia32_compat ; struct mutex lock ; void *vdso ; atomic_t perf_rdpmc_allowed ; }; typedef struct __anonstruct_mm_context_t_115 mm_context_t; struct bio_vec; struct llist_node; struct llist_node { struct llist_node *next ; }; struct call_single_data { struct llist_node llist ; void (*func)(void * ) ; void *info ; unsigned int flags ; }; struct cred; struct inode; struct arch_uprobe_task { unsigned long saved_scratch_register ; unsigned int saved_trap_nr ; unsigned int saved_tf ; }; enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; struct __anonstruct____missing_field_name_148 { struct arch_uprobe_task autask ; unsigned long vaddr ; }; struct 
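/* uprobe_task and uprobes_state, struct page with its overlaid unions, page_frag, vm_area_struct, and core_thread/core_state. */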
__anonstruct____missing_field_name_149 { struct callback_head dup_xol_work ; unsigned long dup_xol_addr ; }; union __anonunion____missing_field_name_147 { struct __anonstruct____missing_field_name_148 __annonCompField33 ; struct __anonstruct____missing_field_name_149 __annonCompField34 ; }; struct uprobe; struct return_instance; struct uprobe_task { enum uprobe_task_state state ; union __anonunion____missing_field_name_147 __annonCompField35 ; struct uprobe *active_uprobe ; unsigned long xol_vaddr ; struct return_instance *return_instances ; unsigned int depth ; }; struct xol_area; struct uprobes_state { struct xol_area *xol_area ; }; struct address_space; struct mem_cgroup; typedef void compound_page_dtor(struct page * ); union __anonunion____missing_field_name_150 { struct address_space *mapping ; void *s_mem ; }; union __anonunion____missing_field_name_152 { unsigned long index ; void *freelist ; bool pfmemalloc ; }; struct __anonstruct____missing_field_name_156 { unsigned short inuse ; unsigned short objects : 15 ; unsigned char frozen : 1 ; }; union __anonunion____missing_field_name_155 { atomic_t _mapcount ; struct __anonstruct____missing_field_name_156 __annonCompField38 ; int units ; }; struct __anonstruct____missing_field_name_154 { union __anonunion____missing_field_name_155 __annonCompField39 ; atomic_t _count ; }; union __anonunion____missing_field_name_153 { unsigned long counters ; struct __anonstruct____missing_field_name_154 __annonCompField40 ; unsigned int active ; }; struct __anonstruct____missing_field_name_151 { union __anonunion____missing_field_name_152 __annonCompField37 ; union __anonunion____missing_field_name_153 __annonCompField41 ; }; struct __anonstruct____missing_field_name_158 { struct page *next ; int pages ; int pobjects ; }; struct slab; struct __anonstruct____missing_field_name_159 { compound_page_dtor *compound_dtor ; unsigned long compound_order ; }; union __anonunion____missing_field_name_157 { struct list_head lru ; struct __anonstruct____missing_field_name_158 __annonCompField43 ; struct slab *slab_page ; struct callback_head callback_head ; struct __anonstruct____missing_field_name_159 __annonCompField44 ; pgtable_t pmd_huge_pte ; }; struct kmem_cache; union __anonunion____missing_field_name_160 { unsigned long private ; spinlock_t *ptl ; struct kmem_cache *slab_cache ; struct page *first_page ; }; struct page { unsigned long flags ; union __anonunion____missing_field_name_150 __annonCompField36 ; struct __anonstruct____missing_field_name_151 __annonCompField42 ; union __anonunion____missing_field_name_157 __annonCompField45 ; union __anonunion____missing_field_name_160 __annonCompField46 ; struct mem_cgroup *mem_cgroup ; }; struct page_frag { struct page *page ; __u32 offset ; __u32 size ; }; struct __anonstruct_shared_161 { struct rb_node rb ; unsigned long rb_subtree_last ; }; struct anon_vma; struct vm_operations_struct; struct mempolicy; struct vm_area_struct { unsigned long vm_start ; unsigned long vm_end ; struct vm_area_struct *vm_next ; struct vm_area_struct *vm_prev ; struct rb_node vm_rb ; unsigned long rb_subtree_gap ; struct mm_struct *vm_mm ; pgprot_t vm_page_prot ; unsigned long vm_flags ; struct __anonstruct_shared_161 shared ; struct list_head anon_vma_chain ; struct anon_vma *anon_vma ; struct vm_operations_struct const *vm_ops ; unsigned long vm_pgoff ; struct file *vm_file ; void *vm_private_data ; struct mempolicy *vm_policy ; }; struct core_thread { struct task_struct *task ; struct core_thread *next ; }; struct core_state { 
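/* mm_struct with its RSS counters, ELF64 symbol types, idr/ida, and the kernfs directory/symlink/attr node structures. */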
atomic_t nr_threads ; struct core_thread dumper ; struct completion startup ; }; struct task_rss_stat { int events ; int count[3U] ; }; struct mm_rss_stat { atomic_long_t count[3U] ; }; struct kioctx_table; struct linux_binfmt; struct mmu_notifier_mm; struct mm_struct { struct vm_area_struct *mmap ; struct rb_root mm_rb ; u32 vmacache_seqnum ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; unsigned long mmap_base ; unsigned long mmap_legacy_base ; unsigned long task_size ; unsigned long highest_vm_end ; pgd_t *pgd ; atomic_t mm_users ; atomic_t mm_count ; atomic_long_t nr_ptes ; atomic_long_t nr_pmds ; int map_count ; spinlock_t page_table_lock ; struct rw_semaphore mmap_sem ; struct list_head mmlist ; unsigned long hiwater_rss ; unsigned long hiwater_vm ; unsigned long total_vm ; unsigned long locked_vm ; unsigned long pinned_vm ; unsigned long shared_vm ; unsigned long exec_vm ; unsigned long stack_vm ; unsigned long def_flags ; unsigned long start_code ; unsigned long end_code ; unsigned long start_data ; unsigned long end_data ; unsigned long start_brk ; unsigned long brk ; unsigned long start_stack ; unsigned long arg_start ; unsigned long arg_end ; unsigned long env_start ; unsigned long env_end ; unsigned long saved_auxv[46U] ; struct mm_rss_stat rss_stat ; struct linux_binfmt *binfmt ; cpumask_var_t cpu_vm_mask_var ; mm_context_t context ; unsigned long flags ; struct core_state *core_state ; spinlock_t ioctx_lock ; struct kioctx_table *ioctx_table ; struct task_struct *owner ; struct file *exe_file ; struct mmu_notifier_mm *mmu_notifier_mm ; struct cpumask cpumask_allocation ; unsigned long numa_next_scan ; unsigned long numa_scan_offset ; int numa_scan_seq ; bool tlb_flush_pending ; struct uprobes_state uprobes_state ; void *bd_addr ; }; typedef __u64 Elf64_Addr; typedef __u16 Elf64_Half; typedef __u32 Elf64_Word; typedef __u64 Elf64_Xword; struct elf64_sym { Elf64_Word st_name ; unsigned char st_info ; unsigned char st_other ; Elf64_Half st_shndx ; Elf64_Addr st_value ; Elf64_Xword st_size ; }; typedef struct elf64_sym Elf64_Sym; union __anonunion____missing_field_name_166 { unsigned long bitmap[4U] ; struct callback_head callback_head ; }; struct idr_layer { int prefix ; int layer ; struct idr_layer *ary[256U] ; int count ; union __anonunion____missing_field_name_166 __annonCompField47 ; }; struct idr { struct idr_layer *hint ; struct idr_layer *top ; int layers ; int cur ; spinlock_t lock ; int id_free_cnt ; struct idr_layer *id_free ; }; struct ida_bitmap { long nr_busy ; unsigned long bitmap[15U] ; }; struct ida { struct idr idr ; struct ida_bitmap *free_bitmap ; }; struct dentry; struct iattr; struct super_block; struct file_system_type; struct kernfs_open_node; struct kernfs_iattrs; struct kernfs_root; struct kernfs_elem_dir { unsigned long subdirs ; struct rb_root children ; struct kernfs_root *root ; }; struct kernfs_node; struct kernfs_elem_symlink { struct kernfs_node *target_kn ; }; struct kernfs_ops; struct kernfs_elem_attr { struct kernfs_ops const *ops ; struct kernfs_open_node *open ; loff_t size ; struct kernfs_node *notify_next ; }; union __anonunion____missing_field_name_171 { struct kernfs_elem_dir dir ; struct kernfs_elem_symlink symlink ; struct kernfs_elem_attr attr ; }; struct kernfs_node { atomic_t count ; atomic_t active ; struct lockdep_map dep_map ; struct kernfs_node *parent ; char const *name ; struct rb_node rb ; void const *ns ; unsigned int hash ; union 
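/* Remainder of kernfs (syscall_ops, kernfs_root, kernfs_open_file, kernfs_ops), then the kobject/sysfs layer: attribute, attribute_group, bin_attribute, sysfs_ops, kref, kobject, and kobj_type. */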
__anonunion____missing_field_name_171 __annonCompField48 ; void *priv ; unsigned short flags ; umode_t mode ; unsigned int ino ; struct kernfs_iattrs *iattr ; }; struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root * , int * , char * ) ; int (*show_options)(struct seq_file * , struct kernfs_root * ) ; int (*mkdir)(struct kernfs_node * , char const * , umode_t ) ; int (*rmdir)(struct kernfs_node * ) ; int (*rename)(struct kernfs_node * , struct kernfs_node * , char const * ) ; }; struct kernfs_root { struct kernfs_node *kn ; unsigned int flags ; struct ida ino_ida ; struct kernfs_syscall_ops *syscall_ops ; struct list_head supers ; wait_queue_head_t deactivate_waitq ; }; struct kernfs_open_file { struct kernfs_node *kn ; struct file *file ; void *priv ; struct mutex mutex ; int event ; struct list_head list ; char *prealloc_buf ; size_t atomic_write_len ; bool mmapped ; struct vm_operations_struct const *vm_ops ; }; struct kernfs_ops { int (*seq_show)(struct seq_file * , void * ) ; void *(*seq_start)(struct seq_file * , loff_t * ) ; void *(*seq_next)(struct seq_file * , void * , loff_t * ) ; void (*seq_stop)(struct seq_file * , void * ) ; ssize_t (*read)(struct kernfs_open_file * , char * , size_t , loff_t ) ; size_t atomic_write_len ; bool prealloc ; ssize_t (*write)(struct kernfs_open_file * , char * , size_t , loff_t ) ; int (*mmap)(struct kernfs_open_file * , struct vm_area_struct * ) ; struct lock_class_key lockdep_key ; }; struct sock; struct kobject; enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; struct kobj_ns_type_operations { enum kobj_ns_type type ; bool (*current_may_mount)(void) ; void *(*grab_current_ns)(void) ; void const *(*netlink_ns)(struct sock * ) ; void const *(*initial_ns)(void) ; void (*drop_ns)(void * ) ; }; struct bin_attribute; struct attribute { char const *name ; umode_t mode ; bool ignore_lockdep ; struct lock_class_key *key ; struct lock_class_key skey ; }; struct attribute_group { char const *name ; umode_t (*is_visible)(struct kobject * , struct attribute * , int ) ; struct attribute **attrs ; struct bin_attribute **bin_attrs ; }; struct bin_attribute { struct attribute attr ; size_t size ; void *private ; ssize_t (*read)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; ssize_t (*write)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; int (*mmap)(struct file * , struct kobject * , struct bin_attribute * , struct vm_area_struct * ) ; }; struct sysfs_ops { ssize_t (*show)(struct kobject * , struct attribute * , char * ) ; ssize_t (*store)(struct kobject * , struct attribute * , char const * , size_t ) ; }; struct kref { atomic_t refcount ; }; enum kobject_action { KOBJ_ADD = 0, KOBJ_REMOVE = 1, KOBJ_CHANGE = 2, KOBJ_MOVE = 3, KOBJ_ONLINE = 4, KOBJ_OFFLINE = 5, KOBJ_MAX = 6 } ; struct kset; struct kobj_type; struct kobject { char const *name ; struct list_head entry ; struct kobject *parent ; struct kset *kset ; struct kobj_type *ktype ; struct kernfs_node *sd ; struct kref kref ; struct delayed_work release ; unsigned char state_initialized : 1 ; unsigned char state_in_sysfs : 1 ; unsigned char state_add_uevent_sent : 1 ; unsigned char state_remove_uevent_sent : 1 ; unsigned char uevent_suppress : 1 ; }; struct kobj_type { void (*release)(struct kobject * ) ; struct sysfs_ops const *sysfs_ops ; struct attribute **default_attrs ; struct kobj_ns_type_operations const *(*child_ns_type)(struct kobject * ) ; void const 
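/* kset and uevent plumbing, module parameters (kernel_param, kparam_string, kparam_array), module_kobject/module_attribute, and most of struct module. */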
*(*namespace)(struct kobject * ) ; }; struct kobj_uevent_env { char *argv[3U] ; char *envp[32U] ; int envp_idx ; char buf[2048U] ; int buflen ; }; struct kset_uevent_ops { int (* const filter)(struct kset * , struct kobject * ) ; char const *(* const name)(struct kset * , struct kobject * ) ; int (* const uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ; }; struct kset { struct list_head list ; spinlock_t list_lock ; struct kobject kobj ; struct kset_uevent_ops const *uevent_ops ; }; struct kernel_param; struct kernel_param_ops { unsigned int flags ; int (*set)(char const * , struct kernel_param const * ) ; int (*get)(char * , struct kernel_param const * ) ; void (*free)(void * ) ; }; struct kparam_string; struct kparam_array; union __anonunion____missing_field_name_172 { void *arg ; struct kparam_string const *str ; struct kparam_array const *arr ; }; struct kernel_param { char const *name ; struct module *mod ; struct kernel_param_ops const *ops ; u16 const perm ; s8 level ; u8 flags ; union __anonunion____missing_field_name_172 __annonCompField49 ; }; struct kparam_string { unsigned int maxlen ; char *string ; }; struct kparam_array { unsigned int max ; unsigned int elemsize ; unsigned int *num ; struct kernel_param_ops const *ops ; void *elem ; }; struct latch_tree_node { struct rb_node node[2U] ; }; struct mod_arch_specific { }; struct module_param_attrs; struct module_kobject { struct kobject kobj ; struct module *mod ; struct kobject *drivers_dir ; struct module_param_attrs *mp ; struct completion *kobj_completion ; }; struct module_attribute { struct attribute attr ; ssize_t (*show)(struct module_attribute * , struct module_kobject * , char * ) ; ssize_t (*store)(struct module_attribute * , struct module_kobject * , char const * , size_t ) ; void (*setup)(struct module * , char const * ) ; int (*test)(struct module * ) ; void (*free)(struct module * ) ; }; struct exception_table_entry; enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; struct mod_tree_node { struct module *mod ; struct latch_tree_node node ; }; struct module_sect_attrs; struct module_notes_attrs; struct tracepoint; struct trace_event_call; struct trace_enum_map; struct module { enum module_state state ; struct list_head list ; char name[56U] ; struct module_kobject mkobj ; struct module_attribute *modinfo_attrs ; char const *version ; char const *srcversion ; struct kobject *holders_dir ; struct kernel_symbol const *syms ; unsigned long const *crcs ; unsigned int num_syms ; struct mutex param_lock ; struct kernel_param *kp ; unsigned int num_kp ; unsigned int num_gpl_syms ; struct kernel_symbol const *gpl_syms ; unsigned long const *gpl_crcs ; struct kernel_symbol const *unused_syms ; unsigned long const *unused_crcs ; unsigned int num_unused_syms ; unsigned int num_unused_gpl_syms ; struct kernel_symbol const *unused_gpl_syms ; unsigned long const *unused_gpl_crcs ; bool sig_ok ; bool async_probe_requested ; struct kernel_symbol const *gpl_future_syms ; unsigned long const *gpl_future_crcs ; unsigned int num_gpl_future_syms ; unsigned int num_exentries ; struct exception_table_entry *extable ; int (*init)(void) ; void *module_init ; void *module_core ; unsigned int init_size ; unsigned int core_size ; unsigned int init_text_size ; unsigned int core_text_size ; struct mod_tree_node mtn_core ; struct mod_tree_node mtn_init ; unsigned int init_ro_size ; unsigned int core_ro_size ; struct mod_arch_specific arch ; unsigned int 
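/* Tail of struct module, kernel_cap_t, plist_node, SysV IPC handles, signal types (sigset_t, siginfo, sigaction, k_sigaction), and struct pid with its upid links. */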
taints ; unsigned int num_bugs ; struct list_head bug_list ; struct bug_entry *bug_table ; Elf64_Sym *symtab ; Elf64_Sym *core_symtab ; unsigned int num_symtab ; unsigned int core_num_syms ; char *strtab ; char *core_strtab ; struct module_sect_attrs *sect_attrs ; struct module_notes_attrs *notes_attrs ; char *args ; void *percpu ; unsigned int percpu_size ; unsigned int num_tracepoints ; struct tracepoint * const *tracepoints_ptrs ; unsigned int num_trace_bprintk_fmt ; char const **trace_bprintk_fmt_start ; struct trace_event_call **trace_events ; unsigned int num_trace_events ; struct trace_enum_map **trace_enums ; unsigned int num_trace_enums ; unsigned int num_ftrace_callsites ; unsigned long *ftrace_callsites ; bool klp_alive ; struct list_head source_list ; struct list_head target_list ; void (*exit)(void) ; atomic_t refcnt ; ctor_fn_t (**ctors)(void) ; unsigned int num_ctors ; }; struct kernel_cap_struct { __u32 cap[2U] ; }; typedef struct kernel_cap_struct kernel_cap_t; struct plist_node { int prio ; struct list_head prio_list ; struct list_head node_list ; }; typedef unsigned long cputime_t; struct sem_undo_list; struct sysv_sem { struct sem_undo_list *undo_list ; }; struct user_struct; struct sysv_shm { struct list_head shm_clist ; }; struct __anonstruct_sigset_t_180 { unsigned long sig[1U] ; }; typedef struct __anonstruct_sigset_t_180 sigset_t; struct siginfo; typedef void __signalfn_t(int ); typedef __signalfn_t *__sighandler_t; typedef void __restorefn_t(void); typedef __restorefn_t *__sigrestore_t; union sigval { int sival_int ; void *sival_ptr ; }; typedef union sigval sigval_t; struct __anonstruct__kill_182 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; }; struct __anonstruct__timer_183 { __kernel_timer_t _tid ; int _overrun ; char _pad[0U] ; sigval_t _sigval ; int _sys_private ; }; struct __anonstruct__rt_184 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; sigval_t _sigval ; }; struct __anonstruct__sigchld_185 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; int _status ; __kernel_clock_t _utime ; __kernel_clock_t _stime ; }; struct __anonstruct__addr_bnd_187 { void *_lower ; void *_upper ; }; struct __anonstruct__sigfault_186 { void *_addr ; short _addr_lsb ; struct __anonstruct__addr_bnd_187 _addr_bnd ; }; struct __anonstruct__sigpoll_188 { long _band ; int _fd ; }; struct __anonstruct__sigsys_189 { void *_call_addr ; int _syscall ; unsigned int _arch ; }; union __anonunion__sifields_181 { int _pad[28U] ; struct __anonstruct__kill_182 _kill ; struct __anonstruct__timer_183 _timer ; struct __anonstruct__rt_184 _rt ; struct __anonstruct__sigchld_185 _sigchld ; struct __anonstruct__sigfault_186 _sigfault ; struct __anonstruct__sigpoll_188 _sigpoll ; struct __anonstruct__sigsys_189 _sigsys ; }; struct siginfo { int si_signo ; int si_errno ; int si_code ; union __anonunion__sifields_181 _sifields ; }; typedef struct siginfo siginfo_t; struct sigpending { struct list_head list ; sigset_t signal ; }; struct sigaction { __sighandler_t sa_handler ; unsigned long sa_flags ; __sigrestore_t sa_restorer ; sigset_t sa_mask ; }; struct k_sigaction { struct sigaction sa ; }; enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; struct pid_namespace; struct upid { int nr ; struct pid_namespace *ns ; struct hlist_node pid_chain ; }; struct pid { atomic_t count ; unsigned int level ; struct hlist_head tasks[3U] ; struct callback_head rcu ; struct upid numbers[1U] ; }; struct pid_link { struct hlist_node node ; struct pid *pid ; }; struct percpu_counter { 
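/* percpu_counter, seccomp, rlimit, timerqueue and hrtimer infrastructure, task_io_accounting, latency_record, assoc_array, key/keyring structures, and group_info. */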
raw_spinlock_t lock ; s64 count ; struct list_head list ; s32 *counters ; }; struct seccomp_filter; struct seccomp { int mode ; struct seccomp_filter *filter ; }; struct rt_mutex_waiter; struct rlimit { __kernel_ulong_t rlim_cur ; __kernel_ulong_t rlim_max ; }; struct timerqueue_node { struct rb_node node ; ktime_t expires ; }; struct timerqueue_head { struct rb_root head ; struct timerqueue_node *next ; }; struct hrtimer_clock_base; struct hrtimer_cpu_base; enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; struct hrtimer { struct timerqueue_node node ; ktime_t _softexpires ; enum hrtimer_restart (*function)(struct hrtimer * ) ; struct hrtimer_clock_base *base ; unsigned long state ; int start_pid ; void *start_site ; char start_comm[16U] ; }; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base ; int index ; clockid_t clockid ; struct timerqueue_head active ; ktime_t (*get_time)(void) ; ktime_t offset ; }; struct hrtimer_cpu_base { raw_spinlock_t lock ; seqcount_t seq ; struct hrtimer *running ; unsigned int cpu ; unsigned int active_bases ; unsigned int clock_was_set_seq ; bool migration_enabled ; bool nohz_active ; unsigned char in_hrtirq : 1 ; unsigned char hres_active : 1 ; unsigned char hang_detected : 1 ; ktime_t expires_next ; struct hrtimer *next_timer ; unsigned int nr_events ; unsigned int nr_retries ; unsigned int nr_hangs ; unsigned int max_hang_time ; struct hrtimer_clock_base clock_base[4U] ; }; struct task_io_accounting { u64 rchar ; u64 wchar ; u64 syscr ; u64 syscw ; u64 read_bytes ; u64 write_bytes ; u64 cancelled_write_bytes ; }; struct latency_record { unsigned long backtrace[12U] ; unsigned int count ; unsigned long time ; unsigned long max ; }; struct assoc_array_ptr; struct assoc_array { struct assoc_array_ptr *root ; unsigned long nr_leaves_on_tree ; }; typedef int32_t key_serial_t; typedef uint32_t key_perm_t; struct key; struct signal_struct; struct key_type; struct keyring_index_key { struct key_type *type ; char const *description ; size_t desc_len ; }; union __anonunion____missing_field_name_196 { struct list_head graveyard_link ; struct rb_node serial_node ; }; struct key_user; union __anonunion____missing_field_name_197 { time_t expiry ; time_t revoked_at ; }; struct __anonstruct____missing_field_name_199 { struct key_type *type ; char *description ; }; union __anonunion____missing_field_name_198 { struct keyring_index_key index_key ; struct __anonstruct____missing_field_name_199 __annonCompField52 ; }; union __anonunion_type_data_200 { struct list_head link ; unsigned long x[2U] ; void *p[2U] ; int reject_error ; }; union __anonunion_payload_202 { unsigned long value ; void *rcudata ; void *data ; void *data2[2U] ; }; union __anonunion____missing_field_name_201 { union __anonunion_payload_202 payload ; struct assoc_array keys ; }; struct key { atomic_t usage ; key_serial_t serial ; union __anonunion____missing_field_name_196 __annonCompField50 ; struct rw_semaphore sem ; struct key_user *user ; void *security ; union __anonunion____missing_field_name_197 __annonCompField51 ; time_t last_used_at ; kuid_t uid ; kgid_t gid ; key_perm_t perm ; unsigned short quotalen ; unsigned short datalen ; unsigned long flags ; union __anonunion____missing_field_name_198 __annonCompField53 ; union __anonunion_type_data_200 type_data ; union __anonunion____missing_field_name_201 __annonCompField54 ; }; struct audit_context; struct group_info { atomic_t usage ; int ngroups ; int nblocks ; kgid_t small_block[32U] ; kgid_t *blocks[0U] ; }; struct 
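/* struct cred, percpu_ref, and the cgroup core: cgroup_subsys_state, css_set, cgroup, cgroup_root, cftype, and cgroup_subsys callbacks. */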
cred { atomic_t usage ; atomic_t subscribers ; void *put_addr ; unsigned int magic ; kuid_t uid ; kgid_t gid ; kuid_t suid ; kgid_t sgid ; kuid_t euid ; kgid_t egid ; kuid_t fsuid ; kgid_t fsgid ; unsigned int securebits ; kernel_cap_t cap_inheritable ; kernel_cap_t cap_permitted ; kernel_cap_t cap_effective ; kernel_cap_t cap_bset ; unsigned char jit_keyring ; struct key *session_keyring ; struct key *process_keyring ; struct key *thread_keyring ; struct key *request_key_auth ; void *security ; struct user_struct *user ; struct user_namespace *user_ns ; struct group_info *group_info ; struct callback_head rcu ; }; struct percpu_ref; typedef void percpu_ref_func_t(struct percpu_ref * ); struct percpu_ref { atomic_long_t count ; unsigned long percpu_count_ptr ; percpu_ref_func_t *release ; percpu_ref_func_t *confirm_switch ; bool force_atomic ; struct callback_head rcu ; }; struct cgroup; struct cgroup_root; struct cgroup_subsys; struct cgroup_taskset; struct cgroup_subsys_state { struct cgroup *cgroup ; struct cgroup_subsys *ss ; struct percpu_ref refcnt ; struct cgroup_subsys_state *parent ; struct list_head sibling ; struct list_head children ; int id ; unsigned int flags ; u64 serial_nr ; struct callback_head callback_head ; struct work_struct destroy_work ; }; struct css_set { atomic_t refcount ; struct hlist_node hlist ; struct list_head tasks ; struct list_head mg_tasks ; struct list_head cgrp_links ; struct cgroup *dfl_cgrp ; struct cgroup_subsys_state *subsys[12U] ; struct list_head mg_preload_node ; struct list_head mg_node ; struct cgroup *mg_src_cgrp ; struct css_set *mg_dst_cset ; struct list_head e_cset_node[12U] ; struct callback_head callback_head ; }; struct cgroup { struct cgroup_subsys_state self ; unsigned long flags ; int id ; int populated_cnt ; struct kernfs_node *kn ; struct kernfs_node *procs_kn ; struct kernfs_node *populated_kn ; unsigned int subtree_control ; unsigned int child_subsys_mask ; struct cgroup_subsys_state *subsys[12U] ; struct cgroup_root *root ; struct list_head cset_links ; struct list_head e_csets[12U] ; struct list_head pidlists ; struct mutex pidlist_mutex ; wait_queue_head_t offline_waitq ; struct work_struct release_agent_work ; }; struct cgroup_root { struct kernfs_root *kf_root ; unsigned int subsys_mask ; int hierarchy_id ; struct cgroup cgrp ; atomic_t nr_cgrps ; struct list_head root_list ; unsigned int flags ; struct idr cgroup_idr ; char release_agent_path[4096U] ; char name[64U] ; }; struct cftype { char name[64U] ; int private ; umode_t mode ; size_t max_write_len ; unsigned int flags ; struct cgroup_subsys *ss ; struct list_head node ; struct kernfs_ops *kf_ops ; u64 (*read_u64)(struct cgroup_subsys_state * , struct cftype * ) ; s64 (*read_s64)(struct cgroup_subsys_state * , struct cftype * ) ; int (*seq_show)(struct seq_file * , void * ) ; void *(*seq_start)(struct seq_file * , loff_t * ) ; void *(*seq_next)(struct seq_file * , void * , loff_t * ) ; void (*seq_stop)(struct seq_file * , void * ) ; int (*write_u64)(struct cgroup_subsys_state * , struct cftype * , u64 ) ; int (*write_s64)(struct cgroup_subsys_state * , struct cftype * , s64 ) ; ssize_t (*write)(struct kernfs_open_file * , char * , size_t , loff_t ) ; struct lock_class_key lockdep_key ; }; struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state * ) ; int (*css_online)(struct cgroup_subsys_state * ) ; void (*css_offline)(struct cgroup_subsys_state * ) ; void (*css_released)(struct cgroup_subsys_state * ) ; void (*css_free)(struct 
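/* Remainder of cgroup_subsys, sighand_struct, per-process accounting (pacct_struct, cpu_itimer, task_cputime), signal_struct, and the start of user_struct. */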
cgroup_subsys_state * ) ; void (*css_reset)(struct cgroup_subsys_state * ) ; void (*css_e_css_changed)(struct cgroup_subsys_state * ) ; int (*can_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*cancel_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*fork)(struct task_struct * ) ; void (*exit)(struct cgroup_subsys_state * , struct cgroup_subsys_state * , struct task_struct * ) ; void (*bind)(struct cgroup_subsys_state * ) ; int disabled ; int early_init ; bool broken_hierarchy ; bool warned_broken_hierarchy ; int id ; char const *name ; struct cgroup_root *root ; struct idr css_idr ; struct list_head cfts ; struct cftype *dfl_cftypes ; struct cftype *legacy_cftypes ; unsigned int depends_on ; }; struct futex_pi_state; struct robust_list_head; struct bio_list; struct fs_struct; struct perf_event_context; struct blk_plug; struct nameidata; struct cfs_rq; struct task_group; struct sighand_struct { atomic_t count ; struct k_sigaction action[64U] ; spinlock_t siglock ; wait_queue_head_t signalfd_wqh ; }; struct pacct_struct { int ac_flag ; long ac_exitcode ; unsigned long ac_mem ; cputime_t ac_utime ; cputime_t ac_stime ; unsigned long ac_minflt ; unsigned long ac_majflt ; }; struct cpu_itimer { cputime_t expires ; cputime_t incr ; u32 error ; u32 incr_error ; }; struct cputime { cputime_t utime ; cputime_t stime ; }; struct task_cputime { cputime_t utime ; cputime_t stime ; unsigned long long sum_exec_runtime ; }; struct task_cputime_atomic { atomic64_t utime ; atomic64_t stime ; atomic64_t sum_exec_runtime ; }; struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic ; int running ; }; struct autogroup; struct tty_struct; struct taskstats; struct tty_audit_buf; struct signal_struct { atomic_t sigcnt ; atomic_t live ; int nr_threads ; struct list_head thread_head ; wait_queue_head_t wait_chldexit ; struct task_struct *curr_target ; struct sigpending shared_pending ; int group_exit_code ; int notify_count ; struct task_struct *group_exit_task ; int group_stop_count ; unsigned int flags ; unsigned char is_child_subreaper : 1 ; unsigned char has_child_subreaper : 1 ; int posix_timer_id ; struct list_head posix_timers ; struct hrtimer real_timer ; struct pid *leader_pid ; ktime_t it_real_incr ; struct cpu_itimer it[2U] ; struct thread_group_cputimer cputimer ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct pid *tty_old_pgrp ; int leader ; struct tty_struct *tty ; struct autogroup *autogroup ; seqlock_t stats_lock ; cputime_t utime ; cputime_t stime ; cputime_t cutime ; cputime_t cstime ; cputime_t gtime ; cputime_t cgtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; unsigned long cnvcsw ; unsigned long cnivcsw ; unsigned long min_flt ; unsigned long maj_flt ; unsigned long cmin_flt ; unsigned long cmaj_flt ; unsigned long inblock ; unsigned long oublock ; unsigned long cinblock ; unsigned long coublock ; unsigned long maxrss ; unsigned long cmaxrss ; struct task_io_accounting ioac ; unsigned long long sum_sched_runtime ; struct rlimit rlim[16U] ; struct pacct_struct pacct ; struct taskstats *stats ; unsigned int audit_tty ; unsigned int audit_tty_log_passwd ; struct tty_audit_buf *tty_audit_buf ; oom_flags_t oom_flags ; short oom_score_adj ; short oom_score_adj_min ; struct mutex cred_guard_mutex ; }; struct user_struct { atomic_t __count ; atomic_t processes ; atomic_t sigpending ; atomic_t 
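/* Remainder of user_struct, scheduler bookkeeping (sched_info, load_weight, sched_avg, sched_statistics, sched_entity, sched_rt_entity, sched_dl_entity), and the beginning of task_struct. */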
inotify_watches ; atomic_t inotify_devs ; atomic_t fanotify_listeners ; atomic_long_t epoll_watches ; unsigned long mq_bytes ; unsigned long locked_shm ; struct key *uid_keyring ; struct key *session_keyring ; struct hlist_node uidhash_node ; kuid_t uid ; atomic_long_t locked_vm ; }; struct backing_dev_info; struct reclaim_state; struct sched_info { unsigned long pcount ; unsigned long long run_delay ; unsigned long long last_arrival ; unsigned long long last_queued ; }; struct task_delay_info { spinlock_t lock ; unsigned int flags ; u64 blkio_start ; u64 blkio_delay ; u64 swapin_delay ; u32 blkio_count ; u32 swapin_count ; u64 freepages_start ; u64 freepages_delay ; u32 freepages_count ; }; struct wake_q_node { struct wake_q_node *next ; }; struct io_context; struct pipe_inode_info; struct uts_namespace; struct load_weight { unsigned long weight ; u32 inv_weight ; }; struct sched_avg { u64 last_runnable_update ; s64 decay_count ; unsigned long load_avg_contrib ; unsigned long utilization_avg_contrib ; u32 runnable_avg_sum ; u32 avg_period ; u32 running_avg_sum ; }; struct sched_statistics { u64 wait_start ; u64 wait_max ; u64 wait_count ; u64 wait_sum ; u64 iowait_count ; u64 iowait_sum ; u64 sleep_start ; u64 sleep_max ; s64 sum_sleep_runtime ; u64 block_start ; u64 block_max ; u64 exec_max ; u64 slice_max ; u64 nr_migrations_cold ; u64 nr_failed_migrations_affine ; u64 nr_failed_migrations_running ; u64 nr_failed_migrations_hot ; u64 nr_forced_migrations ; u64 nr_wakeups ; u64 nr_wakeups_sync ; u64 nr_wakeups_migrate ; u64 nr_wakeups_local ; u64 nr_wakeups_remote ; u64 nr_wakeups_affine ; u64 nr_wakeups_affine_attempts ; u64 nr_wakeups_passive ; u64 nr_wakeups_idle ; }; struct sched_entity { struct load_weight load ; struct rb_node run_node ; struct list_head group_node ; unsigned int on_rq ; u64 exec_start ; u64 sum_exec_runtime ; u64 vruntime ; u64 prev_sum_exec_runtime ; u64 nr_migrations ; struct sched_statistics statistics ; int depth ; struct sched_entity *parent ; struct cfs_rq *cfs_rq ; struct cfs_rq *my_q ; struct sched_avg avg ; }; struct rt_rq; struct sched_rt_entity { struct list_head run_list ; unsigned long timeout ; unsigned long watchdog_stamp ; unsigned int time_slice ; struct sched_rt_entity *back ; struct sched_rt_entity *parent ; struct rt_rq *rt_rq ; struct rt_rq *my_q ; }; struct sched_dl_entity { struct rb_node rb_node ; u64 dl_runtime ; u64 dl_deadline ; u64 dl_period ; u64 dl_bw ; s64 runtime ; u64 deadline ; unsigned int flags ; int dl_throttled ; int dl_new ; int dl_boosted ; int dl_yielded ; struct hrtimer dl_timer ; }; struct memcg_oom_info { struct mem_cgroup *memcg ; gfp_t gfp_mask ; int order ; unsigned char may_oom : 1 ; }; struct sched_class; struct files_struct; struct compat_robust_list_head; struct numa_group; struct ftrace_ret_stack; struct task_struct { long volatile state ; void *stack ; atomic_t usage ; unsigned int flags ; unsigned int ptrace ; struct llist_node wake_entry ; int on_cpu ; struct task_struct *last_wakee ; unsigned long wakee_flips ; unsigned long wakee_flip_decay_ts ; int wake_cpu ; int on_rq ; int prio ; int static_prio ; int normal_prio ; unsigned int rt_priority ; struct sched_class const *sched_class ; struct sched_entity se ; struct sched_rt_entity rt ; struct task_group *sched_task_group ; struct sched_dl_entity dl ; struct hlist_head preempt_notifiers ; unsigned int btrace_seq ; unsigned int policy ; int nr_cpus_allowed ; cpumask_t cpus_allowed ; unsigned long rcu_tasks_nvcsw ; bool rcu_tasks_holdout ; struct list_head 
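/* task_struct continues: memory management, exit state, credentials, signal handling, lockdep/irq-tracing context, I/O accounting, and cgroup membership. */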
rcu_tasks_holdout_list ; int rcu_tasks_idle_cpu ; struct sched_info sched_info ; struct list_head tasks ; struct plist_node pushable_tasks ; struct rb_node pushable_dl_tasks ; struct mm_struct *mm ; struct mm_struct *active_mm ; u32 vmacache_seqnum ; struct vm_area_struct *vmacache[4U] ; struct task_rss_stat rss_stat ; int exit_state ; int exit_code ; int exit_signal ; int pdeath_signal ; unsigned long jobctl ; unsigned int personality ; unsigned char in_execve : 1 ; unsigned char in_iowait : 1 ; unsigned char sched_reset_on_fork : 1 ; unsigned char sched_contributes_to_load : 1 ; unsigned char sched_migrated : 1 ; unsigned char memcg_kmem_skip_account : 1 ; unsigned char brk_randomized : 1 ; unsigned long atomic_flags ; struct restart_block restart_block ; pid_t pid ; pid_t tgid ; struct task_struct *real_parent ; struct task_struct *parent ; struct list_head children ; struct list_head sibling ; struct task_struct *group_leader ; struct list_head ptraced ; struct list_head ptrace_entry ; struct pid_link pids[3U] ; struct list_head thread_group ; struct list_head thread_node ; struct completion *vfork_done ; int *set_child_tid ; int *clear_child_tid ; cputime_t utime ; cputime_t stime ; cputime_t utimescaled ; cputime_t stimescaled ; cputime_t gtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; u64 start_time ; u64 real_start_time ; unsigned long min_flt ; unsigned long maj_flt ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct cred const *real_cred ; struct cred const *cred ; char comm[16U] ; struct nameidata *nameidata ; struct sysv_sem sysvsem ; struct sysv_shm sysvshm ; unsigned long last_switch_count ; struct thread_struct thread ; struct fs_struct *fs ; struct files_struct *files ; struct nsproxy *nsproxy ; struct signal_struct *signal ; struct sighand_struct *sighand ; sigset_t blocked ; sigset_t real_blocked ; sigset_t saved_sigmask ; struct sigpending pending ; unsigned long sas_ss_sp ; size_t sas_ss_size ; int (*notifier)(void * ) ; void *notifier_data ; sigset_t *notifier_mask ; struct callback_head *task_works ; struct audit_context *audit_context ; kuid_t loginuid ; unsigned int sessionid ; struct seccomp seccomp ; u32 parent_exec_id ; u32 self_exec_id ; spinlock_t alloc_lock ; raw_spinlock_t pi_lock ; struct wake_q_node wake_q ; struct rb_root pi_waiters ; struct rb_node *pi_waiters_leftmost ; struct rt_mutex_waiter *pi_blocked_on ; struct mutex_waiter *blocked_on ; unsigned int irq_events ; unsigned long hardirq_enable_ip ; unsigned long hardirq_disable_ip ; unsigned int hardirq_enable_event ; unsigned int hardirq_disable_event ; int hardirqs_enabled ; int hardirq_context ; unsigned long softirq_disable_ip ; unsigned long softirq_enable_ip ; unsigned int softirq_disable_event ; unsigned int softirq_enable_event ; int softirqs_enabled ; int softirq_context ; u64 curr_chain_key ; int lockdep_depth ; unsigned int lockdep_recursion ; struct held_lock held_locks[48U] ; gfp_t lockdep_reclaim_gfp ; void *journal_info ; struct bio_list *bio_list ; struct blk_plug *plug ; struct reclaim_state *reclaim_state ; struct backing_dev_info *backing_dev_info ; struct io_context *io_context ; unsigned long ptrace_message ; siginfo_t *last_siginfo ; struct task_io_accounting ioac ; u64 acct_rss_mem1 ; u64 acct_vm_mem1 ; cputime_t acct_timexpd ; nodemask_t mems_allowed ; seqcount_t mems_allowed_seq ; int cpuset_mem_spread_rotor ; int cpuset_slab_spread_rotor ; struct css_set *cgroups ; struct list_head cg_list ; struct 
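/* Tail of task_struct (futex, perf, NUMA, ftrace fields), forward declarations for the qla2xxx driver (scsi_qla_host, fc_port, qla_hw_data, fc_bsg_job and related types), PCI/ACPI/OF device-ID tables, klist, seq_file, and the start of struct bus_type. */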
robust_list_head *robust_list ; struct compat_robust_list_head *compat_robust_list ; struct list_head pi_state_list ; struct futex_pi_state *pi_state_cache ; struct perf_event_context *perf_event_ctxp[2U] ; struct mutex perf_event_mutex ; struct list_head perf_event_list ; struct mempolicy *mempolicy ; short il_next ; short pref_node_fork ; int numa_scan_seq ; unsigned int numa_scan_period ; unsigned int numa_scan_period_max ; int numa_preferred_nid ; unsigned long numa_migrate_retry ; u64 node_stamp ; u64 last_task_numa_placement ; u64 last_sum_exec_runtime ; struct callback_head numa_work ; struct list_head numa_entry ; struct numa_group *numa_group ; unsigned long *numa_faults ; unsigned long total_numa_faults ; unsigned long numa_faults_locality[3U] ; unsigned long numa_pages_migrated ; struct callback_head rcu ; struct pipe_inode_info *splice_pipe ; struct page_frag task_frag ; struct task_delay_info *delays ; int make_it_fail ; int nr_dirtied ; int nr_dirtied_pause ; unsigned long dirty_paused_when ; int latency_record_count ; struct latency_record latency_record[32U] ; unsigned long timer_slack_ns ; unsigned long default_timer_slack_ns ; unsigned int kasan_depth ; int curr_ret_stack ; struct ftrace_ret_stack *ret_stack ; unsigned long long ftrace_timestamp ; atomic_t trace_overrun ; atomic_t tracing_graph_pause ; unsigned long trace ; unsigned long trace_recursion ; struct memcg_oom_info memcg_oom ; struct uprobe_task *utask ; unsigned int sequential_io ; unsigned int sequential_io_avg ; unsigned long task_state_change ; int pagefault_disabled ; }; struct scsi_qla_host; struct fc_port; struct device_attribute; struct fc_rport; struct scsi_target; struct Scsi_Host; struct fc_vport; struct qla_hw_data; struct scsi_device; struct fc_bsg_job; struct scsi_cmnd; typedef unsigned long kernel_ulong_t; struct pci_device_id { __u32 vendor ; __u32 device ; __u32 subvendor ; __u32 subdevice ; __u32 class ; __u32 class_mask ; kernel_ulong_t driver_data ; }; struct acpi_device_id { __u8 id[9U] ; kernel_ulong_t driver_data ; }; struct of_device_id { char name[32U] ; char type[32U] ; char compatible[128U] ; void const *data ; }; struct klist_node; struct klist { spinlock_t k_lock ; struct list_head k_list ; void (*get)(struct klist_node * ) ; void (*put)(struct klist_node * ) ; }; struct klist_node { void *n_klist ; struct list_head n_node ; struct kref n_ref ; }; struct path; struct seq_file { char *buf ; size_t size ; size_t from ; size_t count ; size_t pad_until ; loff_t index ; loff_t read_pos ; u64 version ; struct mutex lock ; struct seq_operations const *op ; int poll_event ; struct user_namespace *user_ns ; void *private ; }; struct seq_operations { void *(*start)(struct seq_file * , loff_t * ) ; void (*stop)(struct seq_file * , void * ) ; void *(*next)(struct seq_file * , void * , loff_t * ) ; int (*show)(struct seq_file * , void * ) ; }; struct pinctrl; struct pinctrl_state; struct dev_pin_info { struct pinctrl *p ; struct pinctrl_state *default_state ; struct pinctrl_state *sleep_state ; struct pinctrl_state *idle_state ; }; struct dma_map_ops; struct dev_archdata { struct dma_map_ops *dma_ops ; void *iommu ; }; struct device_private; struct device_driver; struct driver_private; struct class; struct subsys_private; struct bus_type; struct device_node; struct fwnode_handle; struct iommu_ops; struct iommu_group; struct bus_type { char const *name ; char const *dev_name ; struct device *dev_root ; struct device_attribute *dev_attrs ; struct attribute_group const **bus_groups ; struct 
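/* Remainder of bus_type, device_driver, class and class_attribute, device_type, device_attribute, DMA parameters, and most of struct device. */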
attribute_group const **dev_groups ; struct attribute_group const **drv_groups ; int (*match)(struct device * , struct device_driver * ) ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*online)(struct device * ) ; int (*offline)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct dev_pm_ops const *pm ; struct iommu_ops const *iommu_ops ; struct subsys_private *p ; struct lock_class_key lock_key ; }; struct device_type; enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; struct device_driver { char const *name ; struct bus_type *bus ; struct module *owner ; char const *mod_name ; bool suppress_bind_attrs ; enum probe_type probe_type ; struct of_device_id const *of_match_table ; struct acpi_device_id const *acpi_match_table ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct attribute_group const **groups ; struct dev_pm_ops const *pm ; struct driver_private *p ; }; struct class_attribute; struct class { char const *name ; struct module *owner ; struct class_attribute *class_attrs ; struct attribute_group const **dev_groups ; struct kobject *dev_kobj ; int (*dev_uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * ) ; void (*class_release)(struct class * ) ; void (*dev_release)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct kobj_ns_type_operations const *ns_type ; void const *(*namespace)(struct device * ) ; struct dev_pm_ops const *pm ; struct subsys_private *p ; }; struct class_attribute { struct attribute attr ; ssize_t (*show)(struct class * , struct class_attribute * , char * ) ; ssize_t (*store)(struct class * , struct class_attribute * , char const * , size_t ) ; }; struct device_type { char const *name ; struct attribute_group const **groups ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * , kuid_t * , kgid_t * ) ; void (*release)(struct device * ) ; struct dev_pm_ops const *pm ; }; struct device_attribute { struct attribute attr ; ssize_t (*show)(struct device * , struct device_attribute * , char * ) ; ssize_t (*store)(struct device * , struct device_attribute * , char const * , size_t ) ; }; struct device_dma_parameters { unsigned int max_segment_size ; unsigned long segment_boundary_mask ; }; struct dma_coherent_mem; struct cma; struct device { struct device *parent ; struct device_private *p ; struct kobject kobj ; char const *init_name ; struct device_type const *type ; struct mutex mutex ; struct bus_type *bus ; struct device_driver *driver ; void *platform_data ; void *driver_data ; struct dev_pm_info power ; struct dev_pm_domain *pm_domain ; struct dev_pin_info *pins ; int numa_node ; u64 *dma_mask ; u64 coherent_dma_mask ; unsigned long dma_pfn_offset ; struct device_dma_parameters *dma_parms ; struct list_head dma_pools ; struct dma_coherent_mem *dma_mem ; struct cma *cma_area ; struct dev_archdata archdata ; struct device_node *of_node ; struct fwnode_handle *fwnode ; dev_t devt ; u32 id ; spinlock_t devres_lock ; struct list_head devres_head ; struct klist_node knode_class ; struct class *class ; struct attribute_group const 
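/* Tail of struct device, wakeup_source, pci_slot, PCI channel-state types, and the bulk of struct pci_dev. */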
**groups ; void (*release)(struct device * ) ; struct iommu_group *iommu_group ; bool offline_disabled ; bool offline ; }; struct wakeup_source { char const *name ; struct list_head entry ; spinlock_t lock ; struct wake_irq *wakeirq ; struct timer_list timer ; unsigned long timer_expires ; ktime_t total_time ; ktime_t max_time ; ktime_t last_time ; ktime_t start_prevent_time ; ktime_t prevent_sleep_time ; unsigned long event_count ; unsigned long active_count ; unsigned long relax_count ; unsigned long expire_count ; unsigned long wakeup_count ; bool active ; bool autosleep_enabled ; }; struct hotplug_slot; struct pci_slot { struct pci_bus *bus ; struct list_head list ; struct hotplug_slot *hotplug ; unsigned char number ; struct kobject kobj ; }; typedef int pci_power_t; typedef unsigned int pci_channel_state_t; enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ; typedef unsigned short pci_dev_flags_t; typedef unsigned short pci_bus_flags_t; struct pcie_link_state; struct pci_vpd; struct pci_sriov; struct pci_ats; struct proc_dir_entry; struct pci_driver; union __anonunion____missing_field_name_220 { struct pci_sriov *sriov ; struct pci_dev *physfn ; }; struct pci_dev { struct list_head bus_list ; struct pci_bus *bus ; struct pci_bus *subordinate ; void *sysdata ; struct proc_dir_entry *procent ; struct pci_slot *slot ; unsigned int devfn ; unsigned short vendor ; unsigned short device ; unsigned short subsystem_vendor ; unsigned short subsystem_device ; unsigned int class ; u8 revision ; u8 hdr_type ; u8 pcie_cap ; u8 msi_cap ; u8 msix_cap ; unsigned char pcie_mpss : 3 ; u8 rom_base_reg ; u8 pin ; u16 pcie_flags_reg ; u8 dma_alias_devfn ; struct pci_driver *driver ; u64 dma_mask ; struct device_dma_parameters dma_parms ; pci_power_t current_state ; u8 pm_cap ; unsigned char pme_support : 5 ; unsigned char pme_interrupt : 1 ; unsigned char pme_poll : 1 ; unsigned char d1_support : 1 ; unsigned char d2_support : 1 ; unsigned char no_d1d2 : 1 ; unsigned char no_d3cold : 1 ; unsigned char d3cold_allowed : 1 ; unsigned char mmio_always_on : 1 ; unsigned char wakeup_prepared : 1 ; unsigned char runtime_d3cold : 1 ; unsigned char ignore_hotplug : 1 ; unsigned int d3_delay ; unsigned int d3cold_delay ; struct pcie_link_state *link_state ; pci_channel_state_t error_state ; struct device dev ; int cfg_size ; unsigned int irq ; struct resource resource[17U] ; bool match_driver ; unsigned char transparent : 1 ; unsigned char multifunction : 1 ; unsigned char is_added : 1 ; unsigned char is_busmaster : 1 ; unsigned char no_msi : 1 ; unsigned char no_64bit_msi : 1 ; unsigned char block_cfg_access : 1 ; unsigned char broken_parity_status : 1 ; unsigned char irq_reroute_variant : 2 ; unsigned char msi_enabled : 1 ; unsigned char msix_enabled : 1 ; unsigned char ari_enabled : 1 ; unsigned char is_managed : 1 ; unsigned char needs_freset : 1 ; unsigned char state_saved : 1 ; unsigned char is_physfn : 1 ; unsigned char is_virtfn : 1 ; unsigned char reset_fn : 1 ; unsigned char is_hotplug_bridge : 1 ; unsigned char __aer_firmware_first_valid : 1 ; unsigned char __aer_firmware_first : 1 ; unsigned char broken_intx_masking : 1 ; unsigned char io_window_1k : 1 ; unsigned char irq_managed : 1 ; unsigned char has_secondary_link : 1 ; pci_dev_flags_t dev_flags ; atomic_t enable_cnt ; u32 saved_config_space[16U] ; struct hlist_head saved_cap_space ; struct bin_attribute *rom_attr ; int rom_attr_enabled ; struct bin_attribute *res_attr[17U] ; struct 
bin_attribute *res_attr_wc[17U] ; struct list_head msi_list ; struct attribute_group const **msi_irq_groups ; struct pci_vpd *vpd ; union __anonunion____missing_field_name_220 __annonCompField58 ; struct pci_ats *ats ; phys_addr_t rom ; size_t romlen ; char *driver_override ; }; struct pci_ops; struct msi_controller; struct pci_bus { struct list_head node ; struct pci_bus *parent ; struct list_head children ; struct list_head devices ; struct pci_dev *self ; struct list_head slots ; struct resource *resource[4U] ; struct list_head resources ; struct resource busn_res ; struct pci_ops *ops ; struct msi_controller *msi ; void *sysdata ; struct proc_dir_entry *procdir ; unsigned char number ; unsigned char primary ; unsigned char max_bus_speed ; unsigned char cur_bus_speed ; char name[48U] ; unsigned short bridge_ctl ; pci_bus_flags_t bus_flags ; struct device *bridge ; struct device dev ; struct bin_attribute *legacy_io ; struct bin_attribute *legacy_mem ; unsigned char is_added : 1 ; }; struct pci_ops { void *(*map_bus)(struct pci_bus * , unsigned int , int ) ; int (*read)(struct pci_bus * , unsigned int , int , int , u32 * ) ; int (*write)(struct pci_bus * , unsigned int , int , int , u32 ) ; }; struct pci_dynids { spinlock_t lock ; struct list_head list ; }; typedef unsigned int pci_ers_result_t; struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev * , enum pci_channel_state ) ; pci_ers_result_t (*mmio_enabled)(struct pci_dev * ) ; pci_ers_result_t (*link_reset)(struct pci_dev * ) ; pci_ers_result_t (*slot_reset)(struct pci_dev * ) ; void (*reset_notify)(struct pci_dev * , bool ) ; void (*resume)(struct pci_dev * ) ; }; struct pci_driver { struct list_head node ; char const *name ; struct pci_device_id const *id_table ; int (*probe)(struct pci_dev * , struct pci_device_id const * ) ; void (*remove)(struct pci_dev * ) ; int (*suspend)(struct pci_dev * , pm_message_t ) ; int (*suspend_late)(struct pci_dev * , pm_message_t ) ; int (*resume_early)(struct pci_dev * ) ; int (*resume)(struct pci_dev * ) ; void (*shutdown)(struct pci_dev * ) ; int (*sriov_configure)(struct pci_dev * , int ) ; struct pci_error_handlers const *err_handler ; struct device_driver driver ; struct pci_dynids dynids ; }; struct shrink_control { gfp_t gfp_mask ; unsigned long nr_to_scan ; int nid ; struct mem_cgroup *memcg ; }; struct shrinker { unsigned long (*count_objects)(struct shrinker * , struct shrink_control * ) ; unsigned long (*scan_objects)(struct shrinker * , struct shrink_control * ) ; int seeks ; long batch ; unsigned long flags ; struct list_head list ; atomic_long_t *nr_deferred ; }; struct file_ra_state; struct writeback_control; struct bdi_writeback; struct vm_fault { unsigned int flags ; unsigned long pgoff ; void *virtual_address ; struct page *cow_page ; struct page *page ; unsigned long max_pgoff ; pte_t *pte ; }; struct vm_operations_struct { void (*open)(struct vm_area_struct * ) ; void (*close)(struct vm_area_struct * ) ; int (*fault)(struct vm_area_struct * , struct vm_fault * ) ; void (*map_pages)(struct vm_area_struct * , struct vm_fault * ) ; int (*page_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*pfn_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*access)(struct vm_area_struct * , unsigned long , void * , int , int ) ; char const *(*name)(struct vm_area_struct * ) ; int (*set_policy)(struct vm_area_struct * , struct mempolicy * ) ; struct mempolicy *(*get_policy)(struct vm_area_struct * , unsigned long ) ; struct page 
*(*find_special_page)(struct vm_area_struct * , unsigned long ) ; }; struct kvec; struct scatterlist { unsigned long sg_magic ; unsigned long page_link ; unsigned int offset ; unsigned int length ; dma_addr_t dma_address ; unsigned int dma_length ; }; struct sg_table { struct scatterlist *sgl ; unsigned int nents ; unsigned int orig_nents ; }; struct dma_pool; struct acpi_device; struct pci_sysdata { int domain ; int node ; struct acpi_device *companion ; void *iommu ; }; struct dma_attrs { unsigned long flags[1U] ; }; enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; struct dma_map_ops { void *(*alloc)(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ) ; void (*free)(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ) ; int (*mmap)(struct device * , struct vm_area_struct * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; int (*get_sgtable)(struct device * , struct sg_table * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; dma_addr_t (*map_page)(struct device * , struct page * , unsigned long , size_t , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_page)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ) ; int (*map_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*sync_single_for_cpu)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_single_for_device)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_sg_for_cpu)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; void (*sync_sg_for_device)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; int (*mapping_error)(struct device * , dma_addr_t ) ; int (*dma_supported)(struct device * , u64 ) ; int (*set_dma_mask)(struct device * , u64 ) ; int is_phys ; }; typedef void *mempool_alloc_t(gfp_t , void * ); typedef void mempool_free_t(void * , void * ); struct mempool_s { spinlock_t lock ; int min_nr ; int curr_nr ; void **elements ; void *pool_data ; mempool_alloc_t *alloc ; mempool_free_t *free ; wait_queue_head_t wait ; }; typedef struct mempool_s mempool_t; enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; typedef enum irqreturn irqreturn_t; struct exception_table_entry { int insn ; int fixup ; }; struct tasklet_struct { struct tasklet_struct *next ; unsigned long state ; atomic_t count ; void (*func)(unsigned long ) ; unsigned long data ; }; struct firmware { size_t size ; u8 const *data ; struct page **pages ; void *priv ; }; struct scsi_lun { __u8 scsi_lun[8U] ; }; struct scsi_sense_hdr { u8 response_code ; u8 sense_key ; u8 asc ; u8 ascq ; u8 byte4 ; u8 byte5 ; u8 byte6 ; u8 additional_length ; }; struct hlist_bl_node; struct hlist_bl_head { struct hlist_bl_node *first ; }; struct hlist_bl_node { struct hlist_bl_node *next ; struct hlist_bl_node **pprev ; }; struct __anonstruct____missing_field_name_232 { spinlock_t lock ; int count ; }; union __anonunion____missing_field_name_231 { struct __anonstruct____missing_field_name_232 __annonCompField66 ; }; struct lockref { union __anonunion____missing_field_name_231 __annonCompField67 ; }; struct vfsmount; struct __anonstruct____missing_field_name_234 { u32 hash ; u32 len ; }; union __anonunion____missing_field_name_233 { struct 
__anonstruct____missing_field_name_234 __annonCompField68 ; u64 hash_len ; }; struct qstr { union __anonunion____missing_field_name_233 __annonCompField69 ; unsigned char const *name ; }; struct dentry_operations; union __anonunion_d_u_235 { struct hlist_node d_alias ; struct callback_head d_rcu ; }; struct dentry { unsigned int d_flags ; seqcount_t d_seq ; struct hlist_bl_node d_hash ; struct dentry *d_parent ; struct qstr d_name ; struct inode *d_inode ; unsigned char d_iname[32U] ; struct lockref d_lockref ; struct dentry_operations const *d_op ; struct super_block *d_sb ; unsigned long d_time ; void *d_fsdata ; struct list_head d_lru ; struct list_head d_child ; struct list_head d_subdirs ; union __anonunion_d_u_235 d_u ; }; struct dentry_operations { int (*d_revalidate)(struct dentry * , unsigned int ) ; int (*d_weak_revalidate)(struct dentry * , unsigned int ) ; int (*d_hash)(struct dentry const * , struct qstr * ) ; int (*d_compare)(struct dentry const * , struct dentry const * , unsigned int , char const * , struct qstr const * ) ; int (*d_delete)(struct dentry const * ) ; void (*d_release)(struct dentry * ) ; void (*d_prune)(struct dentry * ) ; void (*d_iput)(struct dentry * , struct inode * ) ; char *(*d_dname)(struct dentry * , char * , int ) ; struct vfsmount *(*d_automount)(struct path * ) ; int (*d_manage)(struct dentry * , bool ) ; struct inode *(*d_select_inode)(struct dentry * , unsigned int ) ; }; struct path { struct vfsmount *mnt ; struct dentry *dentry ; }; struct list_lru_one { struct list_head list ; long nr_items ; }; struct list_lru_memcg { struct list_lru_one *lru[0U] ; }; struct list_lru_node { spinlock_t lock ; struct list_lru_one lru ; struct list_lru_memcg *memcg_lrus ; }; struct list_lru { struct list_lru_node *node ; struct list_head list ; }; struct __anonstruct____missing_field_name_239 { struct radix_tree_node *parent ; void *private_data ; }; union __anonunion____missing_field_name_238 { struct __anonstruct____missing_field_name_239 __annonCompField70 ; struct callback_head callback_head ; }; struct radix_tree_node { unsigned int path ; unsigned int count ; union __anonunion____missing_field_name_238 __annonCompField71 ; struct list_head private_list ; void *slots[64U] ; unsigned long tags[3U][1U] ; }; struct radix_tree_root { unsigned int height ; gfp_t gfp_mask ; struct radix_tree_node *rnode ; }; struct semaphore { raw_spinlock_t lock ; unsigned int count ; struct list_head wait_list ; }; struct fiemap_extent { __u64 fe_logical ; __u64 fe_physical ; __u64 fe_length ; __u64 fe_reserved64[2U] ; __u32 fe_flags ; __u32 fe_reserved[3U] ; }; enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; struct bio_set; struct bio; struct bio_integrity_payload; struct block_device; typedef void bio_end_io_t(struct bio * , int ); struct bio_vec { struct page *bv_page ; unsigned int bv_len ; unsigned int bv_offset ; }; struct bvec_iter { sector_t bi_sector ; unsigned int bi_size ; unsigned int bi_idx ; unsigned int bi_bvec_done ; }; union __anonunion____missing_field_name_242 { struct bio_integrity_payload *bi_integrity ; }; struct bio { struct bio *bi_next ; struct block_device *bi_bdev ; unsigned long bi_flags ; unsigned long bi_rw ; struct bvec_iter bi_iter ; unsigned int bi_phys_segments ; unsigned int bi_seg_front_size ; unsigned int bi_seg_back_size ; atomic_t __bi_remaining ; bio_end_io_t *bi_end_io ; void *bi_private ; struct io_context *bi_ioc ; struct cgroup_subsys_state *bi_css ; union __anonunion____missing_field_name_242 
__annonCompField72 ; unsigned short bi_vcnt ; unsigned short bi_max_vecs ; atomic_t __bi_cnt ; struct bio_vec *bi_io_vec ; struct bio_set *bi_pool ; struct bio_vec bi_inline_vecs[0U] ; }; struct export_operations; struct hd_geometry; struct iovec; struct kiocb; struct poll_table_struct; struct kstatfs; struct swap_info_struct; struct iov_iter; struct iattr { unsigned int ia_valid ; umode_t ia_mode ; kuid_t ia_uid ; kgid_t ia_gid ; loff_t ia_size ; struct timespec ia_atime ; struct timespec ia_mtime ; struct timespec ia_ctime ; struct file *ia_file ; }; struct dquot; typedef __kernel_uid32_t projid_t; struct __anonstruct_kprojid_t_243 { projid_t val ; }; typedef struct __anonstruct_kprojid_t_243 kprojid_t; enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; typedef long long qsize_t; union __anonunion____missing_field_name_244 { kuid_t uid ; kgid_t gid ; kprojid_t projid ; }; struct kqid { union __anonunion____missing_field_name_244 __annonCompField73 ; enum quota_type type ; }; struct mem_dqblk { qsize_t dqb_bhardlimit ; qsize_t dqb_bsoftlimit ; qsize_t dqb_curspace ; qsize_t dqb_rsvspace ; qsize_t dqb_ihardlimit ; qsize_t dqb_isoftlimit ; qsize_t dqb_curinodes ; time_t dqb_btime ; time_t dqb_itime ; }; struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format ; int dqi_fmt_id ; struct list_head dqi_dirty_list ; unsigned long dqi_flags ; unsigned int dqi_bgrace ; unsigned int dqi_igrace ; qsize_t dqi_max_spc_limit ; qsize_t dqi_max_ino_limit ; void *dqi_priv ; }; struct dquot { struct hlist_node dq_hash ; struct list_head dq_inuse ; struct list_head dq_free ; struct list_head dq_dirty ; struct mutex dq_lock ; atomic_t dq_count ; wait_queue_head_t dq_wait_unused ; struct super_block *dq_sb ; struct kqid dq_id ; loff_t dq_off ; unsigned long dq_flags ; struct mem_dqblk dq_dqb ; }; struct quota_format_ops { int (*check_quota_file)(struct super_block * , int ) ; int (*read_file_info)(struct super_block * , int ) ; int (*write_file_info)(struct super_block * , int ) ; int (*free_file_info)(struct super_block * , int ) ; int (*read_dqblk)(struct dquot * ) ; int (*commit_dqblk)(struct dquot * ) ; int (*release_dqblk)(struct dquot * ) ; }; struct dquot_operations { int (*write_dquot)(struct dquot * ) ; struct dquot *(*alloc_dquot)(struct super_block * , int ) ; void (*destroy_dquot)(struct dquot * ) ; int (*acquire_dquot)(struct dquot * ) ; int (*release_dquot)(struct dquot * ) ; int (*mark_dirty)(struct dquot * ) ; int (*write_info)(struct super_block * , int ) ; qsize_t *(*get_reserved_space)(struct inode * ) ; int (*get_projid)(struct inode * , kprojid_t * ) ; }; struct qc_dqblk { int d_fieldmask ; u64 d_spc_hardlimit ; u64 d_spc_softlimit ; u64 d_ino_hardlimit ; u64 d_ino_softlimit ; u64 d_space ; u64 d_ino_count ; s64 d_ino_timer ; s64 d_spc_timer ; int d_ino_warns ; int d_spc_warns ; u64 d_rt_spc_hardlimit ; u64 d_rt_spc_softlimit ; u64 d_rt_space ; s64 d_rt_spc_timer ; int d_rt_spc_warns ; }; struct qc_type_state { unsigned int flags ; unsigned int spc_timelimit ; unsigned int ino_timelimit ; unsigned int rt_spc_timelimit ; unsigned int spc_warnlimit ; unsigned int ino_warnlimit ; unsigned int rt_spc_warnlimit ; unsigned long long ino ; blkcnt_t blocks ; blkcnt_t nextents ; }; struct qc_state { unsigned int s_incoredqs ; struct qc_type_state s_state[3U] ; }; struct qc_info { int i_fieldmask ; unsigned int i_flags ; unsigned int i_spc_timelimit ; unsigned int i_ino_timelimit ; unsigned int i_rt_spc_timelimit ; unsigned int i_spc_warnlimit ; unsigned int 
i_ino_warnlimit ; unsigned int i_rt_spc_warnlimit ; }; struct quotactl_ops { int (*quota_on)(struct super_block * , int , int , struct path * ) ; int (*quota_off)(struct super_block * , int ) ; int (*quota_enable)(struct super_block * , unsigned int ) ; int (*quota_disable)(struct super_block * , unsigned int ) ; int (*quota_sync)(struct super_block * , int ) ; int (*set_info)(struct super_block * , int , struct qc_info * ) ; int (*get_dqblk)(struct super_block * , struct kqid , struct qc_dqblk * ) ; int (*set_dqblk)(struct super_block * , struct kqid , struct qc_dqblk * ) ; int (*get_state)(struct super_block * , struct qc_state * ) ; int (*rm_xquota)(struct super_block * , unsigned int ) ; }; struct quota_format_type { int qf_fmt_id ; struct quota_format_ops const *qf_ops ; struct module *qf_owner ; struct quota_format_type *qf_next ; }; struct quota_info { unsigned int flags ; struct mutex dqio_mutex ; struct mutex dqonoff_mutex ; struct inode *files[3U] ; struct mem_dqinfo info[3U] ; struct quota_format_ops const *ops[3U] ; }; struct kiocb { struct file *ki_filp ; loff_t ki_pos ; void (*ki_complete)(struct kiocb * , long , long ) ; void *private ; int ki_flags ; }; struct address_space_operations { int (*writepage)(struct page * , struct writeback_control * ) ; int (*readpage)(struct file * , struct page * ) ; int (*writepages)(struct address_space * , struct writeback_control * ) ; int (*set_page_dirty)(struct page * ) ; int (*readpages)(struct file * , struct address_space * , struct list_head * , unsigned int ) ; int (*write_begin)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page ** , void ** ) ; int (*write_end)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page * , void * ) ; sector_t (*bmap)(struct address_space * , sector_t ) ; void (*invalidatepage)(struct page * , unsigned int , unsigned int ) ; int (*releasepage)(struct page * , gfp_t ) ; void (*freepage)(struct page * ) ; ssize_t (*direct_IO)(struct kiocb * , struct iov_iter * , loff_t ) ; int (*migratepage)(struct address_space * , struct page * , struct page * , enum migrate_mode ) ; int (*launder_page)(struct page * ) ; int (*is_partially_uptodate)(struct page * , unsigned long , unsigned long ) ; void (*is_dirty_writeback)(struct page * , bool * , bool * ) ; int (*error_remove_page)(struct address_space * , struct page * ) ; int (*swap_activate)(struct swap_info_struct * , struct file * , sector_t * ) ; void (*swap_deactivate)(struct file * ) ; }; struct address_space { struct inode *host ; struct radix_tree_root page_tree ; spinlock_t tree_lock ; atomic_t i_mmap_writable ; struct rb_root i_mmap ; struct rw_semaphore i_mmap_rwsem ; unsigned long nrpages ; unsigned long nrshadows ; unsigned long writeback_index ; struct address_space_operations const *a_ops ; unsigned long flags ; spinlock_t private_lock ; struct list_head private_list ; void *private_data ; }; struct request_queue; struct hd_struct; struct gendisk; struct block_device { dev_t bd_dev ; int bd_openers ; struct inode *bd_inode ; struct super_block *bd_super ; struct mutex bd_mutex ; struct list_head bd_inodes ; void *bd_claiming ; void *bd_holder ; int bd_holders ; bool bd_write_holder ; struct list_head bd_holder_disks ; struct block_device *bd_contains ; unsigned int bd_block_size ; struct hd_struct *bd_part ; unsigned int bd_part_count ; int bd_invalidated ; struct gendisk *bd_disk ; struct request_queue *bd_queue ; struct list_head bd_list ; unsigned long bd_private ; 
int bd_fsfreeze_count ; struct mutex bd_fsfreeze_mutex ; }; struct posix_acl; struct inode_operations; union __anonunion____missing_field_name_247 { unsigned int const i_nlink ; unsigned int __i_nlink ; }; union __anonunion____missing_field_name_248 { struct hlist_head i_dentry ; struct callback_head i_rcu ; }; struct file_lock_context; struct cdev; union __anonunion____missing_field_name_249 { struct pipe_inode_info *i_pipe ; struct block_device *i_bdev ; struct cdev *i_cdev ; char *i_link ; }; struct inode { umode_t i_mode ; unsigned short i_opflags ; kuid_t i_uid ; kgid_t i_gid ; unsigned int i_flags ; struct posix_acl *i_acl ; struct posix_acl *i_default_acl ; struct inode_operations const *i_op ; struct super_block *i_sb ; struct address_space *i_mapping ; void *i_security ; unsigned long i_ino ; union __anonunion____missing_field_name_247 __annonCompField74 ; dev_t i_rdev ; loff_t i_size ; struct timespec i_atime ; struct timespec i_mtime ; struct timespec i_ctime ; spinlock_t i_lock ; unsigned short i_bytes ; unsigned int i_blkbits ; blkcnt_t i_blocks ; unsigned long i_state ; struct mutex i_mutex ; unsigned long dirtied_when ; unsigned long dirtied_time_when ; struct hlist_node i_hash ; struct list_head i_wb_list ; struct bdi_writeback *i_wb ; int i_wb_frn_winner ; u16 i_wb_frn_avg_time ; u16 i_wb_frn_history ; struct list_head i_lru ; struct list_head i_sb_list ; union __anonunion____missing_field_name_248 __annonCompField75 ; u64 i_version ; atomic_t i_count ; atomic_t i_dio_count ; atomic_t i_writecount ; atomic_t i_readcount ; struct file_operations const *i_fop ; struct file_lock_context *i_flctx ; struct address_space i_data ; struct list_head i_devices ; union __anonunion____missing_field_name_249 __annonCompField76 ; __u32 i_generation ; __u32 i_fsnotify_mask ; struct hlist_head i_fsnotify_marks ; void *i_private ; }; struct fown_struct { rwlock_t lock ; struct pid *pid ; enum pid_type pid_type ; kuid_t uid ; kuid_t euid ; int signum ; }; struct file_ra_state { unsigned long start ; unsigned int size ; unsigned int async_size ; unsigned int ra_pages ; unsigned int mmap_miss ; loff_t prev_pos ; }; union __anonunion_f_u_250 { struct llist_node fu_llist ; struct callback_head fu_rcuhead ; }; struct file { union __anonunion_f_u_250 f_u ; struct path f_path ; struct inode *f_inode ; struct file_operations const *f_op ; spinlock_t f_lock ; atomic_long_t f_count ; unsigned int f_flags ; fmode_t f_mode ; struct mutex f_pos_lock ; loff_t f_pos ; struct fown_struct f_owner ; struct cred const *f_cred ; struct file_ra_state f_ra ; u64 f_version ; void *f_security ; void *private_data ; struct list_head f_ep_links ; struct list_head f_tfile_llink ; struct address_space *f_mapping ; }; typedef void *fl_owner_t; struct file_lock; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock * , struct file_lock * ) ; void (*fl_release_private)(struct file_lock * ) ; }; struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock * , struct file_lock * ) ; unsigned long (*lm_owner_key)(struct file_lock * ) ; fl_owner_t (*lm_get_owner)(fl_owner_t ) ; void (*lm_put_owner)(fl_owner_t ) ; void (*lm_notify)(struct file_lock * ) ; int (*lm_grant)(struct file_lock * , int ) ; bool (*lm_break)(struct file_lock * ) ; int (*lm_change)(struct file_lock * , int , struct list_head * ) ; void (*lm_setup)(struct file_lock * , void ** ) ; }; struct net; struct nlm_lockowner; struct nfs_lock_info { u32 state ; struct nlm_lockowner *owner ; struct list_head list ; }; struct 
nfs4_lock_state; struct nfs4_lock_info { struct nfs4_lock_state *owner ; }; struct fasync_struct; struct __anonstruct_afs_252 { struct list_head link ; int state ; }; union __anonunion_fl_u_251 { struct nfs_lock_info nfs_fl ; struct nfs4_lock_info nfs4_fl ; struct __anonstruct_afs_252 afs ; }; struct file_lock { struct file_lock *fl_next ; struct list_head fl_list ; struct hlist_node fl_link ; struct list_head fl_block ; fl_owner_t fl_owner ; unsigned int fl_flags ; unsigned char fl_type ; unsigned int fl_pid ; int fl_link_cpu ; struct pid *fl_nspid ; wait_queue_head_t fl_wait ; struct file *fl_file ; loff_t fl_start ; loff_t fl_end ; struct fasync_struct *fl_fasync ; unsigned long fl_break_time ; unsigned long fl_downgrade_time ; struct file_lock_operations const *fl_ops ; struct lock_manager_operations const *fl_lmops ; union __anonunion_fl_u_251 fl_u ; }; struct file_lock_context { spinlock_t flc_lock ; struct list_head flc_flock ; struct list_head flc_posix ; struct list_head flc_lease ; }; struct fasync_struct { spinlock_t fa_lock ; int magic ; int fa_fd ; struct fasync_struct *fa_next ; struct file *fa_file ; struct callback_head fa_rcu ; }; struct sb_writers { struct percpu_counter counter[3U] ; wait_queue_head_t wait ; int frozen ; wait_queue_head_t wait_unfrozen ; struct lockdep_map lock_map[3U] ; }; struct super_operations; struct xattr_handler; struct mtd_info; struct super_block { struct list_head s_list ; dev_t s_dev ; unsigned char s_blocksize_bits ; unsigned long s_blocksize ; loff_t s_maxbytes ; struct file_system_type *s_type ; struct super_operations const *s_op ; struct dquot_operations const *dq_op ; struct quotactl_ops const *s_qcop ; struct export_operations const *s_export_op ; unsigned long s_flags ; unsigned long s_iflags ; unsigned long s_magic ; struct dentry *s_root ; struct rw_semaphore s_umount ; int s_count ; atomic_t s_active ; void *s_security ; struct xattr_handler const **s_xattr ; struct list_head s_inodes ; struct hlist_bl_head s_anon ; struct list_head s_mounts ; struct block_device *s_bdev ; struct backing_dev_info *s_bdi ; struct mtd_info *s_mtd ; struct hlist_node s_instances ; unsigned int s_quota_types ; struct quota_info s_dquot ; struct sb_writers s_writers ; char s_id[32U] ; u8 s_uuid[16U] ; void *s_fs_info ; unsigned int s_max_links ; fmode_t s_mode ; u32 s_time_gran ; struct mutex s_vfs_rename_mutex ; char *s_subtype ; char *s_options ; struct dentry_operations const *s_d_op ; int cleancache_poolid ; struct shrinker s_shrink ; atomic_long_t s_remove_count ; int s_readonly_remount ; struct workqueue_struct *s_dio_done_wq ; struct hlist_head s_pins ; struct list_lru s_dentry_lru ; struct list_lru s_inode_lru ; struct callback_head rcu ; int s_stack_depth ; }; struct fiemap_extent_info { unsigned int fi_flags ; unsigned int fi_extents_mapped ; unsigned int fi_extents_max ; struct fiemap_extent *fi_extents_start ; }; struct dir_context; struct dir_context { int (*actor)(struct dir_context * , char const * , int , loff_t , u64 , unsigned int ) ; loff_t pos ; }; struct block_device_operations; struct file_operations { struct module *owner ; loff_t (*llseek)(struct file * , loff_t , int ) ; ssize_t (*read)(struct file * , char * , size_t , loff_t * ) ; ssize_t (*write)(struct file * , char const * , size_t , loff_t * ) ; ssize_t (*read_iter)(struct kiocb * , struct iov_iter * ) ; ssize_t (*write_iter)(struct kiocb * , struct iov_iter * ) ; int (*iterate)(struct file * , struct dir_context * ) ; unsigned int (*poll)(struct file * , struct 
poll_table_struct * ) ; long (*unlocked_ioctl)(struct file * , unsigned int , unsigned long ) ; long (*compat_ioctl)(struct file * , unsigned int , unsigned long ) ; int (*mmap)(struct file * , struct vm_area_struct * ) ; int (*mremap)(struct file * , struct vm_area_struct * ) ; int (*open)(struct inode * , struct file * ) ; int (*flush)(struct file * , fl_owner_t ) ; int (*release)(struct inode * , struct file * ) ; int (*fsync)(struct file * , loff_t , loff_t , int ) ; int (*aio_fsync)(struct kiocb * , int ) ; int (*fasync)(int , struct file * , int ) ; int (*lock)(struct file * , int , struct file_lock * ) ; ssize_t (*sendpage)(struct file * , struct page * , int , size_t , loff_t * , int ) ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; int (*check_flags)(int ) ; int (*flock)(struct file * , int , struct file_lock * ) ; ssize_t (*splice_write)(struct pipe_inode_info * , struct file * , loff_t * , size_t , unsigned int ) ; ssize_t (*splice_read)(struct file * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*setlease)(struct file * , long , struct file_lock ** , void ** ) ; long (*fallocate)(struct file * , int , loff_t , loff_t ) ; void (*show_fdinfo)(struct seq_file * , struct file * ) ; }; struct inode_operations { struct dentry *(*lookup)(struct inode * , struct dentry * , unsigned int ) ; char const *(*follow_link)(struct dentry * , void ** ) ; int (*permission)(struct inode * , int ) ; struct posix_acl *(*get_acl)(struct inode * , int ) ; int (*readlink)(struct dentry * , char * , int ) ; void (*put_link)(struct inode * , void * ) ; int (*create)(struct inode * , struct dentry * , umode_t , bool ) ; int (*link)(struct dentry * , struct inode * , struct dentry * ) ; int (*unlink)(struct inode * , struct dentry * ) ; int (*symlink)(struct inode * , struct dentry * , char const * ) ; int (*mkdir)(struct inode * , struct dentry * , umode_t ) ; int (*rmdir)(struct inode * , struct dentry * ) ; int (*mknod)(struct inode * , struct dentry * , umode_t , dev_t ) ; int (*rename)(struct inode * , struct dentry * , struct inode * , struct dentry * ) ; int (*rename2)(struct inode * , struct dentry * , struct inode * , struct dentry * , unsigned int ) ; int (*setattr)(struct dentry * , struct iattr * ) ; int (*getattr)(struct vfsmount * , struct dentry * , struct kstat * ) ; int (*setxattr)(struct dentry * , char const * , void const * , size_t , int ) ; ssize_t (*getxattr)(struct dentry * , char const * , void * , size_t ) ; ssize_t (*listxattr)(struct dentry * , char * , size_t ) ; int (*removexattr)(struct dentry * , char const * ) ; int (*fiemap)(struct inode * , struct fiemap_extent_info * , u64 , u64 ) ; int (*update_time)(struct inode * , struct timespec * , int ) ; int (*atomic_open)(struct inode * , struct dentry * , struct file * , unsigned int , umode_t , int * ) ; int (*tmpfile)(struct inode * , struct dentry * , umode_t ) ; int (*set_acl)(struct inode * , struct posix_acl * , int ) ; }; struct super_operations { struct inode *(*alloc_inode)(struct super_block * ) ; void (*destroy_inode)(struct inode * ) ; void (*dirty_inode)(struct inode * , int ) ; int (*write_inode)(struct inode * , struct writeback_control * ) ; int (*drop_inode)(struct inode * ) ; void (*evict_inode)(struct inode * ) ; void (*put_super)(struct super_block * ) ; int (*sync_fs)(struct super_block * , int ) ; int (*freeze_super)(struct super_block * ) ; int (*freeze_fs)(struct super_block * ) ; int (*thaw_super)(struct 
super_block * ) ; int (*unfreeze_fs)(struct super_block * ) ; int (*statfs)(struct dentry * , struct kstatfs * ) ; int (*remount_fs)(struct super_block * , int * , char * ) ; void (*umount_begin)(struct super_block * ) ; int (*show_options)(struct seq_file * , struct dentry * ) ; int (*show_devname)(struct seq_file * , struct dentry * ) ; int (*show_path)(struct seq_file * , struct dentry * ) ; int (*show_stats)(struct seq_file * , struct dentry * ) ; ssize_t (*quota_read)(struct super_block * , int , char * , size_t , loff_t ) ; ssize_t (*quota_write)(struct super_block * , int , char const * , size_t , loff_t ) ; struct dquot **(*get_dquots)(struct inode * ) ; int (*bdev_try_to_free_page)(struct super_block * , struct page * , gfp_t ) ; long (*nr_cached_objects)(struct super_block * , struct shrink_control * ) ; long (*free_cached_objects)(struct super_block * , struct shrink_control * ) ; }; struct file_system_type { char const *name ; int fs_flags ; struct dentry *(*mount)(struct file_system_type * , int , char const * , void * ) ; void (*kill_sb)(struct super_block * ) ; struct module *owner ; struct file_system_type *next ; struct hlist_head fs_supers ; struct lock_class_key s_lock_key ; struct lock_class_key s_umount_key ; struct lock_class_key s_vfs_rename_key ; struct lock_class_key s_writers_key[3U] ; struct lock_class_key i_lock_key ; struct lock_class_key i_mutex_key ; struct lock_class_key i_mutex_dir_key ; }; struct disk_stats { unsigned long sectors[2U] ; unsigned long ios[2U] ; unsigned long merges[2U] ; unsigned long ticks[2U] ; unsigned long io_ticks ; unsigned long time_in_queue ; }; struct partition_meta_info { char uuid[37U] ; u8 volname[64U] ; }; struct hd_struct { sector_t start_sect ; sector_t nr_sects ; seqcount_t nr_sects_seq ; sector_t alignment_offset ; unsigned int discard_alignment ; struct device __dev ; struct kobject *holder_dir ; int policy ; int partno ; struct partition_meta_info *info ; int make_it_fail ; unsigned long stamp ; atomic_t in_flight[2U] ; struct disk_stats *dkstats ; atomic_t ref ; struct callback_head callback_head ; }; struct disk_part_tbl { struct callback_head callback_head ; int len ; struct hd_struct *last_lookup ; struct hd_struct *part[] ; }; struct disk_events; struct timer_rand_state; struct blk_integrity; struct gendisk { int major ; int first_minor ; int minors ; char disk_name[32U] ; char *(*devnode)(struct gendisk * , umode_t * ) ; unsigned int events ; unsigned int async_events ; struct disk_part_tbl *part_tbl ; struct hd_struct part0 ; struct block_device_operations const *fops ; struct request_queue *queue ; void *private_data ; int flags ; struct device *driverfs_dev ; struct kobject *slave_dir ; struct timer_rand_state *random ; atomic_t sync_io ; struct disk_events *ev ; struct blk_integrity *integrity ; int node_id ; }; struct fprop_local_percpu { struct percpu_counter events ; unsigned int period ; raw_spinlock_t lock ; }; typedef int congested_fn(void * , int ); struct bdi_writeback_congested { unsigned long state ; atomic_t refcnt ; struct backing_dev_info *bdi ; int blkcg_id ; struct rb_node rb_node ; }; union __anonunion____missing_field_name_253 { struct work_struct release_work ; struct callback_head rcu ; }; struct bdi_writeback { struct backing_dev_info *bdi ; unsigned long state ; unsigned long last_old_flush ; struct list_head b_dirty ; struct list_head b_io ; struct list_head b_more_io ; struct list_head b_dirty_time ; spinlock_t list_lock ; struct percpu_counter stat[4U] ; struct bdi_writeback_congested 
*congested ; unsigned long bw_time_stamp ; unsigned long dirtied_stamp ; unsigned long written_stamp ; unsigned long write_bandwidth ; unsigned long avg_write_bandwidth ; unsigned long dirty_ratelimit ; unsigned long balanced_dirty_ratelimit ; struct fprop_local_percpu completions ; int dirty_exceeded ; spinlock_t work_lock ; struct list_head work_list ; struct delayed_work dwork ; struct percpu_ref refcnt ; struct fprop_local_percpu memcg_completions ; struct cgroup_subsys_state *memcg_css ; struct cgroup_subsys_state *blkcg_css ; struct list_head memcg_node ; struct list_head blkcg_node ; union __anonunion____missing_field_name_253 __annonCompField77 ; }; struct backing_dev_info { struct list_head bdi_list ; unsigned long ra_pages ; unsigned int capabilities ; congested_fn *congested_fn ; void *congested_data ; char *name ; unsigned int min_ratio ; unsigned int max_ratio ; unsigned int max_prop_frac ; atomic_long_t tot_write_bandwidth ; struct bdi_writeback wb ; struct radix_tree_root cgwb_tree ; struct rb_root cgwb_congested_tree ; atomic_t usage_cnt ; wait_queue_head_t wb_waitq ; struct device *dev ; struct timer_list laptop_mode_wb_timer ; struct dentry *debug_dir ; struct dentry *debug_stats ; }; union __anonunion____missing_field_name_254 { struct list_head q_node ; struct kmem_cache *__rcu_icq_cache ; }; union __anonunion____missing_field_name_255 { struct hlist_node ioc_node ; struct callback_head __rcu_head ; }; struct io_cq { struct request_queue *q ; struct io_context *ioc ; union __anonunion____missing_field_name_254 __annonCompField78 ; union __anonunion____missing_field_name_255 __annonCompField79 ; unsigned int flags ; }; struct io_context { atomic_long_t refcount ; atomic_t active_ref ; atomic_t nr_tasks ; spinlock_t lock ; unsigned short ioprio ; int nr_batch_requests ; unsigned long last_waited ; struct radix_tree_root icq_tree ; struct io_cq *icq_hint ; struct hlist_head icq_list ; struct work_struct release_work ; }; struct bio_integrity_payload { struct bio *bip_bio ; struct bvec_iter bip_iter ; bio_end_io_t *bip_end_io ; unsigned short bip_slab ; unsigned short bip_vcnt ; unsigned short bip_max_vcnt ; unsigned short bip_flags ; struct work_struct bip_work ; struct bio_vec *bip_vec ; struct bio_vec bip_inline_vecs[0U] ; }; struct bio_list { struct bio *head ; struct bio *tail ; }; struct bio_set { struct kmem_cache *bio_slab ; unsigned int front_pad ; mempool_t *bio_pool ; mempool_t *bvec_pool ; mempool_t *bio_integrity_pool ; mempool_t *bvec_integrity_pool ; spinlock_t rescue_lock ; struct bio_list rescue_list ; struct work_struct rescue_work ; struct workqueue_struct *rescue_workqueue ; }; struct bsg_class_device { struct device *class_dev ; struct device *parent ; int minor ; struct request_queue *queue ; struct kref ref ; void (*release)(struct device * ) ; }; struct elevator_queue; struct blk_trace; struct request; struct bsg_job; struct blkcg_gq; struct blk_flush_queue; typedef void rq_end_io_fn(struct request * , int ); struct request_list { struct request_queue *q ; struct blkcg_gq *blkg ; int count[2U] ; int starved[2U] ; mempool_t *rq_pool ; wait_queue_head_t wait[2U] ; unsigned int flags ; }; union __anonunion____missing_field_name_256 { struct call_single_data csd ; unsigned long fifo_time ; }; struct blk_mq_ctx; union __anonunion____missing_field_name_257 { struct hlist_node hash ; struct list_head ipi_list ; }; union __anonunion____missing_field_name_258 { struct rb_node rb_node ; void *completion_data ; }; struct __anonstruct_elv_260 { struct io_cq *icq 
; void *priv[2U] ; }; struct __anonstruct_flush_261 { unsigned int seq ; struct list_head list ; rq_end_io_fn *saved_end_io ; }; union __anonunion____missing_field_name_259 { struct __anonstruct_elv_260 elv ; struct __anonstruct_flush_261 flush ; }; struct request { struct list_head queuelist ; union __anonunion____missing_field_name_256 __annonCompField80 ; struct request_queue *q ; struct blk_mq_ctx *mq_ctx ; u64 cmd_flags ; unsigned int cmd_type ; unsigned long atomic_flags ; int cpu ; unsigned int __data_len ; sector_t __sector ; struct bio *bio ; struct bio *biotail ; union __anonunion____missing_field_name_257 __annonCompField81 ; union __anonunion____missing_field_name_258 __annonCompField82 ; union __anonunion____missing_field_name_259 __annonCompField83 ; struct gendisk *rq_disk ; struct hd_struct *part ; unsigned long start_time ; struct request_list *rl ; unsigned long long start_time_ns ; unsigned long long io_start_time_ns ; unsigned short nr_phys_segments ; unsigned short nr_integrity_segments ; unsigned short ioprio ; void *special ; int tag ; int errors ; unsigned char __cmd[16U] ; unsigned char *cmd ; unsigned short cmd_len ; unsigned int extra_len ; unsigned int sense_len ; unsigned int resid_len ; void *sense ; unsigned long deadline ; struct list_head timeout_list ; unsigned int timeout ; int retries ; rq_end_io_fn *end_io ; void *end_io_data ; struct request *next_rq ; }; struct elevator_type; typedef int elevator_merge_fn(struct request_queue * , struct request ** , struct bio * ); typedef void elevator_merge_req_fn(struct request_queue * , struct request * , struct request * ); typedef void elevator_merged_fn(struct request_queue * , struct request * , int ); typedef int elevator_allow_merge_fn(struct request_queue * , struct request * , struct bio * ); typedef void elevator_bio_merged_fn(struct request_queue * , struct request * , struct bio * ); typedef int elevator_dispatch_fn(struct request_queue * , int ); typedef void elevator_add_req_fn(struct request_queue * , struct request * ); typedef struct request *elevator_request_list_fn(struct request_queue * , struct request * ); typedef void elevator_completed_req_fn(struct request_queue * , struct request * ); typedef int elevator_may_queue_fn(struct request_queue * , int ); typedef void elevator_init_icq_fn(struct io_cq * ); typedef void elevator_exit_icq_fn(struct io_cq * ); typedef int elevator_set_req_fn(struct request_queue * , struct request * , struct bio * , gfp_t ); typedef void elevator_put_req_fn(struct request * ); typedef void elevator_activate_req_fn(struct request_queue * , struct request * ); typedef void elevator_deactivate_req_fn(struct request_queue * , struct request * ); typedef int elevator_init_fn(struct request_queue * , struct elevator_type * ); typedef void elevator_exit_fn(struct elevator_queue * ); typedef void elevator_registered_fn(struct request_queue * ); struct elevator_ops { elevator_merge_fn *elevator_merge_fn ; elevator_merged_fn *elevator_merged_fn ; elevator_merge_req_fn *elevator_merge_req_fn ; elevator_allow_merge_fn *elevator_allow_merge_fn ; elevator_bio_merged_fn *elevator_bio_merged_fn ; elevator_dispatch_fn *elevator_dispatch_fn ; elevator_add_req_fn *elevator_add_req_fn ; elevator_activate_req_fn *elevator_activate_req_fn ; elevator_deactivate_req_fn *elevator_deactivate_req_fn ; elevator_completed_req_fn *elevator_completed_req_fn ; elevator_request_list_fn *elevator_former_req_fn ; elevator_request_list_fn *elevator_latter_req_fn ; elevator_init_icq_fn 
*elevator_init_icq_fn ; elevator_exit_icq_fn *elevator_exit_icq_fn ; elevator_set_req_fn *elevator_set_req_fn ; elevator_put_req_fn *elevator_put_req_fn ; elevator_may_queue_fn *elevator_may_queue_fn ; elevator_init_fn *elevator_init_fn ; elevator_exit_fn *elevator_exit_fn ; elevator_registered_fn *elevator_registered_fn ; }; struct elv_fs_entry { struct attribute attr ; ssize_t (*show)(struct elevator_queue * , char * ) ; ssize_t (*store)(struct elevator_queue * , char const * , size_t ) ; }; struct elevator_type { struct kmem_cache *icq_cache ; struct elevator_ops ops ; size_t icq_size ; size_t icq_align ; struct elv_fs_entry *elevator_attrs ; char elevator_name[16U] ; struct module *elevator_owner ; char icq_cache_name[21U] ; struct list_head list ; }; struct elevator_queue { struct elevator_type *type ; void *elevator_data ; struct kobject kobj ; struct mutex sysfs_lock ; unsigned char registered : 1 ; struct hlist_head hash[64U] ; }; typedef void request_fn_proc(struct request_queue * ); typedef void make_request_fn(struct request_queue * , struct bio * ); typedef int prep_rq_fn(struct request_queue * , struct request * ); typedef void unprep_rq_fn(struct request_queue * , struct request * ); struct bvec_merge_data { struct block_device *bi_bdev ; sector_t bi_sector ; unsigned int bi_size ; unsigned long bi_rw ; }; typedef int merge_bvec_fn(struct request_queue * , struct bvec_merge_data * , struct bio_vec * ); typedef void softirq_done_fn(struct request * ); typedef int dma_drain_needed_fn(struct request * ); typedef int lld_busy_fn(struct request_queue * ); typedef int bsg_job_fn(struct bsg_job * ); enum blk_eh_timer_return { BLK_EH_NOT_HANDLED = 0, BLK_EH_HANDLED = 1, BLK_EH_RESET_TIMER = 2 } ; typedef enum blk_eh_timer_return rq_timed_out_fn(struct request * ); struct blk_queue_tag { struct request **tag_index ; unsigned long *tag_map ; int busy ; int max_depth ; int real_max_depth ; atomic_t refcnt ; int alloc_policy ; int next_tag ; }; struct queue_limits { unsigned long bounce_pfn ; unsigned long seg_boundary_mask ; unsigned int max_hw_sectors ; unsigned int chunk_sectors ; unsigned int max_sectors ; unsigned int max_segment_size ; unsigned int physical_block_size ; unsigned int alignment_offset ; unsigned int io_min ; unsigned int io_opt ; unsigned int max_discard_sectors ; unsigned int max_write_same_sectors ; unsigned int discard_granularity ; unsigned int discard_alignment ; unsigned short logical_block_size ; unsigned short max_segments ; unsigned short max_integrity_segments ; unsigned char misaligned ; unsigned char discard_misaligned ; unsigned char cluster ; unsigned char discard_zeroes_data ; unsigned char raid_partial_stripes_expensive ; }; struct blk_mq_ops; struct blk_mq_hw_ctx; struct throtl_data; struct blk_mq_tag_set; struct request_queue { struct list_head queue_head ; struct request *last_merge ; struct elevator_queue *elevator ; int nr_rqs[2U] ; int nr_rqs_elvpriv ; struct request_list root_rl ; request_fn_proc *request_fn ; make_request_fn *make_request_fn ; prep_rq_fn *prep_rq_fn ; unprep_rq_fn *unprep_rq_fn ; merge_bvec_fn *merge_bvec_fn ; softirq_done_fn *softirq_done_fn ; rq_timed_out_fn *rq_timed_out_fn ; dma_drain_needed_fn *dma_drain_needed ; lld_busy_fn *lld_busy_fn ; struct blk_mq_ops *mq_ops ; unsigned int *mq_map ; struct blk_mq_ctx *queue_ctx ; unsigned int nr_queues ; struct blk_mq_hw_ctx **queue_hw_ctx ; unsigned int nr_hw_queues ; sector_t end_sector ; struct request *boundary_rq ; struct delayed_work delay_work ; struct backing_dev_info 
backing_dev_info ; void *queuedata ; unsigned long queue_flags ; int id ; gfp_t bounce_gfp ; spinlock_t __queue_lock ; spinlock_t *queue_lock ; struct kobject kobj ; struct kobject mq_kobj ; struct device *dev ; int rpm_status ; unsigned int nr_pending ; unsigned long nr_requests ; unsigned int nr_congestion_on ; unsigned int nr_congestion_off ; unsigned int nr_batching ; unsigned int dma_drain_size ; void *dma_drain_buffer ; unsigned int dma_pad_mask ; unsigned int dma_alignment ; struct blk_queue_tag *queue_tags ; struct list_head tag_busy_list ; unsigned int nr_sorted ; unsigned int in_flight[2U] ; unsigned int request_fn_active ; unsigned int rq_timeout ; struct timer_list timeout ; struct list_head timeout_list ; struct list_head icq_list ; unsigned long blkcg_pols[1U] ; struct blkcg_gq *root_blkg ; struct list_head blkg_list ; struct queue_limits limits ; unsigned int sg_timeout ; unsigned int sg_reserved_size ; int node ; struct blk_trace *blk_trace ; unsigned int flush_flags ; unsigned char flush_not_queueable : 1 ; struct blk_flush_queue *fq ; struct list_head requeue_list ; spinlock_t requeue_lock ; struct work_struct requeue_work ; struct mutex sysfs_lock ; int bypass_depth ; atomic_t mq_freeze_depth ; bsg_job_fn *bsg_job_fn ; int bsg_job_size ; struct bsg_class_device bsg_dev ; struct throtl_data *td ; struct callback_head callback_head ; wait_queue_head_t mq_freeze_wq ; struct percpu_ref mq_usage_counter ; struct list_head all_q_node ; struct blk_mq_tag_set *tag_set ; struct list_head tag_set_list ; }; struct blk_plug { struct list_head list ; struct list_head mq_list ; struct list_head cb_list ; }; struct blk_integrity_iter { void *prot_buf ; void *data_buf ; sector_t seed ; unsigned int data_size ; unsigned short interval ; char const *disk_name ; }; typedef int integrity_processing_fn(struct blk_integrity_iter * ); struct blk_integrity { integrity_processing_fn *generate_fn ; integrity_processing_fn *verify_fn ; unsigned short flags ; unsigned short tuple_size ; unsigned short interval ; unsigned short tag_size ; char const *name ; struct kobject kobj ; }; struct block_device_operations { int (*open)(struct block_device * , fmode_t ) ; void (*release)(struct gendisk * , fmode_t ) ; int (*rw_page)(struct block_device * , sector_t , struct page * , int ) ; int (*ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; long (*direct_access)(struct block_device * , sector_t , void ** , unsigned long * , long ) ; unsigned int (*check_events)(struct gendisk * , unsigned int ) ; int (*media_changed)(struct gendisk * ) ; void (*unlock_native_capacity)(struct gendisk * ) ; int (*revalidate_disk)(struct gendisk * ) ; int (*getgeo)(struct block_device * , struct hd_geometry * ) ; void (*swap_slot_free_notify)(struct block_device * , unsigned long ) ; struct module *owner ; }; struct blk_mq_tags; struct blk_mq_cpu_notifier { struct list_head list ; void *data ; int (*notify)(void * , unsigned long , unsigned int ) ; }; struct blk_align_bitmap; struct blk_mq_ctxmap { unsigned int size ; unsigned int bits_per_word ; struct blk_align_bitmap *map ; }; struct __anonstruct____missing_field_name_263 { spinlock_t lock ; struct list_head dispatch ; }; struct blk_mq_hw_ctx { struct __anonstruct____missing_field_name_263 __annonCompField84 ; unsigned long state ; struct delayed_work run_work ; struct delayed_work delay_work ; cpumask_var_t cpumask ; int next_cpu ; int next_cpu_batch ; unsigned 
long flags ; struct request_queue *queue ; struct blk_flush_queue *fq ; void *driver_data ; struct blk_mq_ctxmap ctx_map ; unsigned int nr_ctx ; struct blk_mq_ctx **ctxs ; atomic_t wait_index ; struct blk_mq_tags *tags ; unsigned long queued ; unsigned long run ; unsigned long dispatched[10U] ; unsigned int numa_node ; unsigned int queue_num ; atomic_t nr_active ; struct blk_mq_cpu_notifier cpu_notifier ; struct kobject kobj ; }; struct blk_mq_tag_set { struct blk_mq_ops *ops ; unsigned int nr_hw_queues ; unsigned int queue_depth ; unsigned int reserved_tags ; unsigned int cmd_size ; int numa_node ; unsigned int timeout ; unsigned int flags ; void *driver_data ; struct blk_mq_tags **tags ; struct mutex tag_list_lock ; struct list_head tag_list ; }; struct blk_mq_queue_data { struct request *rq ; struct list_head *list ; bool last ; }; typedef int queue_rq_fn(struct blk_mq_hw_ctx * , struct blk_mq_queue_data const * ); typedef struct blk_mq_hw_ctx *map_queue_fn(struct request_queue * , int const ); typedef enum blk_eh_timer_return timeout_fn(struct request * , bool ); typedef int init_hctx_fn(struct blk_mq_hw_ctx * , void * , unsigned int ); typedef void exit_hctx_fn(struct blk_mq_hw_ctx * , unsigned int ); typedef int init_request_fn(void * , struct request * , unsigned int , unsigned int , unsigned int ); typedef void exit_request_fn(void * , struct request * , unsigned int , unsigned int ); struct blk_mq_ops { queue_rq_fn *queue_rq ; map_queue_fn *map_queue ; timeout_fn *timeout ; softirq_done_fn *complete ; init_hctx_fn *init_hctx ; exit_hctx_fn *exit_hctx ; init_request_fn *init_request ; exit_request_fn *exit_request ; }; struct scsi_host_cmd_pool; struct scsi_transport_template; struct scsi_host_template { struct module *module ; char const *name ; int (*detect)(struct scsi_host_template * ) ; int (*release)(struct Scsi_Host * ) ; char const *(*info)(struct Scsi_Host * ) ; int (*ioctl)(struct scsi_device * , int , void * ) ; int (*compat_ioctl)(struct scsi_device * , int , void * ) ; int (*queuecommand)(struct Scsi_Host * , struct scsi_cmnd * ) ; int (*eh_abort_handler)(struct scsi_cmnd * ) ; int (*eh_device_reset_handler)(struct scsi_cmnd * ) ; int (*eh_target_reset_handler)(struct scsi_cmnd * ) ; int (*eh_bus_reset_handler)(struct scsi_cmnd * ) ; int (*eh_host_reset_handler)(struct scsi_cmnd * ) ; int (*slave_alloc)(struct scsi_device * ) ; int (*slave_configure)(struct scsi_device * ) ; void (*slave_destroy)(struct scsi_device * ) ; int (*target_alloc)(struct scsi_target * ) ; void (*target_destroy)(struct scsi_target * ) ; int (*scan_finished)(struct Scsi_Host * , unsigned long ) ; void (*scan_start)(struct Scsi_Host * ) ; int (*change_queue_depth)(struct scsi_device * , int ) ; int (*bios_param)(struct scsi_device * , struct block_device * , sector_t , int * ) ; void (*unlock_native_capacity)(struct scsi_device * ) ; int (*show_info)(struct seq_file * , struct Scsi_Host * ) ; int (*write_info)(struct Scsi_Host * , char * , int ) ; enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd * ) ; int (*host_reset)(struct Scsi_Host * , int ) ; char const *proc_name ; struct proc_dir_entry *proc_dir ; int can_queue ; int this_id ; unsigned short sg_tablesize ; unsigned short sg_prot_tablesize ; unsigned int max_sectors ; unsigned long dma_boundary ; short cmd_per_lun ; unsigned char present ; int tag_alloc_policy ; unsigned char use_blk_tags : 1 ; unsigned char track_queue_depth : 1 ; unsigned char supported_mode : 2 ; unsigned char unchecked_isa_dma : 1 ; unsigned char 
use_clustering : 1 ; unsigned char emulated : 1 ; unsigned char skip_settle_delay : 1 ; unsigned char no_write_same : 1 ; unsigned char no_async_abort : 1 ; unsigned int max_host_blocked ; struct device_attribute **shost_attrs ; struct device_attribute **sdev_attrs ; struct list_head legacy_hosts ; u64 vendor_id ; unsigned int cmd_size ; struct scsi_host_cmd_pool *cmd_pool ; bool disable_blk_mq ; }; enum scsi_host_state { SHOST_CREATED = 1, SHOST_RUNNING = 2, SHOST_CANCEL = 3, SHOST_DEL = 4, SHOST_RECOVERY = 5, SHOST_CANCEL_RECOVERY = 6, SHOST_DEL_RECOVERY = 7 } ; union __anonunion____missing_field_name_264 { struct blk_queue_tag *bqt ; struct blk_mq_tag_set tag_set ; }; struct Scsi_Host { struct list_head __devices ; struct list_head __targets ; struct scsi_host_cmd_pool *cmd_pool ; spinlock_t free_list_lock ; struct list_head free_list ; struct list_head starved_list ; spinlock_t default_lock ; spinlock_t *host_lock ; struct mutex scan_mutex ; struct list_head eh_cmd_q ; struct task_struct *ehandler ; struct completion *eh_action ; wait_queue_head_t host_wait ; struct scsi_host_template *hostt ; struct scsi_transport_template *transportt ; union __anonunion____missing_field_name_264 __annonCompField85 ; atomic_t host_busy ; atomic_t host_blocked ; unsigned int host_failed ; unsigned int host_eh_scheduled ; unsigned int host_no ; int eh_deadline ; unsigned long last_reset ; unsigned int max_channel ; unsigned int max_id ; u64 max_lun ; unsigned int unique_id ; unsigned short max_cmd_len ; int this_id ; int can_queue ; short cmd_per_lun ; unsigned short sg_tablesize ; unsigned short sg_prot_tablesize ; unsigned int max_sectors ; unsigned long dma_boundary ; unsigned int nr_hw_queues ; unsigned long cmd_serial_number ; unsigned char active_mode : 2 ; unsigned char unchecked_isa_dma : 1 ; unsigned char use_clustering : 1 ; unsigned char host_self_blocked : 1 ; unsigned char reverse_ordering : 1 ; unsigned char tmf_in_progress : 1 ; unsigned char async_scan : 1 ; unsigned char eh_noresume : 1 ; unsigned char no_write_same : 1 ; unsigned char use_blk_mq : 1 ; unsigned char use_cmd_list : 1 ; char work_q_name[20U] ; struct workqueue_struct *work_q ; struct workqueue_struct *tmf_work_q ; unsigned char no_scsi2_lun_in_cdb : 1 ; unsigned int max_host_blocked ; unsigned int prot_capabilities ; unsigned char prot_guard_type ; struct request_queue *uspace_req_q ; unsigned long base ; unsigned long io_port ; unsigned char n_io_port ; unsigned char dma_channel ; unsigned int irq ; enum scsi_host_state shost_state ; struct device shost_gendev ; struct device shost_dev ; struct list_head sht_legacy_list ; void *shost_data ; struct device *dma_dev ; unsigned long hostdata[0U] ; }; enum scsi_device_state { SDEV_CREATED = 1, SDEV_RUNNING = 2, SDEV_CANCEL = 3, SDEV_DEL = 4, SDEV_QUIESCE = 5, SDEV_OFFLINE = 6, SDEV_TRANSPORT_OFFLINE = 7, SDEV_BLOCK = 8, SDEV_CREATED_BLOCK = 9 } ; struct scsi_dh_data; struct scsi_device { struct Scsi_Host *host ; struct request_queue *request_queue ; struct list_head siblings ; struct list_head same_target_siblings ; atomic_t device_busy ; atomic_t device_blocked ; spinlock_t list_lock ; struct list_head cmd_list ; struct list_head starved_entry ; struct scsi_cmnd *current_cmnd ; unsigned short queue_depth ; unsigned short max_queue_depth ; unsigned short last_queue_full_depth ; unsigned short last_queue_full_count ; unsigned long last_queue_full_time ; unsigned long queue_ramp_up_period ; unsigned long last_queue_ramp_up ; unsigned int id ; unsigned int channel ; u64 lun ; 
unsigned int manufacturer ; unsigned int sector_size ; void *hostdata ; char type ; char scsi_level ; char inq_periph_qual ; unsigned char inquiry_len ; unsigned char *inquiry ; char const *vendor ; char const *model ; char const *rev ; int vpd_pg83_len ; unsigned char *vpd_pg83 ; int vpd_pg80_len ; unsigned char *vpd_pg80 ; unsigned char current_tag ; struct scsi_target *sdev_target ; unsigned int sdev_bflags ; unsigned int eh_timeout ; unsigned char removable : 1 ; unsigned char changed : 1 ; unsigned char busy : 1 ; unsigned char lockable : 1 ; unsigned char locked : 1 ; unsigned char borken : 1 ; unsigned char disconnect : 1 ; unsigned char soft_reset : 1 ; unsigned char sdtr : 1 ; unsigned char wdtr : 1 ; unsigned char ppr : 1 ; unsigned char tagged_supported : 1 ; unsigned char simple_tags : 1 ; unsigned char was_reset : 1 ; unsigned char expecting_cc_ua : 1 ; unsigned char use_10_for_rw : 1 ; unsigned char use_10_for_ms : 1 ; unsigned char no_report_opcodes : 1 ; unsigned char no_write_same : 1 ; unsigned char use_16_for_rw : 1 ; unsigned char skip_ms_page_8 : 1 ; unsigned char skip_ms_page_3f : 1 ; unsigned char skip_vpd_pages : 1 ; unsigned char try_vpd_pages : 1 ; unsigned char use_192_bytes_for_3f : 1 ; unsigned char no_start_on_add : 1 ; unsigned char allow_restart : 1 ; unsigned char manage_start_stop : 1 ; unsigned char start_stop_pwr_cond : 1 ; unsigned char no_uld_attach : 1 ; unsigned char select_no_atn : 1 ; unsigned char fix_capacity : 1 ; unsigned char guess_capacity : 1 ; unsigned char retry_hwerror : 1 ; unsigned char last_sector_bug : 1 ; unsigned char no_read_disc_info : 1 ; unsigned char no_read_capacity_16 : 1 ; unsigned char try_rc_10_first : 1 ; unsigned char is_visible : 1 ; unsigned char wce_default_on : 1 ; unsigned char no_dif : 1 ; unsigned char broken_fua : 1 ; unsigned char lun_in_cdb : 1 ; atomic_t disk_events_disable_depth ; unsigned long supported_events[1U] ; unsigned long pending_events[1U] ; struct list_head event_list ; struct work_struct event_work ; unsigned int max_device_blocked ; atomic_t iorequest_cnt ; atomic_t iodone_cnt ; atomic_t ioerr_cnt ; struct device sdev_gendev ; struct device sdev_dev ; struct execute_work ew ; struct work_struct requeue_work ; struct scsi_dh_data *scsi_dh_data ; enum scsi_device_state sdev_state ; unsigned long sdev_data[0U] ; }; struct scsi_device_handler { struct list_head list ; struct module *module ; char const *name ; int (*check_sense)(struct scsi_device * , struct scsi_sense_hdr * ) ; struct scsi_dh_data *(*attach)(struct scsi_device * ) ; void (*detach)(struct scsi_device * ) ; int (*activate)(struct scsi_device * , void (*)(void * , int ) , void * ) ; int (*prep_fn)(struct scsi_device * , struct request * ) ; int (*set_params)(struct scsi_device * , char const * ) ; bool (*match)(struct scsi_device * ) ; }; struct scsi_dh_data { struct scsi_device_handler *scsi_dh ; struct scsi_device *sdev ; struct kref kref ; }; enum scsi_target_state { STARGET_CREATED = 1, STARGET_RUNNING = 2, STARGET_DEL = 3 } ; struct scsi_target { struct scsi_device *starget_sdev_user ; struct list_head siblings ; struct list_head devices ; struct device dev ; struct kref reap_ref ; unsigned int channel ; unsigned int id ; unsigned char create : 1 ; unsigned char single_lun : 1 ; unsigned char pdt_1f_for_no_lun : 1 ; unsigned char no_report_luns : 1 ; unsigned char expecting_lun_change : 1 ; atomic_t target_busy ; atomic_t target_blocked ; unsigned int can_queue ; unsigned int max_target_blocked ; char scsi_level ; enum 
scsi_target_state state ; void *hostdata ; unsigned long starget_data[0U] ; }; struct scsi_data_buffer { struct sg_table table ; unsigned int length ; int resid ; }; struct scsi_pointer { char *ptr ; int this_residual ; struct scatterlist *buffer ; int buffers_residual ; dma_addr_t dma_handle ; int volatile Status ; int volatile Message ; int volatile have_data_in ; int volatile sent_command ; int volatile phase ; }; struct scsi_cmnd { struct scsi_device *device ; struct list_head list ; struct list_head eh_entry ; struct delayed_work abort_work ; int eh_eflags ; unsigned long serial_number ; unsigned long jiffies_at_alloc ; int retries ; int allowed ; unsigned char prot_op ; unsigned char prot_type ; unsigned char prot_flags ; unsigned short cmd_len ; enum dma_data_direction sc_data_direction ; unsigned char *cmnd ; struct scsi_data_buffer sdb ; struct scsi_data_buffer *prot_sdb ; unsigned int underflow ; unsigned int transfersize ; struct request *request ; unsigned char *sense_buffer ; void (*scsi_done)(struct scsi_cmnd * ) ; struct scsi_pointer SCp ; unsigned char *host_scribble ; int result ; int flags ; unsigned char tag ; }; struct iovec { void *iov_base ; __kernel_size_t iov_len ; }; struct kvec { void *iov_base ; size_t iov_len ; }; union __anonunion____missing_field_name_267 { struct iovec const *iov ; struct kvec const *kvec ; struct bio_vec const *bvec ; }; struct iov_iter { int type ; size_t iov_offset ; size_t count ; union __anonunion____missing_field_name_267 __annonCompField86 ; unsigned long nr_segs ; }; typedef unsigned short __kernel_sa_family_t; typedef __kernel_sa_family_t sa_family_t; struct sockaddr { sa_family_t sa_family ; char sa_data[14U] ; }; struct msghdr { void *msg_name ; int msg_namelen ; struct iov_iter msg_iter ; void *msg_control ; __kernel_size_t msg_controllen ; unsigned int msg_flags ; struct kiocb *msg_iocb ; }; enum ldv_27959 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ; typedef enum ldv_27959 socket_state; struct socket_wq { wait_queue_head_t wait ; struct fasync_struct *fasync_list ; struct callback_head rcu ; }; struct proto_ops; struct socket { socket_state state ; short type ; unsigned long flags ; struct socket_wq *wq ; struct file *file ; struct sock *sk ; struct proto_ops const *ops ; }; struct proto_ops { int family ; struct module *owner ; int (*release)(struct socket * ) ; int (*bind)(struct socket * , struct sockaddr * , int ) ; int (*connect)(struct socket * , struct sockaddr * , int , int ) ; int (*socketpair)(struct socket * , struct socket * ) ; int (*accept)(struct socket * , struct socket * , int ) ; int (*getname)(struct socket * , struct sockaddr * , int * , int ) ; unsigned int (*poll)(struct file * , struct socket * , struct poll_table_struct * ) ; int (*ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*listen)(struct socket * , int ) ; int (*shutdown)(struct socket * , int ) ; int (*setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct socket * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct socket * , int , int , char * , int * ) ; int (*sendmsg)(struct socket * , struct msghdr * , size_t ) ; int (*recvmsg)(struct socket * , struct msghdr * , size_t , int ) ; int (*mmap)(struct file * , struct socket * , struct vm_area_struct * ) ; ssize_t (*sendpage)(struct 
socket * , struct page * , int , size_t , int ) ; ssize_t (*splice_read)(struct socket * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*set_peek_off)(struct sock * , int ) ; }; struct in6_addr; struct sk_buff; typedef u64 netdev_features_t; union __anonunion_in6_u_268 { __u8 u6_addr8[16U] ; __be16 u6_addr16[8U] ; __be32 u6_addr32[4U] ; }; struct in6_addr { union __anonunion_in6_u_268 in6_u ; }; struct ethhdr { unsigned char h_dest[6U] ; unsigned char h_source[6U] ; __be16 h_proto ; }; struct pipe_buf_operations; struct pipe_buffer { struct page *page ; unsigned int offset ; unsigned int len ; struct pipe_buf_operations const *ops ; unsigned int flags ; unsigned long private ; }; struct pipe_inode_info { struct mutex mutex ; wait_queue_head_t wait ; unsigned int nrbufs ; unsigned int curbuf ; unsigned int buffers ; unsigned int readers ; unsigned int writers ; unsigned int files ; unsigned int waiting_writers ; unsigned int r_counter ; unsigned int w_counter ; struct page *tmp_page ; struct fasync_struct *fasync_readers ; struct fasync_struct *fasync_writers ; struct pipe_buffer *bufs ; }; struct pipe_buf_operations { int can_merge ; int (*confirm)(struct pipe_inode_info * , struct pipe_buffer * ) ; void (*release)(struct pipe_inode_info * , struct pipe_buffer * ) ; int (*steal)(struct pipe_inode_info * , struct pipe_buffer * ) ; void (*get)(struct pipe_inode_info * , struct pipe_buffer * ) ; }; struct napi_struct; struct nf_conntrack { atomic_t use ; }; union __anonunion____missing_field_name_273 { struct net_device *physoutdev ; char neigh_header[8U] ; }; union __anonunion____missing_field_name_274 { __be32 ipv4_daddr ; struct in6_addr ipv6_daddr ; }; struct nf_bridge_info { atomic_t use ; unsigned char orig_proto ; bool pkt_otherhost ; __u16 frag_max_size ; unsigned int mask ; struct net_device *physindev ; union __anonunion____missing_field_name_273 __annonCompField90 ; union __anonunion____missing_field_name_274 __annonCompField91 ; }; struct sk_buff_head { struct sk_buff *next ; struct sk_buff *prev ; __u32 qlen ; spinlock_t lock ; }; typedef unsigned int sk_buff_data_t; struct __anonstruct____missing_field_name_277 { u32 stamp_us ; u32 stamp_jiffies ; }; union __anonunion____missing_field_name_276 { u64 v64 ; struct __anonstruct____missing_field_name_277 __annonCompField92 ; }; struct skb_mstamp { union __anonunion____missing_field_name_276 __annonCompField93 ; }; union __anonunion____missing_field_name_280 { ktime_t tstamp ; struct skb_mstamp skb_mstamp ; }; struct __anonstruct____missing_field_name_279 { struct sk_buff *next ; struct sk_buff *prev ; union __anonunion____missing_field_name_280 __annonCompField94 ; }; union __anonunion____missing_field_name_278 { struct __anonstruct____missing_field_name_279 __annonCompField95 ; struct rb_node rbnode ; }; struct sec_path; struct __anonstruct____missing_field_name_282 { __u16 csum_start ; __u16 csum_offset ; }; union __anonunion____missing_field_name_281 { __wsum csum ; struct __anonstruct____missing_field_name_282 __annonCompField97 ; }; union __anonunion____missing_field_name_283 { unsigned int napi_id ; unsigned int sender_cpu ; }; union __anonunion____missing_field_name_284 { __u32 mark ; __u32 reserved_tailroom ; }; union __anonunion____missing_field_name_285 { __be16 inner_protocol ; __u8 inner_ipproto ; }; struct sk_buff { union __anonunion____missing_field_name_278 __annonCompField96 ; struct sock *sk ; struct net_device *dev ; char cb[48U] ; unsigned long _skb_refdst ; void (*destructor)(struct 
sk_buff * ) ; struct sec_path *sp ; struct nf_conntrack *nfct ; struct nf_bridge_info *nf_bridge ; unsigned int len ; unsigned int data_len ; __u16 mac_len ; __u16 hdr_len ; __u16 queue_mapping ; unsigned char cloned : 1 ; unsigned char nohdr : 1 ; unsigned char fclone : 2 ; unsigned char peeked : 1 ; unsigned char head_frag : 1 ; unsigned char xmit_more : 1 ; __u32 headers_start[0U] ; __u8 __pkt_type_offset[0U] ; unsigned char pkt_type : 3 ; unsigned char pfmemalloc : 1 ; unsigned char ignore_df : 1 ; unsigned char nfctinfo : 3 ; unsigned char nf_trace : 1 ; unsigned char ip_summed : 2 ; unsigned char ooo_okay : 1 ; unsigned char l4_hash : 1 ; unsigned char sw_hash : 1 ; unsigned char wifi_acked_valid : 1 ; unsigned char wifi_acked : 1 ; unsigned char no_fcs : 1 ; unsigned char encapsulation : 1 ; unsigned char encap_hdr_csum : 1 ; unsigned char csum_valid : 1 ; unsigned char csum_complete_sw : 1 ; unsigned char csum_level : 2 ; unsigned char csum_bad : 1 ; unsigned char ndisc_nodetype : 2 ; unsigned char ipvs_property : 1 ; unsigned char inner_protocol_type : 1 ; unsigned char remcsum_offload : 1 ; __u16 tc_index ; __u16 tc_verd ; union __anonunion____missing_field_name_281 __annonCompField98 ; __u32 priority ; int skb_iif ; __u32 hash ; __be16 vlan_proto ; __u16 vlan_tci ; union __anonunion____missing_field_name_283 __annonCompField99 ; __u32 secmark ; union __anonunion____missing_field_name_284 __annonCompField100 ; union __anonunion____missing_field_name_285 __annonCompField101 ; __u16 inner_transport_header ; __u16 inner_network_header ; __u16 inner_mac_header ; __be16 protocol ; __u16 transport_header ; __u16 network_header ; __u16 mac_header ; __u32 headers_end[0U] ; sk_buff_data_t tail ; sk_buff_data_t end ; unsigned char *head ; unsigned char *data ; unsigned int truesize ; atomic_t users ; }; struct dst_entry; struct rtable; struct xfrm_policy; struct xfrm_state; struct request_sock; struct mnt_namespace; struct ipc_namespace; struct nsproxy { atomic_t count ; struct uts_namespace *uts_ns ; struct ipc_namespace *ipc_ns ; struct mnt_namespace *mnt_ns ; struct pid_namespace *pid_ns_for_children ; struct net *net_ns ; }; struct nlmsghdr { __u32 nlmsg_len ; __u16 nlmsg_type ; __u16 nlmsg_flags ; __u32 nlmsg_seq ; __u32 nlmsg_pid ; }; struct nlattr { __u16 nla_len ; __u16 nla_type ; }; struct netlink_callback { struct sk_buff *skb ; struct nlmsghdr const *nlh ; int (*dump)(struct sk_buff * , struct netlink_callback * ) ; int (*done)(struct netlink_callback * ) ; void *data ; struct module *module ; u16 family ; u16 min_dump_alloc ; unsigned int prev_seq ; unsigned int seq ; long args[6U] ; }; enum fc_port_type { FC_PORTTYPE_UNKNOWN = 0, FC_PORTTYPE_OTHER = 1, FC_PORTTYPE_NOTPRESENT = 2, FC_PORTTYPE_NPORT = 3, FC_PORTTYPE_NLPORT = 4, FC_PORTTYPE_LPORT = 5, FC_PORTTYPE_PTP = 6, FC_PORTTYPE_NPIV = 7 } ; enum fc_port_state { FC_PORTSTATE_UNKNOWN = 0, FC_PORTSTATE_NOTPRESENT = 1, FC_PORTSTATE_ONLINE = 2, FC_PORTSTATE_OFFLINE = 3, FC_PORTSTATE_BLOCKED = 4, FC_PORTSTATE_BYPASSED = 5, FC_PORTSTATE_DIAGNOSTICS = 6, FC_PORTSTATE_LINKDOWN = 7, FC_PORTSTATE_ERROR = 8, FC_PORTSTATE_LOOPBACK = 9, FC_PORTSTATE_DELETED = 10 } ; enum fc_vport_state { FC_VPORT_UNKNOWN = 0, FC_VPORT_ACTIVE = 1, FC_VPORT_DISABLED = 2, FC_VPORT_LINKDOWN = 3, FC_VPORT_INITIALIZING = 4, FC_VPORT_NO_FABRIC_SUPP = 5, FC_VPORT_NO_FABRIC_RSCS = 6, FC_VPORT_FABRIC_LOGOUT = 7, FC_VPORT_FABRIC_REJ_WWN = 8, FC_VPORT_FAILED = 9 } ; struct fc_vport { enum fc_vport_state vport_state ; enum fc_vport_state vport_last_state ; u64 
node_name ; u64 port_name ; u32 roles ; u32 vport_id ; enum fc_port_type vport_type ; char symbolic_name[64U] ; void *dd_data ; struct Scsi_Host *shost ; unsigned int channel ; u32 number ; u8 flags ; struct list_head peers ; struct device dev ; struct work_struct vport_delete_work ; }; struct fc_rport { u32 maxframe_size ; u32 supported_classes ; u32 dev_loss_tmo ; u64 node_name ; u64 port_name ; u32 port_id ; u32 roles ; enum fc_port_state port_state ; u32 scsi_target_id ; u32 fast_io_fail_tmo ; void *dd_data ; unsigned int channel ; u32 number ; u8 flags ; struct list_head peers ; struct device dev ; struct delayed_work dev_loss_work ; struct work_struct scan_work ; struct delayed_work fail_io_work ; struct work_struct stgt_delete_work ; struct work_struct rport_delete_work ; struct request_queue *rqst_q ; }; struct fc_host_statistics { u64 seconds_since_last_reset ; u64 tx_frames ; u64 tx_words ; u64 rx_frames ; u64 rx_words ; u64 lip_count ; u64 nos_count ; u64 error_frames ; u64 dumped_frames ; u64 link_failure_count ; u64 loss_of_sync_count ; u64 loss_of_signal_count ; u64 prim_seq_protocol_err_count ; u64 invalid_tx_word_count ; u64 invalid_crc_count ; u64 fcp_input_requests ; u64 fcp_output_requests ; u64 fcp_control_requests ; u64 fcp_input_megabytes ; u64 fcp_output_megabytes ; u64 fcp_packet_alloc_failures ; u64 fcp_packet_aborts ; u64 fcp_frame_alloc_failures ; u64 fc_no_free_exch ; u64 fc_no_free_exch_xid ; u64 fc_xid_not_found ; u64 fc_xid_busy ; u64 fc_seq_not_found ; u64 fc_non_bls_resp ; }; enum fc_host_event_code { FCH_EVT_LIP = 1, FCH_EVT_LINKUP = 2, FCH_EVT_LINKDOWN = 3, FCH_EVT_LIPRESET = 4, FCH_EVT_RSCN = 5, FCH_EVT_ADAPTER_CHANGE = 259, FCH_EVT_PORT_UNKNOWN = 512, FCH_EVT_PORT_OFFLINE = 513, FCH_EVT_PORT_ONLINE = 514, FCH_EVT_PORT_FABRIC = 516, FCH_EVT_LINK_UNKNOWN = 1280, FCH_EVT_VENDOR_UNIQUE = 65535 } ; struct fc_bsg_buffer { unsigned int payload_len ; int sg_cnt ; struct scatterlist *sg_list ; }; struct fc_bsg_request; struct fc_bsg_reply; struct fc_bsg_job { struct Scsi_Host *shost ; struct fc_rport *rport ; struct device *dev ; struct request *req ; spinlock_t job_lock ; unsigned int state_flags ; unsigned int ref_cnt ; void (*job_done)(struct fc_bsg_job * ) ; struct fc_bsg_request *request ; struct fc_bsg_reply *reply ; unsigned int request_len ; unsigned int reply_len ; struct fc_bsg_buffer request_payload ; struct fc_bsg_buffer reply_payload ; void *dd_data ; }; struct fc_function_template { void (*get_rport_dev_loss_tmo)(struct fc_rport * ) ; void (*set_rport_dev_loss_tmo)(struct fc_rport * , u32 ) ; void (*get_starget_node_name)(struct scsi_target * ) ; void (*get_starget_port_name)(struct scsi_target * ) ; void (*get_starget_port_id)(struct scsi_target * ) ; void (*get_host_port_id)(struct Scsi_Host * ) ; void (*get_host_port_type)(struct Scsi_Host * ) ; void (*get_host_port_state)(struct Scsi_Host * ) ; void (*get_host_active_fc4s)(struct Scsi_Host * ) ; void (*get_host_speed)(struct Scsi_Host * ) ; void (*get_host_fabric_name)(struct Scsi_Host * ) ; void (*get_host_symbolic_name)(struct Scsi_Host * ) ; void (*set_host_system_hostname)(struct Scsi_Host * ) ; struct fc_host_statistics *(*get_fc_host_stats)(struct Scsi_Host * ) ; void (*reset_fc_host_stats)(struct Scsi_Host * ) ; int (*issue_fc_host_lip)(struct Scsi_Host * ) ; void (*dev_loss_tmo_callbk)(struct fc_rport * ) ; void (*terminate_rport_io)(struct fc_rport * ) ; void (*set_vport_symbolic_name)(struct fc_vport * ) ; int (*vport_create)(struct fc_vport * , bool ) ; int (*vport_disable)(struct 
fc_vport * , bool ) ; int (*vport_delete)(struct fc_vport * ) ; int (*tsk_mgmt_response)(struct Scsi_Host * , u64 , u64 , int ) ; int (*it_nexus_response)(struct Scsi_Host * , u64 , int ) ; int (*bsg_request)(struct fc_bsg_job * ) ; int (*bsg_timeout)(struct fc_bsg_job * ) ; u32 dd_fcrport_size ; u32 dd_fcvport_size ; u32 dd_bsg_size ; unsigned char show_rport_maxframe_size : 1 ; unsigned char show_rport_supported_classes : 1 ; unsigned char show_rport_dev_loss_tmo : 1 ; unsigned char show_starget_node_name : 1 ; unsigned char show_starget_port_name : 1 ; unsigned char show_starget_port_id : 1 ; unsigned char show_host_node_name : 1 ; unsigned char show_host_port_name : 1 ; unsigned char show_host_permanent_port_name : 1 ; unsigned char show_host_supported_classes : 1 ; unsigned char show_host_supported_fc4s : 1 ; unsigned char show_host_supported_speeds : 1 ; unsigned char show_host_maxframe_size : 1 ; unsigned char show_host_serial_number : 1 ; unsigned char show_host_manufacturer : 1 ; unsigned char show_host_model : 1 ; unsigned char show_host_model_description : 1 ; unsigned char show_host_hardware_version : 1 ; unsigned char show_host_driver_version : 1 ; unsigned char show_host_firmware_version : 1 ; unsigned char show_host_optionrom_version : 1 ; unsigned char show_host_port_id : 1 ; unsigned char show_host_port_type : 1 ; unsigned char show_host_port_state : 1 ; unsigned char show_host_active_fc4s : 1 ; unsigned char show_host_speed : 1 ; unsigned char show_host_fabric_name : 1 ; unsigned char show_host_symbolic_name : 1 ; unsigned char show_host_system_hostname : 1 ; unsigned char disable_target_scan : 1 ; }; struct fc_bsg_host_add_rport { uint8_t reserved ; uint8_t port_id[3U] ; }; struct fc_bsg_host_del_rport { uint8_t reserved ; uint8_t port_id[3U] ; }; struct fc_bsg_host_els { uint8_t command_code ; uint8_t port_id[3U] ; }; struct __anonstruct_rjt_data_286 { uint8_t action ; uint8_t reason_code ; uint8_t reason_explanation ; uint8_t vendor_unique ; }; struct fc_bsg_ctels_reply { uint32_t status ; struct __anonstruct_rjt_data_286 rjt_data ; }; struct fc_bsg_host_ct { uint8_t reserved ; uint8_t port_id[3U] ; uint32_t preamble_word0 ; uint32_t preamble_word1 ; uint32_t preamble_word2 ; }; struct fc_bsg_host_vendor { uint64_t vendor_id ; uint32_t vendor_cmd[0U] ; }; struct fc_bsg_host_vendor_reply { uint32_t vendor_rsp[0U] ; }; struct fc_bsg_rport_els { uint8_t els_code ; }; struct fc_bsg_rport_ct { uint32_t preamble_word0 ; uint32_t preamble_word1 ; uint32_t preamble_word2 ; }; union __anonunion_rqst_data_287 { struct fc_bsg_host_add_rport h_addrport ; struct fc_bsg_host_del_rport h_delrport ; struct fc_bsg_host_els h_els ; struct fc_bsg_host_ct h_ct ; struct fc_bsg_host_vendor h_vendor ; struct fc_bsg_rport_els r_els ; struct fc_bsg_rport_ct r_ct ; }; struct fc_bsg_request { uint32_t msgcode ; union __anonunion_rqst_data_287 rqst_data ; }; union __anonunion_reply_data_288 { struct fc_bsg_host_vendor_reply vendor_reply ; struct fc_bsg_ctels_reply ctels_reply ; }; struct fc_bsg_reply { uint32_t result ; uint32_t reply_payload_rcv_len ; union __anonunion_reply_data_288 reply_data ; }; struct qla82xx_legacy_intr_set { uint32_t int_vec_bit ; uint32_t tgt_status_reg ; uint32_t tgt_mask_reg ; uint32_t pci_int_reg ; }; struct device_reg_82xx { uint32_t req_q_out[64U] ; uint32_t rsp_q_in[64U] ; uint32_t rsp_q_out[64U] ; uint16_t mailbox_in[32U] ; uint16_t unused_1[32U] ; uint32_t hint ; uint16_t unused_2[62U] ; uint16_t mailbox_out[32U] ; uint32_t unused_3[48U] ; uint32_t host_status ; 
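/* Editor's note -- illustrative sketch only, not part of the generated code.
 * fc_bsg_job carries a request/reply pair; the FC transport invokes the
 * template's bsg_request() hook and the driver hands the job back through
 * job_done().  A hypothetical handler shape (names and the error value are
 * placeholders, not this driver's actual implementation):
 *
 *   static int example_bsg_request(struct fc_bsg_job *job)
 *   {
 *       switch (job->request->msgcode) {
 *       default:
 *           job->reply->result = -38;      // e.g. "operation not supported"
 *           break;
 *       }
 *       job->reply->reply_payload_rcv_len = 0U;
 *       job->job_done(job);                // return the job to the transport
 *       return 0;
 *   }
 */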
uint32_t host_int ; }; struct fcp_cmnd { struct scsi_lun lun ; uint8_t crn ; uint8_t task_attribute ; uint8_t task_management ; uint8_t additional_cdb_len ; uint8_t cdb[260U] ; }; struct dsd_dma { struct list_head list ; dma_addr_t dsd_list_dma ; void *dsd_addr ; }; struct ct6_dsd { uint16_t fcp_cmnd_len ; dma_addr_t fcp_cmnd_dma ; struct fcp_cmnd *fcp_cmnd ; int dsd_use_cnt ; struct list_head dsd_list ; }; struct qla8044_reset_template_hdr { uint16_t version ; uint16_t signature ; uint16_t size ; uint16_t entries ; uint16_t hdr_size ; uint16_t checksum ; uint16_t init_seq_offset ; uint16_t start_seq_offset ; }; struct qla8044_reset_template { int seq_index ; int seq_error ; int array_index ; uint32_t array[16U] ; uint8_t *buff ; uint8_t *stop_offset ; uint8_t *start_offset ; uint8_t *init_offset ; struct qla8044_reset_template_hdr *hdr ; uint8_t seq_end ; uint8_t template_end ; }; struct device_reg_24xx { uint32_t flash_addr ; uint32_t flash_data ; uint32_t ctrl_status ; uint32_t ictrl ; uint32_t istatus ; uint32_t unused_1[2U] ; uint32_t req_q_in ; uint32_t req_q_out ; uint32_t rsp_q_in ; uint32_t rsp_q_out ; uint32_t preq_q_in ; uint32_t preq_q_out ; uint32_t unused_2[2U] ; uint32_t atio_q_in ; uint32_t atio_q_out ; uint32_t host_status ; uint32_t hccr ; uint32_t gpiod ; uint32_t gpioe ; uint32_t iobase_addr ; uint32_t unused_3[10U] ; uint16_t mailbox0 ; uint16_t mailbox1 ; uint16_t mailbox2 ; uint16_t mailbox3 ; uint16_t mailbox4 ; uint16_t mailbox5 ; uint16_t mailbox6 ; uint16_t mailbox7 ; uint16_t mailbox8 ; uint16_t mailbox9 ; uint16_t mailbox10 ; uint16_t mailbox11 ; uint16_t mailbox12 ; uint16_t mailbox13 ; uint16_t mailbox14 ; uint16_t mailbox15 ; uint16_t mailbox16 ; uint16_t mailbox17 ; uint16_t mailbox18 ; uint16_t mailbox19 ; uint16_t mailbox20 ; uint16_t mailbox21 ; uint16_t mailbox22 ; uint16_t mailbox23 ; uint16_t mailbox24 ; uint16_t mailbox25 ; uint16_t mailbox26 ; uint16_t mailbox27 ; uint16_t mailbox28 ; uint16_t mailbox29 ; uint16_t mailbox30 ; uint16_t mailbox31 ; uint32_t iobase_window ; uint32_t iobase_c4 ; uint32_t iobase_c8 ; uint32_t unused_4_1[6U] ; uint32_t iobase_q ; uint32_t unused_5[2U] ; uint32_t iobase_select ; uint32_t unused_6[2U] ; uint32_t iobase_sdata ; }; struct qla_npiv_entry { uint16_t flags ; uint16_t vf_id ; uint8_t q_qos ; uint8_t f_qos ; uint16_t unused1 ; uint8_t port_name[8U] ; uint8_t node_name[8U] ; }; struct ex_init_cb_81xx { uint16_t ex_version ; uint8_t prio_fcf_matching_flags ; uint8_t reserved_1[3U] ; uint16_t pri_fcf_vlan_id ; uint8_t pri_fcf_fabric_name[8U] ; uint16_t reserved_2[7U] ; uint8_t spma_mac_addr[6U] ; uint16_t reserved_3[14U] ; }; struct qla_fcp_prio_entry { uint16_t flags ; uint8_t tag ; uint8_t reserved ; uint32_t src_pid ; uint32_t dst_pid ; uint16_t lun_beg ; uint16_t lun_end ; uint8_t src_wwpn[8U] ; uint8_t dst_wwpn[8U] ; }; struct qla_fcp_prio_cfg { uint8_t signature[4U] ; uint16_t version ; uint16_t length ; uint16_t checksum ; uint16_t num_entries ; uint16_t size_of_entry ; uint8_t attributes ; uint8_t reserved ; struct qla_fcp_prio_entry entry[1U] ; }; struct req_que; struct srb_cmd { struct scsi_cmnd *cmd ; uint32_t request_sense_length ; uint32_t fw_sense_length ; uint8_t *request_sense_ptr ; void *ctx ; }; struct __anonstruct_logio_312 { uint16_t flags ; uint16_t data[2U] ; }; struct __anonstruct_tmf_313 { uint64_t lun ; uint32_t flags ; uint32_t data ; struct completion comp ; __le16 comp_status ; }; struct __anonstruct_fxiocb_314 { uint8_t flags ; uint32_t req_len ; uint32_t rsp_len ; void *req_addr ; void 
*rsp_addr ; dma_addr_t req_dma_handle ; dma_addr_t rsp_dma_handle ; __le32 adapter_id ; __le32 adapter_id_hi ; __le16 req_func_type ; __le32 req_data ; __le32 req_data_extra ; __le32 result ; __le32 seq_number ; __le16 fw_flags ; struct completion fxiocb_comp ; __le32 reserved_0 ; uint8_t reserved_1 ; }; struct __anonstruct_abt_315 { uint32_t cmd_hndl ; __le16 comp_status ; struct completion comp ; }; union __anonunion_u_311 { struct __anonstruct_logio_312 logio ; struct __anonstruct_tmf_313 tmf ; struct __anonstruct_fxiocb_314 fxiocb ; struct __anonstruct_abt_315 abt ; }; struct srb_iocb { union __anonunion_u_311 u ; struct timer_list timer ; void (*timeout)(void * ) ; }; union __anonunion_u_316 { struct srb_iocb iocb_cmd ; struct fc_bsg_job *bsg_job ; struct srb_cmd scmd ; }; struct srb { atomic_t ref_count ; struct fc_port *fcport ; uint32_t handle ; uint16_t flags ; uint16_t type ; char *name ; int iocbs ; union __anonunion_u_316 u ; void (*done)(void * , void * , int ) ; void (*free)(void * , void * ) ; }; typedef struct srb srb_t; struct __anonstruct_isp2100_318 { uint16_t mailbox0 ; uint16_t mailbox1 ; uint16_t mailbox2 ; uint16_t mailbox3 ; uint16_t mailbox4 ; uint16_t mailbox5 ; uint16_t mailbox6 ; uint16_t mailbox7 ; uint16_t unused_2[59U] ; }; struct __anonstruct_isp2300_319 { uint16_t req_q_in ; uint16_t req_q_out ; uint16_t rsp_q_in ; uint16_t rsp_q_out ; uint32_t host_status ; uint16_t host_semaphore ; uint16_t unused_3[17U] ; uint16_t mailbox0 ; uint16_t mailbox1 ; uint16_t mailbox2 ; uint16_t mailbox3 ; uint16_t mailbox4 ; uint16_t mailbox5 ; uint16_t mailbox6 ; uint16_t mailbox7 ; uint16_t mailbox8 ; uint16_t mailbox9 ; uint16_t mailbox10 ; uint16_t mailbox11 ; uint16_t mailbox12 ; uint16_t mailbox13 ; uint16_t mailbox14 ; uint16_t mailbox15 ; uint16_t mailbox16 ; uint16_t mailbox17 ; uint16_t mailbox18 ; uint16_t mailbox19 ; uint16_t mailbox20 ; uint16_t mailbox21 ; uint16_t mailbox22 ; uint16_t mailbox23 ; uint16_t mailbox24 ; uint16_t mailbox25 ; uint16_t mailbox26 ; uint16_t mailbox27 ; uint16_t mailbox28 ; uint16_t mailbox29 ; uint16_t mailbox30 ; uint16_t mailbox31 ; uint16_t fb_cmd ; uint16_t unused_4[10U] ; }; union __anonunion_u_317 { struct __anonstruct_isp2100_318 isp2100 ; struct __anonstruct_isp2300_319 isp2300 ; }; struct __anonstruct_isp2200_321 { uint16_t unused_10[8U] ; uint16_t mailbox8 ; uint16_t mailbox9 ; uint16_t mailbox10 ; uint16_t mailbox11 ; uint16_t mailbox12 ; uint16_t mailbox13 ; uint16_t mailbox14 ; uint16_t mailbox15 ; uint16_t mailbox16 ; uint16_t mailbox17 ; uint16_t mailbox18 ; uint16_t mailbox19 ; uint16_t mailbox20 ; uint16_t mailbox21 ; uint16_t mailbox22 ; uint16_t mailbox23 ; }; union __anonunion_u_end_320 { struct __anonstruct_isp2200_321 isp2200 ; }; struct device_reg_2xxx { uint16_t flash_address ; uint16_t flash_data ; uint16_t unused_1[1U] ; uint16_t ctrl_status ; uint16_t ictrl ; uint16_t istatus ; uint16_t semaphore ; uint16_t nvram ; union __anonunion_u_317 u ; uint16_t fpm_diag_config ; uint16_t unused_5[4U] ; uint16_t risc_hw ; uint16_t unused_5_1 ; uint16_t pcr ; uint16_t unused_6[5U] ; uint16_t mctr ; uint16_t unused_7[3U] ; uint16_t fb_cmd_2100 ; uint16_t unused_8[3U] ; uint16_t hccr ; uint16_t unused_9[5U] ; uint16_t gpiod ; uint16_t gpioe ; union __anonunion_u_end_320 u_end ; }; struct device_reg_25xxmq { uint32_t req_q_in ; uint32_t req_q_out ; uint32_t rsp_q_in ; uint32_t rsp_q_out ; uint32_t atio_q_in ; uint32_t atio_q_out ; }; struct device_reg_fx00 { uint32_t mailbox0 ; uint32_t mailbox1 ; uint32_t mailbox2 ; 
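/* Editor's note -- illustrative sketch only, not part of the generated code.
 * srb_t is the driver's per-request tracking block: ref_count guards its
 * lifetime, and the done()/free() hooks run when the firmware completes the
 * request.  A hypothetical completion path, assuming the (void *, void *, int)
 * callback convention declared above:
 *
 *   static void example_sp_complete(scsi_qla_host_t *vha, srb_t *sp, int res)
 *   {
 *       sp->done(vha, sp, res);   // type-specific completion callback
 *       // the callback is expected to drop its reference and, on the last
 *       // put, call sp->free(vha, sp) to return the srb to its mempool
 *   }
 */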
uint32_t mailbox3 ; uint32_t mailbox4 ; uint32_t mailbox5 ; uint32_t mailbox6 ; uint32_t mailbox7 ; uint32_t mailbox8 ; uint32_t mailbox9 ; uint32_t mailbox10 ; uint32_t mailbox11 ; uint32_t mailbox12 ; uint32_t mailbox13 ; uint32_t mailbox14 ; uint32_t mailbox15 ; uint32_t mailbox16 ; uint32_t mailbox17 ; uint32_t mailbox18 ; uint32_t mailbox19 ; uint32_t mailbox20 ; uint32_t mailbox21 ; uint32_t mailbox22 ; uint32_t mailbox23 ; uint32_t mailbox24 ; uint32_t mailbox25 ; uint32_t mailbox26 ; uint32_t mailbox27 ; uint32_t mailbox28 ; uint32_t mailbox29 ; uint32_t mailbox30 ; uint32_t mailbox31 ; uint32_t aenmailbox0 ; uint32_t aenmailbox1 ; uint32_t aenmailbox2 ; uint32_t aenmailbox3 ; uint32_t aenmailbox4 ; uint32_t aenmailbox5 ; uint32_t aenmailbox6 ; uint32_t aenmailbox7 ; uint32_t req_q_in ; uint32_t req_q_out ; uint32_t rsp_q_in ; uint32_t rsp_q_out ; uint32_t initval0 ; uint32_t initval1 ; uint32_t initval2 ; uint32_t initval3 ; uint32_t initval4 ; uint32_t initval5 ; uint32_t initval6 ; uint32_t initval7 ; uint32_t fwheartbeat ; uint32_t pseudoaen ; }; union __anonunion_device_reg_t_322 { struct device_reg_2xxx isp ; struct device_reg_24xx isp24 ; struct device_reg_25xxmq isp25mq ; struct device_reg_82xx isp82 ; struct device_reg_fx00 ispfx00 ; }; typedef union __anonunion_device_reg_t_322 device_reg_t; struct __anonstruct_mbx_cmd_t_323 { uint32_t out_mb ; uint32_t in_mb ; uint16_t mb[32U] ; long buf_size ; void *bufp ; uint32_t tov ; uint8_t flags ; }; typedef struct __anonstruct_mbx_cmd_t_323 mbx_cmd_t; struct mbx_cmd_32 { uint32_t out_mb ; uint32_t in_mb ; uint32_t mb[32U] ; long buf_size ; void *bufp ; uint32_t tov ; uint8_t flags ; }; struct __anonstruct_init_cb_t_325 { uint8_t version ; uint8_t reserved_1 ; uint8_t firmware_options[2U] ; uint16_t frame_payload_size ; uint16_t max_iocb_allocation ; uint16_t execution_throttle ; uint8_t retry_count ; uint8_t retry_delay ; uint8_t port_name[8U] ; uint16_t hard_address ; uint8_t inquiry_data ; uint8_t login_timeout ; uint8_t node_name[8U] ; uint16_t request_q_outpointer ; uint16_t response_q_inpointer ; uint16_t request_q_length ; uint16_t response_q_length ; uint32_t request_q_address[2U] ; uint32_t response_q_address[2U] ; uint16_t lun_enables ; uint8_t command_resource_count ; uint8_t immediate_notify_resource_count ; uint16_t timeout ; uint8_t reserved_2[2U] ; uint8_t add_firmware_options[2U] ; uint8_t response_accumulation_timer ; uint8_t interrupt_delay_timer ; uint8_t special_options[2U] ; uint8_t reserved_3[26U] ; }; typedef struct __anonstruct_init_cb_t_325 init_cb_t; struct __anonstruct_response_t_327 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint8_t data[52U] ; uint32_t signature ; }; typedef struct __anonstruct_response_t_327 response_t; struct atio { uint8_t entry_type ; uint8_t entry_count ; uint8_t data[58U] ; uint32_t signature ; }; struct __anonstruct_id_329 { uint8_t reserved ; uint8_t standard ; }; union __anonunion_target_id_t_328 { uint16_t extended ; struct __anonstruct_id_329 id ; }; typedef union __anonunion_target_id_t_328 target_id_t; struct __anonstruct_cmd_entry_t_330 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; target_id_t target ; uint16_t lun ; uint16_t control_flags ; uint16_t reserved_1 ; uint16_t timeout ; uint16_t dseg_count ; uint8_t scsi_cdb[16U] ; uint32_t byte_count ; uint32_t dseg_0_address ; uint32_t dseg_0_length ; uint32_t dseg_1_address ; uint32_t dseg_1_length ; 
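/* Editor's note -- illustrative sketch only, not part of the generated code.
 * In mbx_cmd_t, out_mb and in_mb are bitmasks: bit n set in out_mb means
 * mb[n] is loaded into mailbox register n before the command is issued, and
 * bit n set in in_mb means register n is copied back into mb[n] on
 * completion.  A hypothetical setup (the opcode value is a placeholder):
 *
 *   mbx_cmd_t mc;
 *   mc.mb[0]  = 0x0008U;                  // command opcode placeholder
 *   mc.out_mb = 1U << 0;                  // send only mailbox 0
 *   mc.in_mb  = (1U << 0) | (1U << 1) |   // read status and result words back
 *               (1U << 2) | (1U << 3);
 *   mc.tov    = 30U;                      // timeout, in seconds
 *   mc.flags  = 0U;
 *   // a mailbox-issue routine would then consume &mc
 */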
uint32_t dseg_2_address ; uint32_t dseg_2_length ; }; typedef struct __anonstruct_cmd_entry_t_330 cmd_entry_t; struct __anonstruct_request_t_332 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; target_id_t target ; uint16_t lun ; uint16_t control_flags ; uint16_t reserved_1 ; uint16_t timeout ; uint16_t dseg_count ; uint8_t scsi_cdb[16U] ; uint32_t byte_count ; uint32_t dseg_0_address[2U] ; uint32_t dseg_0_length ; uint32_t dseg_1_address[2U] ; uint32_t dseg_1_length ; }; typedef struct __anonstruct_request_t_332 request_t; struct __anonstruct_nobundling_336 { uint32_t reserved_1 ; uint16_t reserved_2 ; uint16_t reserved_3 ; uint32_t reserved_4 ; uint32_t data_address[2U] ; uint32_t data_length ; uint32_t reserved_5[2U] ; uint32_t reserved_6 ; }; struct __anonstruct_bundling_337 { __le32 dif_byte_count ; uint16_t reserved_1 ; __le16 dseg_count ; uint32_t reserved_2 ; uint32_t data_address[2U] ; uint32_t data_length ; uint32_t dif_address[2U] ; uint32_t dif_length ; }; union __anonunion_u_335 { struct __anonstruct_nobundling_336 nobundling ; struct __anonstruct_bundling_337 bundling ; }; struct crc_context { uint32_t handle ; __le32 ref_tag ; __le16 app_tag ; uint8_t ref_tag_mask[4U] ; uint8_t app_tag_mask[2U] ; __le16 guard_seed ; __le16 prot_opts ; __le16 blk_size ; uint16_t runt_blk_guard ; __le32 byte_count ; union __anonunion_u_335 u ; struct fcp_cmnd fcp_cmnd ; dma_addr_t crc_ctx_dma ; struct list_head dsd_list ; }; struct __anonstruct_ms_iocb_entry_t_343 { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t handle1 ; target_id_t loop_id ; uint16_t status ; uint16_t control_flags ; uint16_t reserved2 ; uint16_t timeout ; uint16_t cmd_dsd_count ; uint16_t total_dsd_count ; uint8_t type ; uint8_t r_ctl ; uint16_t rx_id ; uint16_t reserved3 ; uint32_t handle2 ; uint32_t rsp_bytecount ; uint32_t req_bytecount ; uint32_t dseg_req_address[2U] ; uint32_t dseg_req_length ; uint32_t dseg_rsp_address[2U] ; uint32_t dseg_rsp_length ; }; typedef struct __anonstruct_ms_iocb_entry_t_343 ms_iocb_entry_t; struct __anonstruct_b_345 { uint8_t al_pa ; uint8_t area ; uint8_t domain ; uint8_t rsvd_1 ; }; union __anonunion_port_id_t_344 { unsigned int b24 : 24 ; struct __anonstruct_b_345 b ; }; typedef union __anonunion_port_id_t_344 port_id_t; enum ldv_30574 { FCT_UNKNOWN = 0, FCT_RSCN = 1, FCT_SWITCH = 2, FCT_BROADCAST = 3, FCT_INITIATOR = 4, FCT_TARGET = 5 } ; typedef enum ldv_30574 fc_port_type_t; struct fc_port { struct list_head list ; struct scsi_qla_host *vha ; uint8_t node_name[8U] ; uint8_t port_name[8U] ; port_id_t d_id ; uint16_t loop_id ; uint16_t old_loop_id ; uint16_t tgt_id ; uint16_t old_tgt_id ; uint8_t fcp_prio ; uint8_t fabric_port_name[8U] ; uint16_t fp_speed ; fc_port_type_t port_type ; atomic_t state ; uint32_t flags ; int login_retry ; struct fc_rport *rport ; struct fc_rport *drport ; u32 supported_classes ; uint8_t fc4_type ; uint8_t scan_state ; unsigned long last_queue_full ; unsigned long last_ramp_up ; uint16_t port_id ; unsigned long retry_delay_timestamp ; }; typedef struct fc_port fc_port_t; struct mr_data_fx00 { uint8_t symbolic_name[64U] ; uint8_t serial_num[32U] ; uint8_t hw_version[16U] ; uint8_t fw_version[16U] ; uint8_t uboot_version[16U] ; uint8_t fru_serial_num[32U] ; fc_port_t fcport ; uint8_t fw_hbt_en ; uint8_t fw_hbt_cnt ; uint8_t fw_hbt_miss_cnt ; uint32_t old_fw_hbt_cnt ; uint16_t fw_reset_timer_tick ; uint8_t fw_reset_timer_exp ; uint16_t 
fw_critemp_timer_tick ; uint32_t old_aenmbx0_state ; uint32_t critical_temperature ; bool extended_io_enabled ; bool host_info_resend ; uint8_t hinfo_resend_timer_tick ; }; union __anonunion_a_347 { uint8_t node_name[8U] ; uint8_t manufacturer[64U] ; uint8_t serial_num[32U] ; uint8_t model[17U] ; uint8_t model_desc[80U] ; uint8_t hw_version[32U] ; uint8_t driver_version[32U] ; uint8_t orom_version[16U] ; uint8_t fw_version[32U] ; uint8_t os_version[128U] ; uint32_t max_ct_len ; }; struct ct_fdmi_hba_attr { uint16_t type ; uint16_t len ; union __anonunion_a_347 a ; }; struct ct_fdmi_hba_attributes { uint32_t count ; struct ct_fdmi_hba_attr entry[9U] ; }; union __anonunion_a_348 { uint8_t node_name[8U] ; uint8_t manufacturer[64U] ; uint8_t serial_num[32U] ; uint8_t model[17U] ; uint8_t model_desc[80U] ; uint8_t hw_version[16U] ; uint8_t driver_version[32U] ; uint8_t orom_version[16U] ; uint8_t fw_version[32U] ; uint8_t os_version[128U] ; uint32_t max_ct_len ; uint8_t sym_name[256U] ; uint32_t vendor_id ; uint32_t num_ports ; uint8_t fabric_name[8U] ; uint8_t bios_name[32U] ; uint8_t vendor_indentifer[8U] ; }; struct ct_fdmiv2_hba_attr { uint16_t type ; uint16_t len ; union __anonunion_a_348 a ; }; struct ct_fdmiv2_hba_attributes { uint32_t count ; struct ct_fdmiv2_hba_attr entry[17U] ; }; union __anonunion_a_349 { uint8_t fc4_types[32U] ; uint32_t sup_speed ; uint32_t cur_speed ; uint32_t max_frame_size ; uint8_t os_dev_name[32U] ; uint8_t host_name[256U] ; uint8_t node_name[8U] ; uint8_t port_name[8U] ; uint8_t port_sym_name[128U] ; uint32_t port_type ; uint32_t port_supported_cos ; uint8_t fabric_name[8U] ; uint8_t port_fc4_type[32U] ; uint32_t port_state ; uint32_t num_ports ; uint32_t port_id ; }; struct ct_fdmiv2_port_attr { uint16_t type ; uint16_t len ; union __anonunion_a_349 a ; }; struct ct_fdmiv2_port_attributes { uint32_t count ; struct ct_fdmiv2_port_attr entry[16U] ; }; union __anonunion_a_350 { uint8_t fc4_types[32U] ; uint32_t sup_speed ; uint32_t cur_speed ; uint32_t max_frame_size ; uint8_t os_dev_name[32U] ; uint8_t host_name[256U] ; }; struct ct_fdmi_port_attr { uint16_t type ; uint16_t len ; union __anonunion_a_350 a ; }; struct ct_fdmi_port_attributes { uint32_t count ; struct ct_fdmi_port_attr entry[6U] ; }; struct ct_cmd_hdr { uint8_t revision ; uint8_t in_id[3U] ; uint8_t gs_type ; uint8_t gs_subtype ; uint8_t options ; uint8_t reserved ; }; struct __anonstruct_port_id_352 { uint8_t reserved ; uint8_t port_id[3U] ; }; struct __anonstruct_gid_pt_353 { uint8_t port_type ; uint8_t domain ; uint8_t area ; uint8_t reserved ; }; struct __anonstruct_rft_id_354 { uint8_t reserved ; uint8_t port_id[3U] ; uint8_t fc4_types[32U] ; }; struct __anonstruct_rff_id_355 { uint8_t reserved ; uint8_t port_id[3U] ; uint16_t reserved2 ; uint8_t fc4_feature ; uint8_t fc4_type ; }; struct __anonstruct_rnn_id_356 { uint8_t reserved ; uint8_t port_id[3U] ; uint8_t node_name[8U] ; }; struct __anonstruct_rsnn_nn_357 { uint8_t node_name[8U] ; uint8_t name_len ; uint8_t sym_node_name[255U] ; }; struct __anonstruct_ghat_358 { uint8_t hba_indentifier[8U] ; }; struct __anonstruct_rhba_359 { uint8_t hba_identifier[8U] ; uint32_t entry_count ; uint8_t port_name[8U] ; struct ct_fdmi_hba_attributes attrs ; }; struct __anonstruct_rhba2_360 { uint8_t hba_identifier[8U] ; uint32_t entry_count ; uint8_t port_name[8U] ; struct ct_fdmiv2_hba_attributes attrs ; }; struct __anonstruct_rhat_361 { uint8_t hba_identifier[8U] ; struct ct_fdmi_hba_attributes attrs ; }; struct __anonstruct_rpa_362 { uint8_t 
port_name[8U] ; struct ct_fdmi_port_attributes attrs ; }; struct __anonstruct_rpa2_363 { uint8_t port_name[8U] ; struct ct_fdmiv2_port_attributes attrs ; }; struct __anonstruct_dhba_364 { uint8_t port_name[8U] ; }; struct __anonstruct_dhat_365 { uint8_t port_name[8U] ; }; struct __anonstruct_dprt_366 { uint8_t port_name[8U] ; }; struct __anonstruct_dpa_367 { uint8_t port_name[8U] ; }; struct __anonstruct_gpsc_368 { uint8_t port_name[8U] ; }; struct __anonstruct_gff_id_369 { uint8_t reserved ; uint8_t port_name[3U] ; }; union __anonunion_req_351 { struct __anonstruct_port_id_352 port_id ; struct __anonstruct_gid_pt_353 gid_pt ; struct __anonstruct_rft_id_354 rft_id ; struct __anonstruct_rff_id_355 rff_id ; struct __anonstruct_rnn_id_356 rnn_id ; struct __anonstruct_rsnn_nn_357 rsnn_nn ; struct __anonstruct_ghat_358 ghat ; struct __anonstruct_rhba_359 rhba ; struct __anonstruct_rhba2_360 rhba2 ; struct __anonstruct_rhat_361 rhat ; struct __anonstruct_rpa_362 rpa ; struct __anonstruct_rpa2_363 rpa2 ; struct __anonstruct_dhba_364 dhba ; struct __anonstruct_dhat_365 dhat ; struct __anonstruct_dprt_366 dprt ; struct __anonstruct_dpa_367 dpa ; struct __anonstruct_gpsc_368 gpsc ; struct __anonstruct_gff_id_369 gff_id ; }; struct ct_sns_req { struct ct_cmd_hdr header ; uint16_t command ; uint16_t max_rsp_size ; uint8_t fragment_id ; uint8_t reserved[3U] ; union __anonunion_req_351 req ; }; struct ct_rsp_hdr { struct ct_cmd_hdr header ; uint16_t response ; uint16_t residual ; uint8_t fragment_id ; uint8_t reason_code ; uint8_t explanation_code ; uint8_t vendor_unique ; }; struct ct_sns_gid_pt_data { uint8_t control_byte ; uint8_t port_id[3U] ; }; struct __anonstruct_ga_nxt_371 { uint8_t port_type ; uint8_t port_id[3U] ; uint8_t port_name[8U] ; uint8_t sym_port_name_len ; uint8_t sym_port_name[255U] ; uint8_t node_name[8U] ; uint8_t sym_node_name_len ; uint8_t sym_node_name[255U] ; uint8_t init_proc_assoc[8U] ; uint8_t node_ip_addr[16U] ; uint8_t class_of_service[4U] ; uint8_t fc4_types[32U] ; uint8_t ip_address[16U] ; uint8_t fabric_port_name[8U] ; uint8_t reserved ; uint8_t hard_address[3U] ; }; struct __anonstruct_gid_pt_372 { struct ct_sns_gid_pt_data entries[2048U] ; }; struct __anonstruct_gpn_id_373 { uint8_t port_name[8U] ; }; struct __anonstruct_gnn_id_374 { uint8_t node_name[8U] ; }; struct __anonstruct_gft_id_375 { uint8_t fc4_types[32U] ; }; struct __anonstruct_ghat_376 { uint32_t entry_count ; uint8_t port_name[8U] ; struct ct_fdmi_hba_attributes attrs ; }; struct __anonstruct_gfpn_id_377 { uint8_t port_name[8U] ; }; struct __anonstruct_gpsc_378 { uint16_t speeds ; uint16_t speed ; }; struct __anonstruct_gff_id_379 { uint8_t fc4_features[128U] ; }; union __anonunion_rsp_370 { struct __anonstruct_ga_nxt_371 ga_nxt ; struct __anonstruct_gid_pt_372 gid_pt ; struct __anonstruct_gpn_id_373 gpn_id ; struct __anonstruct_gnn_id_374 gnn_id ; struct __anonstruct_gft_id_375 gft_id ; struct __anonstruct_ghat_376 ghat ; struct __anonstruct_gfpn_id_377 gfpn_id ; struct __anonstruct_gpsc_378 gpsc ; struct __anonstruct_gff_id_379 gff_id ; }; struct ct_sns_rsp { struct ct_rsp_hdr header ; union __anonunion_rsp_370 rsp ; }; union __anonunion_p_380 { struct ct_sns_req req ; struct ct_sns_rsp rsp ; }; struct ct_sns_pkt { union __anonunion_p_380 p ; }; struct __anonstruct_cmd_382 { uint16_t buffer_length ; uint16_t reserved_1 ; uint32_t buffer_address[2U] ; uint16_t subcommand_length ; uint16_t reserved_2 ; uint16_t subcommand ; uint16_t size ; uint32_t reserved_3 ; uint8_t param[36U] ; }; union 
__anonunion_p_381 { struct __anonstruct_cmd_382 cmd ; uint8_t rft_data[16U] ; uint8_t rnn_data[16U] ; uint8_t gan_data[636U] ; uint8_t gid_data[2064U] ; uint8_t gpn_data[24U] ; uint8_t gnn_data[24U] ; }; struct sns_cmd_pkt { union __anonunion_p_381 p ; }; struct fw_blob { char *name ; uint32_t segs[4U] ; struct firmware const *fw ; }; struct gid_list_info { uint8_t al_pa ; uint8_t area ; uint8_t domain ; uint8_t loop_id_2100 ; uint16_t loop_id ; uint16_t reserved_1 ; }; struct rsp_que; struct isp_operations { int (*pci_config)(struct scsi_qla_host * ) ; void (*reset_chip)(struct scsi_qla_host * ) ; int (*chip_diag)(struct scsi_qla_host * ) ; void (*config_rings)(struct scsi_qla_host * ) ; void (*reset_adapter)(struct scsi_qla_host * ) ; int (*nvram_config)(struct scsi_qla_host * ) ; void (*update_fw_options)(struct scsi_qla_host * ) ; int (*load_risc)(struct scsi_qla_host * , uint32_t * ) ; char *(*pci_info_str)(struct scsi_qla_host * , char * ) ; char *(*fw_version_str)(struct scsi_qla_host * , char * , size_t ) ; irqreturn_t (*intr_handler)(int , void * ) ; void (*enable_intrs)(struct qla_hw_data * ) ; void (*disable_intrs)(struct qla_hw_data * ) ; int (*abort_command)(srb_t * ) ; int (*target_reset)(struct fc_port * , uint64_t , int ) ; int (*lun_reset)(struct fc_port * , uint64_t , int ) ; int (*fabric_login)(struct scsi_qla_host * , uint16_t , uint8_t , uint8_t , uint8_t , uint16_t * , uint8_t ) ; int (*fabric_logout)(struct scsi_qla_host * , uint16_t , uint8_t , uint8_t , uint8_t ) ; uint16_t (*calc_req_entries)(uint16_t ) ; void (*build_iocbs)(srb_t * , cmd_entry_t * , uint16_t ) ; void *(*prep_ms_iocb)(struct scsi_qla_host * , uint32_t , uint32_t ) ; void *(*prep_ms_fdmi_iocb)(struct scsi_qla_host * , uint32_t , uint32_t ) ; uint8_t *(*read_nvram)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ) ; int (*write_nvram)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ) ; void (*fw_dump)(struct scsi_qla_host * , int ) ; int (*beacon_on)(struct scsi_qla_host * ) ; int (*beacon_off)(struct scsi_qla_host * ) ; void (*beacon_blink)(struct scsi_qla_host * ) ; uint8_t *(*read_optrom)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ) ; int (*write_optrom)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ) ; int (*get_flash_version)(struct scsi_qla_host * , void * ) ; int (*start_scsi)(srb_t * ) ; int (*abort_isp)(struct scsi_qla_host * ) ; int (*iospace_config)(struct qla_hw_data * ) ; int (*initialize_adapter)(struct scsi_qla_host * ) ; }; struct qla_msix_entry { int have_irq ; uint32_t vector ; uint16_t entry ; struct rsp_que *rsp ; }; enum qla_work_type { QLA_EVT_AEN = 0, QLA_EVT_IDC_ACK = 1, QLA_EVT_ASYNC_LOGIN = 2, QLA_EVT_ASYNC_LOGIN_DONE = 3, QLA_EVT_ASYNC_LOGOUT = 4, QLA_EVT_ASYNC_LOGOUT_DONE = 5, QLA_EVT_ASYNC_ADISC = 6, QLA_EVT_ASYNC_ADISC_DONE = 7, QLA_EVT_UEVENT = 8, QLA_EVT_AENFX = 9 } ; struct __anonstruct_aen_384 { enum fc_host_event_code code ; u32 data ; }; struct __anonstruct_idc_ack_385 { uint16_t mb[7U] ; }; struct __anonstruct_logio_386 { struct fc_port *fcport ; u16 data[2U] ; }; struct __anonstruct_uevent_387 { u32 code ; }; struct __anonstruct_aenfx_388 { uint32_t evtcode ; uint32_t mbx[8U] ; uint32_t count ; }; struct __anonstruct_iosb_389 { srb_t *sp ; }; union __anonunion_u_383 { struct __anonstruct_aen_384 aen ; struct __anonstruct_idc_ack_385 idc_ack ; struct __anonstruct_logio_386 logio ; struct __anonstruct_uevent_387 uevent ; struct __anonstruct_aenfx_388 aenfx ; struct __anonstruct_iosb_389 iosb ; }; struct qla_work_evt { 
struct list_head list ; enum qla_work_type type ; u32 flags ; union __anonunion_u_383 u ; }; struct qla_chip_state_84xx { struct list_head list ; struct kref kref ; void *bus ; spinlock_t access_lock ; struct mutex fw_update_mutex ; uint32_t fw_update ; uint32_t op_fw_version ; uint32_t op_fw_size ; uint32_t op_fw_seq_size ; uint32_t diag_fw_version ; uint32_t gold_fw_version ; }; struct qla_statistics { uint32_t total_isp_aborts ; uint64_t input_bytes ; uint64_t output_bytes ; uint64_t input_requests ; uint64_t output_requests ; uint32_t control_requests ; uint64_t jiffies_at_last_reset ; uint32_t stat_max_pend_cmds ; uint32_t stat_max_qfull_cmds_alloc ; uint32_t stat_max_qfull_cmds_dropped ; }; struct bidi_statistics { unsigned long long io_count ; unsigned long long transfer_bytes ; }; struct rsp_que { dma_addr_t dma ; response_t *ring ; response_t *ring_ptr ; uint32_t *rsp_q_in ; uint32_t *rsp_q_out ; uint16_t ring_index ; uint16_t out_ptr ; uint16_t *in_ptr ; uint16_t length ; uint16_t options ; uint16_t rid ; uint16_t id ; uint16_t vp_idx ; struct qla_hw_data *hw ; struct qla_msix_entry *msix ; struct req_que *req ; srb_t *status_srb ; struct work_struct q_work ; dma_addr_t dma_fx00 ; response_t *ring_fx00 ; uint16_t length_fx00 ; uint8_t rsp_pkt[64U] ; }; struct req_que { dma_addr_t dma ; request_t *ring ; request_t *ring_ptr ; uint32_t *req_q_in ; uint32_t *req_q_out ; uint16_t ring_index ; uint16_t in_ptr ; uint16_t *out_ptr ; uint16_t cnt ; uint16_t length ; uint16_t options ; uint16_t rid ; uint16_t id ; uint16_t qos ; uint16_t vp_idx ; struct rsp_que *rsp ; srb_t **outstanding_cmds ; uint32_t current_outstanding_cmd ; uint16_t num_outstanding_cmds ; int max_q_depth ; dma_addr_t dma_fx00 ; request_t *ring_fx00 ; uint16_t length_fx00 ; uint8_t req_pkt[64U] ; }; struct qlfc_fw { void *fw_buf ; dma_addr_t fw_dma ; uint32_t len ; }; struct qla_tgt; struct scsi_qlt_host { void *target_lport_ptr ; struct mutex tgt_mutex ; struct mutex tgt_host_action_mutex ; struct qla_tgt *qla_tgt ; }; struct qla_tgt_func_tmpl; struct qla_tgt_cmd; struct qla_tgt_vp_map; struct qlt_hw_data { unsigned char enable_class_2 : 1 ; unsigned char enable_explicit_conf : 1 ; unsigned char ini_mode_force_reverse : 1 ; unsigned char node_name_set : 1 ; dma_addr_t atio_dma ; struct atio *atio_ring ; struct atio *atio_ring_ptr ; uint16_t atio_ring_index ; uint16_t atio_q_length ; uint32_t *atio_q_in ; uint32_t *atio_q_out ; struct qla_tgt_func_tmpl *tgt_ops ; struct qla_tgt_cmd *cmds[1024U] ; uint16_t current_handle ; struct qla_tgt_vp_map *tgt_vp_map ; int saved_set ; uint16_t saved_exchange_count ; uint32_t saved_firmware_options_1 ; uint32_t saved_firmware_options_2 ; uint32_t saved_firmware_options_3 ; uint8_t saved_firmware_options[2U] ; uint8_t saved_add_firmware_options[2U] ; uint8_t tgt_node_name[8U] ; struct list_head q_full_list ; uint32_t num_pend_cmds ; uint32_t num_qfull_cmds_alloc ; uint32_t num_qfull_cmds_dropped ; spinlock_t q_full_lock ; uint32_t leak_exchg_thresh_hold ; }; struct __anonstruct_flags_390 { unsigned char mbox_int : 1 ; unsigned char mbox_busy : 1 ; unsigned char disable_risc_code_load : 1 ; unsigned char enable_64bit_addressing : 1 ; unsigned char enable_lip_reset : 1 ; unsigned char enable_target_reset : 1 ; unsigned char enable_lip_full_login : 1 ; unsigned char enable_led_scheme : 1 ; unsigned char msi_enabled : 1 ; unsigned char msix_enabled : 1 ; unsigned char disable_serdes : 1 ; unsigned char gpsc_supported : 1 ; unsigned char npiv_supported : 1 ; unsigned char 
pci_channel_io_perm_failure : 1 ; unsigned char fce_enabled : 1 ; unsigned char fac_supported : 1 ; unsigned char chip_reset_done : 1 ; unsigned char running_gold_fw : 1 ; unsigned char eeh_busy : 1 ; unsigned char cpu_affinity_enabled : 1 ; unsigned char disable_msix_handshake : 1 ; unsigned char fcp_prio_enabled : 1 ; unsigned char isp82xx_fw_hung : 1 ; unsigned char nic_core_hung : 1 ; unsigned char quiesce_owner : 1 ; unsigned char nic_core_reset_hdlr_active : 1 ; unsigned char nic_core_reset_owner : 1 ; unsigned char isp82xx_no_md_cap : 1 ; unsigned char host_shutting_down : 1 ; unsigned char idc_compl_status : 1 ; unsigned char mr_reset_hdlr_active : 1 ; unsigned char mr_intr_valid : 1 ; unsigned char fawwpn_enabled : 1 ; }; struct qla2xxx_fw_dump; struct qla_hw_data { struct pci_dev *pdev ; mempool_t *srb_mempool ; struct __anonstruct_flags_390 volatile flags ; spinlock_t hardware_lock ; int bars ; int mem_only ; device_reg_t *iobase ; resource_size_t pio_address ; dma_addr_t bar0_hdl ; void *cregbase ; dma_addr_t bar2_hdl ; uint32_t rqstq_intr_code ; uint32_t mbx_intr_code ; uint32_t req_que_len ; uint32_t rsp_que_len ; uint32_t req_que_off ; uint32_t rsp_que_off ; device_reg_t *mqiobase ; device_reg_t *msixbase ; uint16_t msix_count ; uint8_t mqenable ; struct req_que **req_q_map ; struct rsp_que **rsp_q_map ; unsigned long req_qid_map[4U] ; unsigned long rsp_qid_map[4U] ; uint8_t max_req_queues ; uint8_t max_rsp_queues ; struct qla_npiv_entry *npiv_info ; uint16_t nvram_npiv_size ; uint16_t switch_cap ; uint8_t port_no ; uint8_t loop_down_abort_time ; atomic_t loop_down_timer ; uint8_t link_down_timeout ; uint16_t max_loop_id ; uint16_t max_fibre_devices ; uint16_t fb_rev ; uint16_t min_external_loopid ; uint16_t link_data_rate ; uint8_t current_topology ; uint8_t prev_topology ; uint8_t operating_mode ; uint8_t interrupts_on ; uint32_t isp_abort_cnt ; uint32_t device_type ; uint8_t serial0 ; uint8_t serial1 ; uint8_t serial2 ; uint16_t nvram_size ; uint16_t nvram_base ; void *nvram ; uint16_t vpd_size ; uint16_t vpd_base ; void *vpd ; uint16_t loop_reset_delay ; uint8_t retry_count ; uint8_t login_timeout ; uint16_t r_a_tov ; int port_down_retry_count ; uint8_t mbx_count ; uint8_t aen_mbx_count ; uint32_t login_retry_count ; ms_iocb_entry_t *ms_iocb ; dma_addr_t ms_iocb_dma ; struct ct_sns_pkt *ct_sns ; dma_addr_t ct_sns_dma ; struct sns_cmd_pkt *sns_cmd ; dma_addr_t sns_cmd_dma ; void *sfp_data ; dma_addr_t sfp_data_dma ; void *xgmac_data ; dma_addr_t xgmac_data_dma ; void *dcbx_tlv ; dma_addr_t dcbx_tlv_dma ; struct task_struct *dpc_thread ; uint8_t dpc_active ; dma_addr_t gid_list_dma ; struct gid_list_info *gid_list ; int gid_list_info_size ; struct dma_pool *s_dma_pool ; dma_addr_t init_cb_dma ; init_cb_t *init_cb ; int init_cb_size ; dma_addr_t ex_init_cb_dma ; struct ex_init_cb_81xx *ex_init_cb ; void *async_pd ; dma_addr_t async_pd_dma ; void *swl ; uint16_t mailbox_out[32U] ; uint32_t mailbox_out32[32U] ; uint32_t aenmb[8U] ; mbx_cmd_t *mcp ; struct mbx_cmd_32 *mcp32 ; unsigned long mbx_cmd_flags ; struct mutex vport_lock ; spinlock_t vport_slock ; struct completion mbx_cmd_comp ; struct completion mbx_intr_comp ; struct completion dcbx_comp ; struct completion lb_portup_comp ; int notify_dcbx_comp ; int notify_lb_portup_comp ; struct mutex selflogin_lock ; uint16_t fw_major_version ; uint16_t fw_minor_version ; uint16_t fw_subminor_version ; uint16_t fw_attributes ; uint16_t fw_attributes_h ; uint16_t fw_attributes_ext[2U] ; uint32_t fw_memory_size ; uint32_t 
fw_transfer_size ; uint32_t fw_srisc_address ; uint16_t fw_xcb_count ; uint16_t fw_iocb_count ; uint32_t fw_shared_ram_start ; uint32_t fw_shared_ram_end ; uint16_t fw_options[16U] ; uint8_t fw_seriallink_options[4U] ; uint16_t fw_seriallink_options24[4U] ; uint8_t mpi_version[3U] ; uint32_t mpi_capabilities ; uint8_t phy_version[3U] ; void *fw_dump_template ; uint32_t fw_dump_template_len ; struct qla2xxx_fw_dump *fw_dump ; uint32_t fw_dump_len ; int fw_dumped ; unsigned long fw_dump_cap_flags ; int fw_dump_reading ; int prev_minidump_failed ; dma_addr_t eft_dma ; void *eft ; dma_addr_t mctp_dump_dma ; void *mctp_dump ; int mctp_dumped ; int mctp_dump_reading ; uint32_t chain_offset ; struct dentry *dfs_dir ; struct dentry *dfs_fce ; dma_addr_t fce_dma ; void *fce ; uint32_t fce_bufs ; uint16_t fce_mb[8U] ; uint64_t fce_wr ; uint64_t fce_rd ; struct mutex fce_mutex ; uint32_t pci_attr ; uint16_t chip_revision ; uint16_t product_id[4U] ; uint8_t model_number[17U] ; char model_desc[80U] ; uint8_t adapter_id[17U] ; char *optrom_buffer ; uint32_t optrom_size ; int optrom_state ; uint32_t optrom_region_start ; uint32_t optrom_region_size ; struct mutex optrom_mutex ; uint8_t bios_revision[2U] ; uint8_t efi_revision[2U] ; uint8_t fcode_revision[16U] ; uint32_t fw_revision[4U] ; uint32_t gold_fw_version[4U] ; uint32_t flash_conf_off ; uint32_t flash_data_off ; uint32_t nvram_conf_off ; uint32_t nvram_data_off ; uint32_t fdt_wrt_disable ; uint32_t fdt_wrt_enable ; uint32_t fdt_erase_cmd ; uint32_t fdt_block_size ; uint32_t fdt_unprotect_sec_cmd ; uint32_t fdt_protect_sec_cmd ; uint32_t fdt_wrt_sts_reg_cmd ; uint32_t flt_region_flt ; uint32_t flt_region_fdt ; uint32_t flt_region_boot ; uint32_t flt_region_fw ; uint32_t flt_region_vpd_nvram ; uint32_t flt_region_vpd ; uint32_t flt_region_nvram ; uint32_t flt_region_npiv_conf ; uint32_t flt_region_gold_fw ; uint32_t flt_region_fcp_prio ; uint32_t flt_region_bootload ; uint16_t beacon_blink_led ; uint8_t beacon_color_state ; uint16_t zio_mode ; uint16_t zio_timer ; struct qla_msix_entry *msix_entries ; struct list_head vp_list ; unsigned long vp_idx_map[4U] ; uint16_t num_vhosts ; uint16_t num_vsans ; uint16_t max_npiv_vports ; int cur_vport_count ; struct qla_chip_state_84xx *cs84xx ; struct qla_statistics qla_stats ; struct isp_operations *isp_ops ; struct workqueue_struct *wq ; struct qlfc_fw fw_buf ; struct qla_fcp_prio_cfg *fcp_prio_cfg ; struct dma_pool *dl_dma_pool ; struct dma_pool *fcp_cmnd_dma_pool ; mempool_t *ctx_mempool ; unsigned long nx_pcibase ; uint8_t *nxdb_rd_ptr ; unsigned long nxdb_wr_ptr ; uint32_t crb_win ; uint32_t curr_window ; uint32_t ddr_mn_window ; unsigned long mn_win_crb ; unsigned long ms_win_crb ; int qdr_sn_window ; uint32_t fcoe_dev_init_timeout ; uint32_t fcoe_reset_timeout ; rwlock_t hw_lock ; uint16_t portnum ; int link_width ; struct fw_blob *hablob ; struct qla82xx_legacy_intr_set nx_legacy_intr ; uint16_t gbl_dsd_inuse ; uint16_t gbl_dsd_avail ; struct list_head gbl_dsd_list ; uint8_t fw_type ; __le32 file_prd_off ; uint32_t md_template_size ; void *md_tmplt_hdr ; dma_addr_t md_tmplt_hdr_dma ; void *md_dump ; uint32_t md_dump_size ; void *loop_id_map ; uint32_t idc_audit_ts ; uint32_t idc_extend_tmo ; struct workqueue_struct *dpc_lp_wq ; struct work_struct idc_aen ; struct workqueue_struct *dpc_hp_wq ; struct work_struct nic_core_reset ; struct work_struct idc_state_handler ; struct work_struct nic_core_unrecoverable ; struct work_struct board_disable ; struct mr_data_fx00 mr ; uint32_t chip_reset ; struct 
qlt_hw_data tgt ; int allow_cna_fw_dump ; }; struct __anonstruct_flags_391 { unsigned char init_done : 1 ; unsigned char online : 1 ; unsigned char reset_active : 1 ; unsigned char management_server_logged_in : 1 ; unsigned char process_response_queue : 1 ; unsigned char difdix_supported : 1 ; unsigned char delete_progress : 1 ; unsigned char fw_tgt_reported : 1 ; }; struct scsi_qla_host { struct list_head list ; struct list_head vp_fcports ; struct list_head work_list ; spinlock_t work_lock ; struct Scsi_Host *host ; unsigned long host_no ; uint8_t host_str[16U] ; struct __anonstruct_flags_391 volatile flags ; atomic_t loop_state ; unsigned long dpc_flags ; unsigned long pci_flags ; uint32_t device_flags ; uint16_t loop_id ; uint16_t self_login_loop_id ; fc_port_t bidir_fcport ; port_id_t d_id ; uint8_t marker_needed ; uint16_t mgmt_svr_loop_id ; uint8_t loop_down_abort_time ; atomic_t loop_down_timer ; uint8_t link_down_timeout ; uint32_t timer_active ; struct timer_list timer ; uint8_t node_name[8U] ; uint8_t port_name[8U] ; uint8_t fabric_node_name[8U] ; uint16_t fcoe_vlan_id ; uint16_t fcoe_fcf_idx ; uint8_t fcoe_vn_port_mac[6U] ; uint32_t vp_abort_cnt ; struct fc_vport *fc_vport ; uint16_t vp_idx ; unsigned long vp_flags ; atomic_t vp_state ; uint16_t vp_err_state ; uint16_t vp_prev_err_state ; struct qla_hw_data *hw ; struct scsi_qlt_host vha_tgt ; struct req_que *req ; int fw_heartbeat_counter ; int seconds_since_last_heartbeat ; struct fc_host_statistics fc_host_stat ; struct qla_statistics qla_stats ; struct bidi_statistics bidi_stats ; atomic_t vref_count ; struct qla8044_reset_template reset_tmplt ; }; typedef struct scsi_qla_host scsi_qla_host_t; struct qla_tgt_vp_map { uint8_t idx ; scsi_qla_host_t *vha ; }; enum nexus_wait_type { WAIT_HOST = 0, WAIT_TARGET = 1, WAIT_LUN = 2 } ; struct qla2300_fw_dump { uint16_t hccr ; uint16_t pbiu_reg[8U] ; uint16_t risc_host_reg[8U] ; uint16_t mailbox_reg[32U] ; uint16_t resp_dma_reg[32U] ; uint16_t dma_reg[48U] ; uint16_t risc_hdw_reg[16U] ; uint16_t risc_gp0_reg[16U] ; uint16_t risc_gp1_reg[16U] ; uint16_t risc_gp2_reg[16U] ; uint16_t risc_gp3_reg[16U] ; uint16_t risc_gp4_reg[16U] ; uint16_t risc_gp5_reg[16U] ; uint16_t risc_gp6_reg[16U] ; uint16_t risc_gp7_reg[16U] ; uint16_t frame_buf_hdw_reg[64U] ; uint16_t fpm_b0_reg[64U] ; uint16_t fpm_b1_reg[64U] ; uint16_t risc_ram[63488U] ; uint16_t stack_ram[4096U] ; uint16_t data_ram[1U] ; }; struct qla2100_fw_dump { uint16_t hccr ; uint16_t pbiu_reg[8U] ; uint16_t mailbox_reg[32U] ; uint16_t dma_reg[48U] ; uint16_t risc_hdw_reg[16U] ; uint16_t risc_gp0_reg[16U] ; uint16_t risc_gp1_reg[16U] ; uint16_t risc_gp2_reg[16U] ; uint16_t risc_gp3_reg[16U] ; uint16_t risc_gp4_reg[16U] ; uint16_t risc_gp5_reg[16U] ; uint16_t risc_gp6_reg[16U] ; uint16_t risc_gp7_reg[16U] ; uint16_t frame_buf_hdw_reg[16U] ; uint16_t fpm_b0_reg[64U] ; uint16_t fpm_b1_reg[64U] ; uint16_t risc_ram[61440U] ; }; struct qla24xx_fw_dump { uint32_t host_status ; uint32_t host_reg[32U] ; uint32_t shadow_reg[7U] ; uint16_t mailbox_reg[32U] ; uint32_t xseq_gp_reg[128U] ; uint32_t xseq_0_reg[16U] ; uint32_t xseq_1_reg[16U] ; uint32_t rseq_gp_reg[128U] ; uint32_t rseq_0_reg[16U] ; uint32_t rseq_1_reg[16U] ; uint32_t rseq_2_reg[16U] ; uint32_t cmd_dma_reg[16U] ; uint32_t req0_dma_reg[15U] ; uint32_t resp0_dma_reg[15U] ; uint32_t req1_dma_reg[15U] ; uint32_t xmt0_dma_reg[32U] ; uint32_t xmt1_dma_reg[32U] ; uint32_t xmt2_dma_reg[32U] ; uint32_t xmt3_dma_reg[32U] ; uint32_t xmt4_dma_reg[32U] ; uint32_t xmt_data_dma_reg[16U] ; uint32_t 
rcvt0_data_dma_reg[32U] ; uint32_t rcvt1_data_dma_reg[32U] ; uint32_t risc_gp_reg[128U] ; uint32_t lmc_reg[112U] ; uint32_t fpm_hdw_reg[192U] ; uint32_t fb_hdw_reg[176U] ; uint32_t code_ram[8192U] ; uint32_t ext_mem[1U] ; }; struct qla25xx_fw_dump { uint32_t host_status ; uint32_t host_risc_reg[32U] ; uint32_t pcie_regs[4U] ; uint32_t host_reg[32U] ; uint32_t shadow_reg[11U] ; uint32_t risc_io_reg ; uint16_t mailbox_reg[32U] ; uint32_t xseq_gp_reg[128U] ; uint32_t xseq_0_reg[48U] ; uint32_t xseq_1_reg[16U] ; uint32_t rseq_gp_reg[128U] ; uint32_t rseq_0_reg[32U] ; uint32_t rseq_1_reg[16U] ; uint32_t rseq_2_reg[16U] ; uint32_t aseq_gp_reg[128U] ; uint32_t aseq_0_reg[32U] ; uint32_t aseq_1_reg[16U] ; uint32_t aseq_2_reg[16U] ; uint32_t cmd_dma_reg[16U] ; uint32_t req0_dma_reg[15U] ; uint32_t resp0_dma_reg[15U] ; uint32_t req1_dma_reg[15U] ; uint32_t xmt0_dma_reg[32U] ; uint32_t xmt1_dma_reg[32U] ; uint32_t xmt2_dma_reg[32U] ; uint32_t xmt3_dma_reg[32U] ; uint32_t xmt4_dma_reg[32U] ; uint32_t xmt_data_dma_reg[16U] ; uint32_t rcvt0_data_dma_reg[32U] ; uint32_t rcvt1_data_dma_reg[32U] ; uint32_t risc_gp_reg[128U] ; uint32_t lmc_reg[128U] ; uint32_t fpm_hdw_reg[192U] ; uint32_t fb_hdw_reg[192U] ; uint32_t code_ram[8192U] ; uint32_t ext_mem[1U] ; }; struct qla81xx_fw_dump { uint32_t host_status ; uint32_t host_risc_reg[32U] ; uint32_t pcie_regs[4U] ; uint32_t host_reg[32U] ; uint32_t shadow_reg[11U] ; uint32_t risc_io_reg ; uint16_t mailbox_reg[32U] ; uint32_t xseq_gp_reg[128U] ; uint32_t xseq_0_reg[48U] ; uint32_t xseq_1_reg[16U] ; uint32_t rseq_gp_reg[128U] ; uint32_t rseq_0_reg[32U] ; uint32_t rseq_1_reg[16U] ; uint32_t rseq_2_reg[16U] ; uint32_t aseq_gp_reg[128U] ; uint32_t aseq_0_reg[32U] ; uint32_t aseq_1_reg[16U] ; uint32_t aseq_2_reg[16U] ; uint32_t cmd_dma_reg[16U] ; uint32_t req0_dma_reg[15U] ; uint32_t resp0_dma_reg[15U] ; uint32_t req1_dma_reg[15U] ; uint32_t xmt0_dma_reg[32U] ; uint32_t xmt1_dma_reg[32U] ; uint32_t xmt2_dma_reg[32U] ; uint32_t xmt3_dma_reg[32U] ; uint32_t xmt4_dma_reg[32U] ; uint32_t xmt_data_dma_reg[16U] ; uint32_t rcvt0_data_dma_reg[32U] ; uint32_t rcvt1_data_dma_reg[32U] ; uint32_t risc_gp_reg[128U] ; uint32_t lmc_reg[128U] ; uint32_t fpm_hdw_reg[224U] ; uint32_t fb_hdw_reg[208U] ; uint32_t code_ram[8192U] ; uint32_t ext_mem[1U] ; }; struct qla83xx_fw_dump { uint32_t host_status ; uint32_t host_risc_reg[48U] ; uint32_t pcie_regs[4U] ; uint32_t host_reg[32U] ; uint32_t shadow_reg[11U] ; uint32_t risc_io_reg ; uint16_t mailbox_reg[32U] ; uint32_t xseq_gp_reg[256U] ; uint32_t xseq_0_reg[48U] ; uint32_t xseq_1_reg[16U] ; uint32_t xseq_2_reg[16U] ; uint32_t rseq_gp_reg[256U] ; uint32_t rseq_0_reg[32U] ; uint32_t rseq_1_reg[16U] ; uint32_t rseq_2_reg[16U] ; uint32_t rseq_3_reg[16U] ; uint32_t aseq_gp_reg[256U] ; uint32_t aseq_0_reg[32U] ; uint32_t aseq_1_reg[16U] ; uint32_t aseq_2_reg[16U] ; uint32_t aseq_3_reg[16U] ; uint32_t cmd_dma_reg[64U] ; uint32_t req0_dma_reg[15U] ; uint32_t resp0_dma_reg[15U] ; uint32_t req1_dma_reg[15U] ; uint32_t xmt0_dma_reg[32U] ; uint32_t xmt1_dma_reg[32U] ; uint32_t xmt2_dma_reg[32U] ; uint32_t xmt3_dma_reg[32U] ; uint32_t xmt4_dma_reg[32U] ; uint32_t xmt_data_dma_reg[16U] ; uint32_t rcvt0_data_dma_reg[32U] ; uint32_t rcvt1_data_dma_reg[32U] ; uint32_t risc_gp_reg[128U] ; uint32_t lmc_reg[128U] ; uint32_t fpm_hdw_reg[256U] ; uint32_t rq0_array_reg[256U] ; uint32_t rq1_array_reg[256U] ; uint32_t rp0_array_reg[256U] ; uint32_t rp1_array_reg[256U] ; uint32_t queue_control_reg[16U] ; uint32_t fb_hdw_reg[432U] ; uint32_t at0_array_reg[128U] ; 
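/* Editor's note -- illustrative sketch only, not part of the generated code.
 * The trailing one-element arrays in these dump layouts (ext_mem[1U],
 * data_ram[1U]) stand in for variable-length regions whose real size is only
 * known when the dump is captured; the enclosing qla2xxx_fw_dump header
 * records the sizes.  A hypothetical size computation for a 24xx-style dump,
 * using offsetof() as provided by <stddef.h>:
 *
 *   unsigned long example_dump_bytes(uint32_t ext_mem_words)
 *   {
 *       // fixed part up to ext_mem[], plus the variable external memory
 *       return offsetof(struct qla24xx_fw_dump, ext_mem)
 *              + (unsigned long)ext_mem_words * sizeof(uint32_t);
 *   }
 */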
uint32_t code_ram[9216U] ; uint32_t ext_mem[1U] ; }; union __anonunion_isp_392 { struct qla2100_fw_dump isp21 ; struct qla2300_fw_dump isp23 ; struct qla24xx_fw_dump isp24 ; struct qla25xx_fw_dump isp25 ; struct qla81xx_fw_dump isp81 ; struct qla83xx_fw_dump isp83 ; }; struct qla2xxx_fw_dump { uint8_t signature[4U] ; uint32_t version ; uint32_t fw_major_version ; uint32_t fw_minor_version ; uint32_t fw_subminor_version ; uint32_t fw_attributes ; uint32_t vendor ; uint32_t device ; uint32_t subsystem_vendor ; uint32_t subsystem_device ; uint32_t fixed_size ; uint32_t mem_size ; uint32_t req_q_size ; uint32_t rsp_q_size ; uint32_t eft_size ; uint32_t eft_addr_l ; uint32_t eft_addr_h ; uint32_t header_size ; union __anonunion_isp_392 isp ; }; struct __anonstruct_isp2x_394 { uint32_t sys_define_2 ; target_id_t target ; uint16_t lun ; uint8_t target_id ; uint8_t reserved_1 ; uint16_t status_modifier ; uint16_t status ; uint16_t task_flags ; uint16_t seq_id ; uint16_t srr_rx_id ; uint32_t srr_rel_offs ; uint16_t srr_ui ; uint16_t srr_ox_id ; uint8_t reserved_2[28U] ; }; struct __anonstruct_isp24_395 { uint32_t reserved ; uint16_t nport_handle ; uint16_t reserved_2 ; uint16_t flags ; uint16_t srr_rx_id ; uint16_t status ; uint8_t status_subcode ; uint8_t fw_handle ; uint32_t exchange_address ; uint32_t srr_rel_offs ; uint16_t srr_ui ; uint16_t srr_ox_id ; uint8_t reserved_4[19U] ; uint8_t vp_index ; uint32_t reserved_5 ; uint8_t port_id[3U] ; uint8_t reserved_6 ; }; union __anonunion_u_393 { struct __anonstruct_isp2x_394 isp2x ; struct __anonstruct_isp24_395 isp24 ; }; struct imm_ntfy_from_isp { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; union __anonunion_u_393 u ; uint16_t reserved_7 ; uint16_t ox_id ; }; struct fcp_hdr { uint8_t r_ctl ; uint8_t d_id[3U] ; uint8_t cs_ctl ; uint8_t s_id[3U] ; uint8_t type ; uint8_t f_ctl[3U] ; uint8_t seq_id ; uint8_t df_ctl ; uint16_t seq_cnt ; __be16 ox_id ; uint16_t rx_id ; uint32_t parameter ; }; struct fcp_hdr_le { uint8_t d_id[3U] ; uint8_t r_ctl ; uint8_t s_id[3U] ; uint8_t cs_ctl ; uint8_t f_ctl[3U] ; uint8_t type ; uint16_t seq_cnt ; uint8_t df_ctl ; uint8_t seq_id ; uint16_t rx_id ; uint16_t ox_id ; uint32_t parameter ; }; struct atio7_fcp_cmnd { uint64_t lun ; uint8_t cmnd_ref ; unsigned char task_attr : 3 ; unsigned char reserved : 5 ; uint8_t task_mgmt_flags ; unsigned char wrdata : 1 ; unsigned char rddata : 1 ; unsigned char add_cdb_len : 6 ; uint8_t cdb[16U] ; uint8_t add_cdb[4U] ; }; struct __anonstruct_isp2x_400 { uint16_t entry_hdr ; uint8_t sys_define ; uint8_t entry_status ; uint32_t sys_define_2 ; target_id_t target ; uint16_t rx_id ; uint16_t flags ; uint16_t status ; uint8_t command_ref ; uint8_t task_codes ; uint8_t task_flags ; uint8_t execution_codes ; uint8_t cdb[16U] ; uint32_t data_length ; uint16_t lun ; uint8_t initiator_port_name[8U] ; uint16_t reserved_32[6U] ; uint16_t ox_id ; }; struct __anonstruct_isp24_401 { uint16_t entry_hdr ; uint8_t fcp_cmnd_len_low ; unsigned char fcp_cmnd_len_high : 4 ; unsigned char attr : 4 ; uint32_t exchange_addr ; struct fcp_hdr fcp_hdr ; struct atio7_fcp_cmnd fcp_cmnd ; }; struct __anonstruct_raw_402 { uint8_t entry_type ; uint8_t entry_count ; uint8_t data[58U] ; uint32_t signature ; }; union __anonunion_u_399 { struct __anonstruct_isp2x_400 isp2x ; struct __anonstruct_isp24_401 isp24 ; struct __anonstruct_raw_402 raw ; }; struct atio_from_isp { union __anonunion_u_399 u ; }; struct abts_recv_from_24xx { uint8_t entry_type ; uint8_t entry_count ; 
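/*
 * ABTS (abort sequence) notification delivered by the ISP24xx firmware.
 * The members below carry the N_Port handle, the little-endian FC frame
 * header of the received ABTS, and exchange_addr_to_abort, which names
 * the exchange that is to be aborted.
 */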
uint8_t sys_define ; uint8_t entry_status ; uint8_t reserved_1[6U] ; uint16_t nport_handle ; uint8_t reserved_2[2U] ; uint8_t vp_index ; unsigned char reserved_3 : 4 ; unsigned char sof_type : 4 ; uint32_t exchange_address ; struct fcp_hdr_le fcp_hdr_le ; uint8_t reserved_4[16U] ; uint32_t exchange_addr_to_abort ; }; struct qla_tgt_mgmt_cmd; struct qla_tgt_sess; struct qla_tgt_func_tmpl { int (*handle_cmd)(struct scsi_qla_host * , struct qla_tgt_cmd * , unsigned char * , uint32_t , int , int , int ) ; void (*handle_data)(struct qla_tgt_cmd * ) ; void (*handle_dif_err)(struct qla_tgt_cmd * ) ; int (*handle_tmr)(struct qla_tgt_mgmt_cmd * , uint32_t , uint8_t , uint32_t ) ; void (*free_cmd)(struct qla_tgt_cmd * ) ; void (*free_mcmd)(struct qla_tgt_mgmt_cmd * ) ; void (*free_session)(struct qla_tgt_sess * ) ; int (*check_initiator_node_acl)(struct scsi_qla_host * , unsigned char * , void * , uint8_t * , uint16_t ) ; void (*update_sess)(struct qla_tgt_sess * , port_id_t , uint16_t , bool ) ; struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host * , uint16_t const ) ; struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host * , uint8_t const * ) ; void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess * ) ; void (*put_sess)(struct qla_tgt_sess * ) ; void (*shutdown_sess)(struct qla_tgt_sess * ) ; }; struct configfs_item_operations; struct configfs_group_operations; struct configfs_attribute; struct configfs_subsystem; struct config_group; struct config_item_type; struct config_item { char *ci_name ; char ci_namebuf[20U] ; struct kref ci_kref ; struct list_head ci_entry ; struct config_item *ci_parent ; struct config_group *ci_group ; struct config_item_type *ci_type ; struct dentry *ci_dentry ; }; struct config_item_type { struct module *ct_owner ; struct configfs_item_operations *ct_item_ops ; struct configfs_group_operations *ct_group_ops ; struct configfs_attribute **ct_attrs ; }; struct config_group { struct config_item cg_item ; struct list_head cg_children ; struct configfs_subsystem *cg_subsys ; struct config_group **default_groups ; }; struct configfs_attribute { char const *ca_name ; struct module *ca_owner ; umode_t ca_mode ; }; struct configfs_item_operations { void (*release)(struct config_item * ) ; ssize_t (*show_attribute)(struct config_item * , struct configfs_attribute * , char * ) ; ssize_t (*store_attribute)(struct config_item * , struct configfs_attribute * , char const * , size_t ) ; int (*allow_link)(struct config_item * , struct config_item * ) ; int (*drop_link)(struct config_item * , struct config_item * ) ; }; struct configfs_group_operations { struct config_item *(*make_item)(struct config_group * , char const * ) ; struct config_group *(*make_group)(struct config_group * , char const * ) ; int (*commit_item)(struct config_item * ) ; void (*disconnect_notify)(struct config_group * , struct config_item * ) ; void (*drop_item)(struct config_group * , struct config_item * ) ; }; struct configfs_subsystem { struct config_group su_group ; struct mutex su_mutex ; }; struct percpu_ida_cpu; struct __anonstruct____missing_field_name_407 { spinlock_t lock ; unsigned int cpu_last_stolen ; wait_queue_head_t wait ; unsigned int nr_free ; unsigned int *freelist ; }; struct percpu_ida { unsigned int nr_tags ; unsigned int percpu_max_size ; unsigned int percpu_batch_size ; struct percpu_ida_cpu *tag_cpu ; cpumask_t cpus_have_tags ; struct __anonstruct____missing_field_name_407 __annonCompField102 ; }; struct hlist_nulls_node; struct hlist_nulls_head { struct 
hlist_nulls_node *first ; }; struct hlist_nulls_node { struct hlist_nulls_node *next ; struct hlist_nulls_node **pprev ; }; struct dql { unsigned int num_queued ; unsigned int adj_limit ; unsigned int last_obj_cnt ; unsigned int limit ; unsigned int num_completed ; unsigned int prev_ovlimit ; unsigned int prev_num_queued ; unsigned int prev_last_obj_cnt ; unsigned int lowest_slack ; unsigned long slack_start_time ; unsigned int max_limit ; unsigned int min_limit ; unsigned int slack_hold_time ; }; struct __anonstruct_sync_serial_settings_409 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; }; typedef struct __anonstruct_sync_serial_settings_409 sync_serial_settings; struct __anonstruct_te1_settings_410 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; unsigned int slot_map ; }; typedef struct __anonstruct_te1_settings_410 te1_settings; struct __anonstruct_raw_hdlc_proto_411 { unsigned short encoding ; unsigned short parity ; }; typedef struct __anonstruct_raw_hdlc_proto_411 raw_hdlc_proto; struct __anonstruct_fr_proto_412 { unsigned int t391 ; unsigned int t392 ; unsigned int n391 ; unsigned int n392 ; unsigned int n393 ; unsigned short lmi ; unsigned short dce ; }; typedef struct __anonstruct_fr_proto_412 fr_proto; struct __anonstruct_fr_proto_pvc_413 { unsigned int dlci ; }; typedef struct __anonstruct_fr_proto_pvc_413 fr_proto_pvc; struct __anonstruct_fr_proto_pvc_info_414 { unsigned int dlci ; char master[16U] ; }; typedef struct __anonstruct_fr_proto_pvc_info_414 fr_proto_pvc_info; struct __anonstruct_cisco_proto_415 { unsigned int interval ; unsigned int timeout ; }; typedef struct __anonstruct_cisco_proto_415 cisco_proto; struct ifmap { unsigned long mem_start ; unsigned long mem_end ; unsigned short base_addr ; unsigned char irq ; unsigned char dma ; unsigned char port ; }; union __anonunion_ifs_ifsu_416 { raw_hdlc_proto *raw_hdlc ; cisco_proto *cisco ; fr_proto *fr ; fr_proto_pvc *fr_pvc ; fr_proto_pvc_info *fr_pvc_info ; sync_serial_settings *sync ; te1_settings *te1 ; }; struct if_settings { unsigned int type ; unsigned int size ; union __anonunion_ifs_ifsu_416 ifs_ifsu ; }; union __anonunion_ifr_ifrn_417 { char ifrn_name[16U] ; }; union __anonunion_ifr_ifru_418 { struct sockaddr ifru_addr ; struct sockaddr ifru_dstaddr ; struct sockaddr ifru_broadaddr ; struct sockaddr ifru_netmask ; struct sockaddr ifru_hwaddr ; short ifru_flags ; int ifru_ivalue ; int ifru_mtu ; struct ifmap ifru_map ; char ifru_slave[16U] ; char ifru_newname[16U] ; void *ifru_data ; struct if_settings ifru_settings ; }; struct ifreq { union __anonunion_ifr_ifrn_417 ifr_ifrn ; union __anonunion_ifr_ifru_418 ifr_ifru ; }; typedef s32 compat_time_t; typedef s32 compat_long_t; typedef u32 compat_uptr_t; struct compat_timespec { compat_time_t tv_sec ; s32 tv_nsec ; }; struct compat_robust_list { compat_uptr_t next ; }; struct compat_robust_list_head { struct compat_robust_list list ; compat_long_t futex_offset ; compat_uptr_t list_op_pending ; }; struct ethtool_cmd { __u32 cmd ; __u32 supported ; __u32 advertising ; __u16 speed ; __u8 duplex ; __u8 port ; __u8 phy_address ; __u8 transceiver ; __u8 autoneg ; __u8 mdio_support ; __u32 maxtxpkt ; __u32 maxrxpkt ; __u16 speed_hi ; __u8 eth_tp_mdix ; __u8 eth_tp_mdix_ctrl ; __u32 lp_advertising ; __u32 reserved[2U] ; }; struct ethtool_drvinfo { __u32 cmd ; char driver[32U] ; char version[32U] ; char fw_version[32U] ; char bus_info[32U] ; char erom_version[32U] ; char reserved2[12U] ; __u32 n_priv_flags ; 
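/*
 * The remaining ethtool_drvinfo members (n_stats, testinfo_len,
 * eedump_len, regdump_len) advertise how many statistics and self-test
 * entries and how many EEPROM and register-dump bytes the driver will
 * return, so callers can size their buffers before issuing the
 * corresponding ethtool requests.
 */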
__u32 n_stats ; __u32 testinfo_len ; __u32 eedump_len ; __u32 regdump_len ; }; struct ethtool_wolinfo { __u32 cmd ; __u32 supported ; __u32 wolopts ; __u8 sopass[6U] ; }; struct ethtool_tunable { __u32 cmd ; __u32 id ; __u32 type_id ; __u32 len ; void *data[0U] ; }; struct ethtool_regs { __u32 cmd ; __u32 version ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eeprom { __u32 cmd ; __u32 magic ; __u32 offset ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eee { __u32 cmd ; __u32 supported ; __u32 advertised ; __u32 lp_advertised ; __u32 eee_active ; __u32 eee_enabled ; __u32 tx_lpi_enabled ; __u32 tx_lpi_timer ; __u32 reserved[2U] ; }; struct ethtool_modinfo { __u32 cmd ; __u32 type ; __u32 eeprom_len ; __u32 reserved[8U] ; }; struct ethtool_coalesce { __u32 cmd ; __u32 rx_coalesce_usecs ; __u32 rx_max_coalesced_frames ; __u32 rx_coalesce_usecs_irq ; __u32 rx_max_coalesced_frames_irq ; __u32 tx_coalesce_usecs ; __u32 tx_max_coalesced_frames ; __u32 tx_coalesce_usecs_irq ; __u32 tx_max_coalesced_frames_irq ; __u32 stats_block_coalesce_usecs ; __u32 use_adaptive_rx_coalesce ; __u32 use_adaptive_tx_coalesce ; __u32 pkt_rate_low ; __u32 rx_coalesce_usecs_low ; __u32 rx_max_coalesced_frames_low ; __u32 tx_coalesce_usecs_low ; __u32 tx_max_coalesced_frames_low ; __u32 pkt_rate_high ; __u32 rx_coalesce_usecs_high ; __u32 rx_max_coalesced_frames_high ; __u32 tx_coalesce_usecs_high ; __u32 tx_max_coalesced_frames_high ; __u32 rate_sample_interval ; }; struct ethtool_ringparam { __u32 cmd ; __u32 rx_max_pending ; __u32 rx_mini_max_pending ; __u32 rx_jumbo_max_pending ; __u32 tx_max_pending ; __u32 rx_pending ; __u32 rx_mini_pending ; __u32 rx_jumbo_pending ; __u32 tx_pending ; }; struct ethtool_channels { __u32 cmd ; __u32 max_rx ; __u32 max_tx ; __u32 max_other ; __u32 max_combined ; __u32 rx_count ; __u32 tx_count ; __u32 other_count ; __u32 combined_count ; }; struct ethtool_pauseparam { __u32 cmd ; __u32 autoneg ; __u32 rx_pause ; __u32 tx_pause ; }; struct ethtool_test { __u32 cmd ; __u32 flags ; __u32 reserved ; __u32 len ; __u64 data[0U] ; }; struct ethtool_stats { __u32 cmd ; __u32 n_stats ; __u64 data[0U] ; }; struct ethtool_tcpip4_spec { __be32 ip4src ; __be32 ip4dst ; __be16 psrc ; __be16 pdst ; __u8 tos ; }; struct ethtool_ah_espip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 spi ; __u8 tos ; }; struct ethtool_usrip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 l4_4_bytes ; __u8 tos ; __u8 ip_ver ; __u8 proto ; }; union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec ; struct ethtool_tcpip4_spec udp_ip4_spec ; struct ethtool_tcpip4_spec sctp_ip4_spec ; struct ethtool_ah_espip4_spec ah_ip4_spec ; struct ethtool_ah_espip4_spec esp_ip4_spec ; struct ethtool_usrip4_spec usr_ip4_spec ; struct ethhdr ether_spec ; __u8 hdata[52U] ; }; struct ethtool_flow_ext { __u8 padding[2U] ; unsigned char h_dest[6U] ; __be16 vlan_etype ; __be16 vlan_tci ; __be32 data[2U] ; }; struct ethtool_rx_flow_spec { __u32 flow_type ; union ethtool_flow_union h_u ; struct ethtool_flow_ext h_ext ; union ethtool_flow_union m_u ; struct ethtool_flow_ext m_ext ; __u64 ring_cookie ; __u32 location ; }; struct ethtool_rxnfc { __u32 cmd ; __u32 flow_type ; __u64 data ; struct ethtool_rx_flow_spec fs ; __u32 rule_cnt ; __u32 rule_locs[0U] ; }; struct ethtool_flash { __u32 cmd ; __u32 region ; char data[128U] ; }; struct ethtool_dump { __u32 cmd ; __u32 version ; __u32 flag ; __u32 len ; __u8 data[0U] ; }; struct ethtool_ts_info { __u32 cmd ; __u32 so_timestamping ; __s32 phc_index ; __u32 tx_types ; __u32 
tx_reserved[3U] ; __u32 rx_filters ; __u32 rx_reserved[3U] ; }; enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ; struct ethtool_ops { int (*get_settings)(struct net_device * , struct ethtool_cmd * ) ; int (*set_settings)(struct net_device * , struct ethtool_cmd * ) ; void (*get_drvinfo)(struct net_device * , struct ethtool_drvinfo * ) ; int (*get_regs_len)(struct net_device * ) ; void (*get_regs)(struct net_device * , struct ethtool_regs * , void * ) ; void (*get_wol)(struct net_device * , struct ethtool_wolinfo * ) ; int (*set_wol)(struct net_device * , struct ethtool_wolinfo * ) ; u32 (*get_msglevel)(struct net_device * ) ; void (*set_msglevel)(struct net_device * , u32 ) ; int (*nway_reset)(struct net_device * ) ; u32 (*get_link)(struct net_device * ) ; int (*get_eeprom_len)(struct net_device * ) ; int (*get_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*set_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; int (*set_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; void (*get_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; int (*set_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; void (*get_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; int (*set_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; void (*self_test)(struct net_device * , struct ethtool_test * , u64 * ) ; void (*get_strings)(struct net_device * , u32 , u8 * ) ; int (*set_phys_id)(struct net_device * , enum ethtool_phys_id_state ) ; void (*get_ethtool_stats)(struct net_device * , struct ethtool_stats * , u64 * ) ; int (*begin)(struct net_device * ) ; void (*complete)(struct net_device * ) ; u32 (*get_priv_flags)(struct net_device * ) ; int (*set_priv_flags)(struct net_device * , u32 ) ; int (*get_sset_count)(struct net_device * , int ) ; int (*get_rxnfc)(struct net_device * , struct ethtool_rxnfc * , u32 * ) ; int (*set_rxnfc)(struct net_device * , struct ethtool_rxnfc * ) ; int (*flash_device)(struct net_device * , struct ethtool_flash * ) ; int (*reset)(struct net_device * , u32 * ) ; u32 (*get_rxfh_key_size)(struct net_device * ) ; u32 (*get_rxfh_indir_size)(struct net_device * ) ; int (*get_rxfh)(struct net_device * , u32 * , u8 * , u8 * ) ; int (*set_rxfh)(struct net_device * , u32 const * , u8 const * , u8 const ) ; void (*get_channels)(struct net_device * , struct ethtool_channels * ) ; int (*set_channels)(struct net_device * , struct ethtool_channels * ) ; int (*get_dump_flag)(struct net_device * , struct ethtool_dump * ) ; int (*get_dump_data)(struct net_device * , struct ethtool_dump * , void * ) ; int (*set_dump)(struct net_device * , struct ethtool_dump * ) ; int (*get_ts_info)(struct net_device * , struct ethtool_ts_info * ) ; int (*get_module_info)(struct net_device * , struct ethtool_modinfo * ) ; int (*get_module_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_eee)(struct net_device * , struct ethtool_eee * ) ; int (*set_eee)(struct net_device * , struct ethtool_eee * ) ; int (*get_tunable)(struct net_device * , struct ethtool_tunable const * , void * ) ; int (*set_tunable)(struct net_device * , struct ethtool_tunable const * , void const * ) ; }; struct prot_inuse; struct netns_core { struct ctl_table_header *sysctl_hdr ; int sysctl_somaxconn ; struct prot_inuse *inuse ; }; struct u64_stats_sync { }; struct ipstats_mib { u64 
mibs[36U] ; struct u64_stats_sync syncp ; }; struct icmp_mib { unsigned long mibs[28U] ; }; struct icmpmsg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6_mib { unsigned long mibs[6U] ; }; struct icmpv6_mib_device { atomic_long_t mibs[6U] ; }; struct icmpv6msg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6msg_mib_device { atomic_long_t mibs[512U] ; }; struct tcp_mib { unsigned long mibs[16U] ; }; struct udp_mib { unsigned long mibs[9U] ; }; struct linux_mib { unsigned long mibs[115U] ; }; struct linux_xfrm_mib { unsigned long mibs[29U] ; }; struct netns_mib { struct tcp_mib *tcp_statistics ; struct ipstats_mib *ip_statistics ; struct linux_mib *net_statistics ; struct udp_mib *udp_statistics ; struct udp_mib *udplite_statistics ; struct icmp_mib *icmp_statistics ; struct icmpmsg_mib *icmpmsg_statistics ; struct proc_dir_entry *proc_net_devsnmp6 ; struct udp_mib *udp_stats_in6 ; struct udp_mib *udplite_stats_in6 ; struct ipstats_mib *ipv6_statistics ; struct icmpv6_mib *icmpv6_statistics ; struct icmpv6msg_mib *icmpv6msg_statistics ; struct linux_xfrm_mib *xfrm_statistics ; }; struct netns_unix { int sysctl_max_dgram_qlen ; struct ctl_table_header *ctl ; }; struct netns_packet { struct mutex sklist_lock ; struct hlist_head sklist ; }; struct netns_frags { struct percpu_counter mem ; int timeout ; int high_thresh ; int low_thresh ; }; struct ipv4_devconf; struct fib_rules_ops; struct fib_table; struct local_ports { seqlock_t lock ; int range[2U] ; bool warned ; }; struct ping_group_range { seqlock_t lock ; kgid_t range[2U] ; }; struct inet_peer_base; struct xt_table; struct netns_ipv4 { struct ctl_table_header *forw_hdr ; struct ctl_table_header *frags_hdr ; struct ctl_table_header *ipv4_hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *xfrm4_hdr ; struct ipv4_devconf *devconf_all ; struct ipv4_devconf *devconf_dflt ; struct fib_rules_ops *rules_ops ; bool fib_has_custom_rules ; struct fib_table *fib_local ; struct fib_table *fib_main ; struct fib_table *fib_default ; int fib_num_tclassid_users ; struct hlist_head *fib_table_hash ; bool fib_offload_disabled ; struct sock *fibnl ; struct sock **icmp_sk ; struct sock *mc_autojoin_sk ; struct inet_peer_base *peers ; struct sock **tcp_sk ; struct netns_frags frags ; struct xt_table *iptable_filter ; struct xt_table *iptable_mangle ; struct xt_table *iptable_raw ; struct xt_table *arptable_filter ; struct xt_table *iptable_security ; struct xt_table *nat_table ; int sysctl_icmp_echo_ignore_all ; int sysctl_icmp_echo_ignore_broadcasts ; int sysctl_icmp_ignore_bogus_error_responses ; int sysctl_icmp_ratelimit ; int sysctl_icmp_ratemask ; int sysctl_icmp_errors_use_inbound_ifaddr ; struct local_ports ip_local_ports ; int sysctl_tcp_ecn ; int sysctl_tcp_ecn_fallback ; int sysctl_ip_no_pmtu_disc ; int sysctl_ip_fwd_use_pmtu ; int sysctl_ip_nonlocal_bind ; int sysctl_fwmark_reflect ; int sysctl_tcp_fwmark_accept ; int sysctl_tcp_mtu_probing ; int sysctl_tcp_base_mss ; int sysctl_tcp_probe_threshold ; u32 sysctl_tcp_probe_interval ; struct ping_group_range ping_group_range ; atomic_t dev_addr_genid ; unsigned long *sysctl_local_reserved_ports ; struct list_head mr_tables ; struct fib_rules_ops *mr_rules_ops ; atomic_t rt_genid ; }; struct neighbour; struct dst_ops { unsigned short family ; unsigned int gc_thresh ; int (*gc)(struct dst_ops * ) ; struct dst_entry *(*check)(struct dst_entry * , __u32 ) ; unsigned int (*default_advmss)(struct dst_entry const * ) ; unsigned int (*mtu)(struct dst_entry const * ) ; u32 
*(*cow_metrics)(struct dst_entry * , unsigned long ) ; void (*destroy)(struct dst_entry * ) ; void (*ifdown)(struct dst_entry * , struct net_device * , int ) ; struct dst_entry *(*negative_advice)(struct dst_entry * ) ; void (*link_failure)(struct sk_buff * ) ; void (*update_pmtu)(struct dst_entry * , struct sock * , struct sk_buff * , u32 ) ; void (*redirect)(struct dst_entry * , struct sock * , struct sk_buff * ) ; int (*local_out)(struct sk_buff * ) ; struct neighbour *(*neigh_lookup)(struct dst_entry const * , struct sk_buff * , void const * ) ; struct kmem_cache *kmem_cachep ; struct percpu_counter pcpuc_entries ; }; struct netns_sysctl_ipv6 { struct ctl_table_header *hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *icmp_hdr ; struct ctl_table_header *frags_hdr ; struct ctl_table_header *xfrm6_hdr ; int bindv6only ; int flush_delay ; int ip6_rt_max_size ; int ip6_rt_gc_min_interval ; int ip6_rt_gc_timeout ; int ip6_rt_gc_interval ; int ip6_rt_gc_elasticity ; int ip6_rt_mtu_expires ; int ip6_rt_min_advmss ; int flowlabel_consistency ; int auto_flowlabels ; int icmpv6_time ; int anycast_src_echo_reply ; int fwmark_reflect ; int idgen_retries ; int idgen_delay ; int flowlabel_state_ranges ; }; struct ipv6_devconf; struct rt6_info; struct rt6_statistics; struct fib6_table; struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl ; struct ipv6_devconf *devconf_all ; struct ipv6_devconf *devconf_dflt ; struct inet_peer_base *peers ; struct netns_frags frags ; struct xt_table *ip6table_filter ; struct xt_table *ip6table_mangle ; struct xt_table *ip6table_raw ; struct xt_table *ip6table_security ; struct xt_table *ip6table_nat ; struct rt6_info *ip6_null_entry ; struct rt6_statistics *rt6_stats ; struct timer_list ip6_fib_timer ; struct hlist_head *fib_table_hash ; struct fib6_table *fib6_main_tbl ; struct dst_ops ip6_dst_ops ; unsigned int ip6_rt_gc_expire ; unsigned long ip6_rt_last_gc ; struct rt6_info *ip6_prohibit_entry ; struct rt6_info *ip6_blk_hole_entry ; struct fib6_table *fib6_local_tbl ; struct fib_rules_ops *fib6_rules_ops ; struct sock **icmp_sk ; struct sock *ndisc_sk ; struct sock *tcp_sk ; struct sock *igmp_sk ; struct sock *mc_autojoin_sk ; struct list_head mr6_tables ; struct fib_rules_ops *mr6_rules_ops ; atomic_t dev_addr_genid ; atomic_t fib6_sernum ; }; struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl ; struct netns_frags frags ; }; struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr ; }; struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl ; struct netns_frags frags ; }; struct sctp_mib; struct netns_sctp { struct sctp_mib *sctp_statistics ; struct proc_dir_entry *proc_net_sctp ; struct ctl_table_header *sysctl_header ; struct sock *ctl_sock ; struct list_head local_addr_list ; struct list_head addr_waitq ; struct timer_list addr_wq_timer ; struct list_head auto_asconf_splist ; spinlock_t addr_wq_lock ; spinlock_t local_addr_lock ; unsigned int rto_initial ; unsigned int rto_min ; unsigned int rto_max ; int rto_alpha ; int rto_beta ; int max_burst ; int cookie_preserve_enable ; char *sctp_hmac_alg ; unsigned int valid_cookie_life ; unsigned int sack_timeout ; unsigned int hb_interval ; int max_retrans_association ; int max_retrans_path ; int max_retrans_init ; int pf_retrans ; int sndbuf_policy ; int rcvbuf_policy ; int default_auto_asconf ; int addip_enable ; int addip_noauth ; int prsctp_enable ; int auth_enable ; int scope_policy ; int rwnd_upd_shift ; unsigned long max_autoclose ; }; struct netns_dccp { struct sock 
*v4_ctl_sk ; struct sock *v6_ctl_sk ; }; struct nf_logger; struct netns_nf { struct proc_dir_entry *proc_netfilter ; struct nf_logger const *nf_loggers[13U] ; struct ctl_table_header *nf_log_dir_header ; }; struct ebt_table; struct netns_xt { struct list_head tables[13U] ; bool notrack_deprecated_warning ; bool clusterip_deprecated_warning ; struct ebt_table *broute_table ; struct ebt_table *frame_filter ; struct ebt_table *frame_nat ; }; struct nf_proto_net { struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; struct ctl_table_header *ctl_compat_header ; struct ctl_table *ctl_compat_table ; unsigned int users ; }; struct nf_generic_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_tcp_net { struct nf_proto_net pn ; unsigned int timeouts[14U] ; unsigned int tcp_loose ; unsigned int tcp_be_liberal ; unsigned int tcp_max_retrans ; }; struct nf_udp_net { struct nf_proto_net pn ; unsigned int timeouts[2U] ; }; struct nf_icmp_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_ip_net { struct nf_generic_net generic ; struct nf_tcp_net tcp ; struct nf_udp_net udp ; struct nf_icmp_net icmp ; struct nf_icmp_net icmpv6 ; struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; }; struct ct_pcpu { spinlock_t lock ; struct hlist_nulls_head unconfirmed ; struct hlist_nulls_head dying ; struct hlist_nulls_head tmpl ; }; struct ip_conntrack_stat; struct nf_ct_event_notifier; struct nf_exp_event_notifier; struct netns_ct { atomic_t count ; unsigned int expect_count ; struct delayed_work ecache_dwork ; bool ecache_dwork_pending ; struct ctl_table_header *sysctl_header ; struct ctl_table_header *acct_sysctl_header ; struct ctl_table_header *tstamp_sysctl_header ; struct ctl_table_header *event_sysctl_header ; struct ctl_table_header *helper_sysctl_header ; char *slabname ; unsigned int sysctl_log_invalid ; int sysctl_events ; int sysctl_acct ; int sysctl_auto_assign_helper ; bool auto_assign_helper_warned ; int sysctl_tstamp ; int sysctl_checksum ; unsigned int htable_size ; seqcount_t generation ; struct kmem_cache *nf_conntrack_cachep ; struct hlist_nulls_head *hash ; struct hlist_head *expect_hash ; struct ct_pcpu *pcpu_lists ; struct ip_conntrack_stat *stat ; struct nf_ct_event_notifier *nf_conntrack_event_cb ; struct nf_exp_event_notifier *nf_expect_event_cb ; struct nf_ip_net nf_ct_proto ; unsigned int labels_used ; u8 label_words ; struct hlist_head *nat_bysource ; unsigned int nat_htable_size ; }; struct nft_af_info; struct netns_nftables { struct list_head af_info ; struct list_head commit_list ; struct nft_af_info *ipv4 ; struct nft_af_info *ipv6 ; struct nft_af_info *inet ; struct nft_af_info *arp ; struct nft_af_info *bridge ; struct nft_af_info *netdev ; unsigned int base_seq ; u8 gencursor ; }; struct flow_cache_percpu { struct hlist_head *hash_table ; int hash_count ; u32 hash_rnd ; int hash_rnd_recalc ; struct tasklet_struct flush_tasklet ; }; struct flow_cache { u32 hash_shift ; struct flow_cache_percpu *percpu ; struct notifier_block hotcpu_notifier ; int low_watermark ; int high_watermark ; struct timer_list rnd_timer ; }; struct xfrm_policy_hash { struct hlist_head *table ; unsigned int hmask ; u8 dbits4 ; u8 sbits4 ; u8 dbits6 ; u8 sbits6 ; }; struct xfrm_policy_hthresh { struct work_struct work ; seqlock_t lock ; u8 lbits4 ; u8 rbits4 ; u8 lbits6 ; u8 rbits6 ; }; struct netns_xfrm { struct list_head state_all ; struct hlist_head *state_bydst ; struct hlist_head *state_bysrc ; struct hlist_head *state_byspi ; 
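/*
 * state_bydst, state_bysrc and state_byspi above are the per-namespace
 * hash tables for looking up xfrm (IPsec) states by destination, source
 * and SPI; state_hmask below is the shared hash mask and state_num the
 * count of states currently installed.
 */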
unsigned int state_hmask ; unsigned int state_num ; struct work_struct state_hash_work ; struct hlist_head state_gc_list ; struct work_struct state_gc_work ; struct list_head policy_all ; struct hlist_head *policy_byidx ; unsigned int policy_idx_hmask ; struct hlist_head policy_inexact[3U] ; struct xfrm_policy_hash policy_bydst[3U] ; unsigned int policy_count[6U] ; struct work_struct policy_hash_work ; struct xfrm_policy_hthresh policy_hthresh ; struct sock *nlsk ; struct sock *nlsk_stash ; u32 sysctl_aevent_etime ; u32 sysctl_aevent_rseqth ; int sysctl_larval_drop ; u32 sysctl_acq_expires ; struct ctl_table_header *sysctl_hdr ; struct dst_ops xfrm4_dst_ops ; struct dst_ops xfrm6_dst_ops ; spinlock_t xfrm_state_lock ; rwlock_t xfrm_policy_lock ; struct mutex xfrm_cfg_mutex ; struct flow_cache flow_cache_global ; atomic_t flow_cache_genid ; struct list_head flow_cache_gc_list ; spinlock_t flow_cache_gc_lock ; struct work_struct flow_cache_gc_work ; struct work_struct flow_cache_flush_work ; struct mutex flow_flush_sem ; }; struct mpls_route; struct netns_mpls { size_t platform_labels ; struct mpls_route **platform_label ; struct ctl_table_header *ctl ; }; struct proc_ns_operations; struct ns_common { atomic_long_t stashed ; struct proc_ns_operations const *ops ; unsigned int inum ; }; struct net_generic; struct netns_ipvs; struct net { atomic_t passive ; atomic_t count ; spinlock_t rules_mod_lock ; atomic64_t cookie_gen ; struct list_head list ; struct list_head cleanup_list ; struct list_head exit_list ; struct user_namespace *user_ns ; spinlock_t nsid_lock ; struct idr netns_ids ; struct ns_common ns ; struct proc_dir_entry *proc_net ; struct proc_dir_entry *proc_net_stat ; struct ctl_table_set sysctls ; struct sock *rtnl ; struct sock *genl_sock ; struct list_head dev_base_head ; struct hlist_head *dev_name_head ; struct hlist_head *dev_index_head ; unsigned int dev_base_seq ; int ifindex ; unsigned int dev_unreg_count ; struct list_head rules_ops ; struct net_device *loopback_dev ; struct netns_core core ; struct netns_mib mib ; struct netns_packet packet ; struct netns_unix unx ; struct netns_ipv4 ipv4 ; struct netns_ipv6 ipv6 ; struct netns_ieee802154_lowpan ieee802154_lowpan ; struct netns_sctp sctp ; struct netns_dccp dccp ; struct netns_nf nf ; struct netns_xt xt ; struct netns_ct ct ; struct netns_nftables nft ; struct netns_nf_frag nf_frag ; struct sock *nfnl ; struct sock *nfnl_stash ; struct sk_buff_head wext_nlevents ; struct net_generic *gen ; struct netns_xfrm xfrm ; struct netns_ipvs *ipvs ; struct netns_mpls mpls ; struct sock *diag_nlsk ; atomic_t fnhe_genid ; }; struct __anonstruct_possible_net_t_442 { struct net *net ; }; typedef struct __anonstruct_possible_net_t_442 possible_net_t; enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_PDATA = 3 } ; struct fwnode_handle { enum fwnode_type type ; struct fwnode_handle *secondary ; }; typedef u32 phandle; struct property { char *name ; int length ; void *value ; struct property *next ; unsigned long _flags ; unsigned int unique_id ; struct bin_attribute attr ; }; struct device_node { char const *name ; char const *type ; phandle phandle ; char const *full_name ; struct fwnode_handle fwnode ; struct property *properties ; struct property *deadprops ; struct device_node *parent ; struct device_node *child ; struct device_node *sibling ; struct kobject kobj ; unsigned long _flags ; void *data ; }; enum ldv_34518 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, 
PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, PHY_INTERFACE_MODE_MAX = 16 } ; typedef enum ldv_34518 phy_interface_t; enum ldv_34570 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ; struct phy_device; struct mii_bus { char const *name ; char id[17U] ; void *priv ; int (*read)(struct mii_bus * , int , int ) ; int (*write)(struct mii_bus * , int , int , u16 ) ; int (*reset)(struct mii_bus * ) ; struct mutex mdio_lock ; struct device *parent ; enum ldv_34570 state ; struct device dev ; struct phy_device *phy_map[32U] ; u32 phy_mask ; u32 phy_ignore_ta_mask ; int *irq ; }; enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ; struct phy_c45_device_ids { u32 devices_in_package ; u32 device_ids[8U] ; }; struct phy_driver; struct phy_device { struct phy_driver *drv ; struct mii_bus *bus ; struct device dev ; u32 phy_id ; struct phy_c45_device_ids c45_ids ; bool is_c45 ; bool is_internal ; bool has_fixups ; bool suspended ; enum phy_state state ; u32 dev_flags ; phy_interface_t interface ; int addr ; int speed ; int duplex ; int pause ; int asym_pause ; int link ; u32 interrupts ; u32 supported ; u32 advertising ; u32 lp_advertising ; int autoneg ; int link_timeout ; int irq ; void *priv ; struct work_struct phy_queue ; struct delayed_work state_queue ; atomic_t irq_disable ; struct mutex lock ; struct net_device *attached_dev ; void (*adjust_link)(struct net_device * ) ; }; struct phy_driver { u32 phy_id ; char *name ; unsigned int phy_id_mask ; u32 features ; u32 flags ; void const *driver_data ; int (*soft_reset)(struct phy_device * ) ; int (*config_init)(struct phy_device * ) ; int (*probe)(struct phy_device * ) ; int (*suspend)(struct phy_device * ) ; int (*resume)(struct phy_device * ) ; int (*config_aneg)(struct phy_device * ) ; int (*aneg_done)(struct phy_device * ) ; int (*read_status)(struct phy_device * ) ; int (*ack_interrupt)(struct phy_device * ) ; int (*config_intr)(struct phy_device * ) ; int (*did_interrupt)(struct phy_device * ) ; void (*remove)(struct phy_device * ) ; int (*match_phy_device)(struct phy_device * ) ; int (*ts_info)(struct phy_device * , struct ethtool_ts_info * ) ; int (*hwtstamp)(struct phy_device * , struct ifreq * ) ; bool (*rxtstamp)(struct phy_device * , struct sk_buff * , int ) ; void (*txtstamp)(struct phy_device * , struct sk_buff * , int ) ; int (*set_wol)(struct phy_device * , struct ethtool_wolinfo * ) ; void (*get_wol)(struct phy_device * , struct ethtool_wolinfo * ) ; void (*link_change_notify)(struct phy_device * ) ; int (*read_mmd_indirect)(struct phy_device * , int , int , int ) ; void (*write_mmd_indirect)(struct phy_device * , int , int , int , u32 ) ; int (*module_info)(struct phy_device * , struct ethtool_modinfo * ) ; int (*module_eeprom)(struct phy_device * , struct ethtool_eeprom * , u8 * ) ; struct device_driver driver ; }; struct fixed_phy_status { int link ; int speed ; int duplex ; int pause ; int asym_pause ; }; enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA 
= 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4 } ; struct dsa_chip_data { struct device *host_dev ; int sw_addr ; int eeprom_len ; struct device_node *of_node ; char *port_names[12U] ; struct device_node *port_dn[12U] ; s8 *rtable ; }; struct dsa_platform_data { struct device *netdev ; struct net_device *of_netdev ; int nr_chips ; struct dsa_chip_data *chip ; }; struct packet_type; struct dsa_switch; struct dsa_switch_tree { struct dsa_platform_data *pd ; struct net_device *master_netdev ; int (*rcv)(struct sk_buff * , struct net_device * , struct packet_type * , struct net_device * ) ; enum dsa_tag_protocol tag_protocol ; s8 cpu_switch ; s8 cpu_port ; int link_poll_needed ; struct work_struct link_poll_work ; struct timer_list link_poll_timer ; struct dsa_switch *ds[4U] ; }; struct dsa_switch_driver; struct dsa_switch { struct dsa_switch_tree *dst ; int index ; enum dsa_tag_protocol tag_protocol ; struct dsa_chip_data *pd ; struct dsa_switch_driver *drv ; struct device *master_dev ; char hwmon_name[24U] ; struct device *hwmon_dev ; u32 dsa_port_mask ; u32 phys_port_mask ; u32 phys_mii_mask ; struct mii_bus *slave_mii_bus ; struct net_device *ports[12U] ; }; struct dsa_switch_driver { struct list_head list ; enum dsa_tag_protocol tag_protocol ; int priv_size ; char *(*probe)(struct device * , int ) ; int (*setup)(struct dsa_switch * ) ; int (*set_addr)(struct dsa_switch * , u8 * ) ; u32 (*get_phy_flags)(struct dsa_switch * , int ) ; int (*phy_read)(struct dsa_switch * , int , int ) ; int (*phy_write)(struct dsa_switch * , int , int , u16 ) ; void (*poll_link)(struct dsa_switch * ) ; void (*adjust_link)(struct dsa_switch * , int , struct phy_device * ) ; void (*fixed_link_update)(struct dsa_switch * , int , struct fixed_phy_status * ) ; void (*get_strings)(struct dsa_switch * , int , uint8_t * ) ; void (*get_ethtool_stats)(struct dsa_switch * , int , uint64_t * ) ; int (*get_sset_count)(struct dsa_switch * ) ; void (*get_wol)(struct dsa_switch * , int , struct ethtool_wolinfo * ) ; int (*set_wol)(struct dsa_switch * , int , struct ethtool_wolinfo * ) ; int (*suspend)(struct dsa_switch * ) ; int (*resume)(struct dsa_switch * ) ; int (*port_enable)(struct dsa_switch * , int , struct phy_device * ) ; void (*port_disable)(struct dsa_switch * , int , struct phy_device * ) ; int (*set_eee)(struct dsa_switch * , int , struct phy_device * , struct ethtool_eee * ) ; int (*get_eee)(struct dsa_switch * , int , struct ethtool_eee * ) ; int (*get_temp)(struct dsa_switch * , int * ) ; int (*get_temp_limit)(struct dsa_switch * , int * ) ; int (*set_temp_limit)(struct dsa_switch * , int ) ; int (*get_temp_alarm)(struct dsa_switch * , bool * ) ; int (*get_eeprom_len)(struct dsa_switch * ) ; int (*get_eeprom)(struct dsa_switch * , struct ethtool_eeprom * , u8 * ) ; int (*set_eeprom)(struct dsa_switch * , struct ethtool_eeprom * , u8 * ) ; int (*get_regs_len)(struct dsa_switch * , int ) ; void (*get_regs)(struct dsa_switch * , int , struct ethtool_regs * , void * ) ; int (*port_join_bridge)(struct dsa_switch * , int , u32 ) ; int (*port_leave_bridge)(struct dsa_switch * , int , u32 ) ; int (*port_stp_update)(struct dsa_switch * , int , u8 ) ; int (*fdb_add)(struct dsa_switch * , int , unsigned char const * , u16 ) ; int (*fdb_del)(struct dsa_switch * , int , unsigned char const * , u16 ) ; int (*fdb_getnext)(struct dsa_switch * , int , unsigned char * , bool * ) ; }; struct ieee_ets { __u8 willing ; __u8 ets_cap ; __u8 cbs ; __u8 tc_tx_bw[8U] ; __u8 tc_rx_bw[8U] ; __u8 
tc_tsa[8U] ; __u8 prio_tc[8U] ; __u8 tc_reco_bw[8U] ; __u8 tc_reco_tsa[8U] ; __u8 reco_prio_tc[8U] ; }; struct ieee_maxrate { __u64 tc_maxrate[8U] ; }; struct ieee_qcn { __u8 rpg_enable[8U] ; __u32 rppp_max_rps[8U] ; __u32 rpg_time_reset[8U] ; __u32 rpg_byte_reset[8U] ; __u32 rpg_threshold[8U] ; __u32 rpg_max_rate[8U] ; __u32 rpg_ai_rate[8U] ; __u32 rpg_hai_rate[8U] ; __u32 rpg_gd[8U] ; __u32 rpg_min_dec_fac[8U] ; __u32 rpg_min_rate[8U] ; __u32 cndd_state_machine[8U] ; }; struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U] ; __u32 rppp_created_rps[8U] ; }; struct ieee_pfc { __u8 pfc_cap ; __u8 pfc_en ; __u8 mbc ; __u16 delay ; __u64 requests[8U] ; __u64 indications[8U] ; }; struct cee_pg { __u8 willing ; __u8 error ; __u8 pg_en ; __u8 tcs_supported ; __u8 pg_bw[8U] ; __u8 prio_pg[8U] ; }; struct cee_pfc { __u8 willing ; __u8 error ; __u8 pfc_en ; __u8 tcs_supported ; }; struct dcb_app { __u8 selector ; __u8 priority ; __u16 protocol ; }; struct dcb_peer_app_info { __u8 willing ; __u8 error ; }; struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_setets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_getmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_setmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_getqcn)(struct net_device * , struct ieee_qcn * ) ; int (*ieee_setqcn)(struct net_device * , struct ieee_qcn * ) ; int (*ieee_getqcnstats)(struct net_device * , struct ieee_qcn_stats * ) ; int (*ieee_getpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_setpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_getapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_setapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_delapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_peer_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_peer_getpfc)(struct net_device * , struct ieee_pfc * ) ; u8 (*getstate)(struct net_device * ) ; u8 (*setstate)(struct net_device * , u8 ) ; void (*getpermhwaddr)(struct net_device * , u8 * ) ; void (*setpgtccfgtx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgtx)(struct net_device * , int , u8 ) ; void (*setpgtccfgrx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgrx)(struct net_device * , int , u8 ) ; void (*getpgtccfgtx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgtx)(struct net_device * , int , u8 * ) ; void (*getpgtccfgrx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgrx)(struct net_device * , int , u8 * ) ; void (*setpfccfg)(struct net_device * , int , u8 ) ; void (*getpfccfg)(struct net_device * , int , u8 * ) ; u8 (*setall)(struct net_device * ) ; u8 (*getcap)(struct net_device * , int , u8 * ) ; int (*getnumtcs)(struct net_device * , int , u8 * ) ; int (*setnumtcs)(struct net_device * , int , u8 ) ; u8 (*getpfcstate)(struct net_device * ) ; void (*setpfcstate)(struct net_device * , u8 ) ; void (*getbcncfg)(struct net_device * , int , u32 * ) ; void (*setbcncfg)(struct net_device * , int , u32 ) ; void (*getbcnrp)(struct net_device * , int , u8 * ) ; void (*setbcnrp)(struct net_device * , int , u8 ) ; int (*setapp)(struct net_device * , u8 , u16 , u8 ) ; int (*getapp)(struct net_device * , u8 , u16 ) ; u8 (*getfeatcfg)(struct net_device * , int , u8 * ) ; u8 (*setfeatcfg)(struct net_device * , int , u8 ) ; u8 (*getdcbx)(struct net_device * ) ; u8 (*setdcbx)(struct net_device * , u8 ) ; int (*peer_getappinfo)(struct 
net_device * , struct dcb_peer_app_info * , u16 * ) ; int (*peer_getapptable)(struct net_device * , struct dcb_app * ) ; int (*cee_peer_getpg)(struct net_device * , struct cee_pg * ) ; int (*cee_peer_getpfc)(struct net_device * , struct cee_pfc * ) ; }; struct taskstats { __u16 version ; __u32 ac_exitcode ; __u8 ac_flag ; __u8 ac_nice ; __u64 cpu_count ; __u64 cpu_delay_total ; __u64 blkio_count ; __u64 blkio_delay_total ; __u64 swapin_count ; __u64 swapin_delay_total ; __u64 cpu_run_real_total ; __u64 cpu_run_virtual_total ; char ac_comm[32U] ; __u8 ac_sched ; __u8 ac_pad[3U] ; __u32 ac_uid ; __u32 ac_gid ; __u32 ac_pid ; __u32 ac_ppid ; __u32 ac_btime ; __u64 ac_etime ; __u64 ac_utime ; __u64 ac_stime ; __u64 ac_minflt ; __u64 ac_majflt ; __u64 coremem ; __u64 virtmem ; __u64 hiwater_rss ; __u64 hiwater_vm ; __u64 read_char ; __u64 write_char ; __u64 read_syscalls ; __u64 write_syscalls ; __u64 read_bytes ; __u64 write_bytes ; __u64 cancelled_write_bytes ; __u64 nvcsw ; __u64 nivcsw ; __u64 ac_utimescaled ; __u64 ac_stimescaled ; __u64 cpu_scaled_run_real_total ; __u64 freepages_count ; __u64 freepages_delay_total ; }; struct netprio_map { struct callback_head rcu ; u32 priomap_len ; u32 priomap[] ; }; struct ndmsg { __u8 ndm_family ; __u8 ndm_pad1 ; __u16 ndm_pad2 ; __s32 ndm_ifindex ; __u16 ndm_state ; __u8 ndm_flags ; __u8 ndm_type ; }; struct rtnl_link_stats64 { __u64 rx_packets ; __u64 tx_packets ; __u64 rx_bytes ; __u64 tx_bytes ; __u64 rx_errors ; __u64 tx_errors ; __u64 rx_dropped ; __u64 tx_dropped ; __u64 multicast ; __u64 collisions ; __u64 rx_length_errors ; __u64 rx_over_errors ; __u64 rx_crc_errors ; __u64 rx_frame_errors ; __u64 rx_fifo_errors ; __u64 rx_missed_errors ; __u64 tx_aborted_errors ; __u64 tx_carrier_errors ; __u64 tx_fifo_errors ; __u64 tx_heartbeat_errors ; __u64 tx_window_errors ; __u64 rx_compressed ; __u64 tx_compressed ; }; struct ifla_vf_stats { __u64 rx_packets ; __u64 tx_packets ; __u64 rx_bytes ; __u64 tx_bytes ; __u64 broadcast ; __u64 multicast ; }; struct ifla_vf_info { __u32 vf ; __u8 mac[32U] ; __u32 vlan ; __u32 qos ; __u32 spoofchk ; __u32 linkstate ; __u32 min_tx_rate ; __u32 max_tx_rate ; __u32 rss_query_en ; }; struct netpoll_info; struct wireless_dev; struct wpan_dev; struct mpls_dev; enum netdev_tx { __NETDEV_TX_MIN = (-0x7FFFFFFF-1), NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, NETDEV_TX_LOCKED = 32 } ; typedef enum netdev_tx netdev_tx_t; struct net_device_stats { unsigned long rx_packets ; unsigned long tx_packets ; unsigned long rx_bytes ; unsigned long tx_bytes ; unsigned long rx_errors ; unsigned long tx_errors ; unsigned long rx_dropped ; unsigned long tx_dropped ; unsigned long multicast ; unsigned long collisions ; unsigned long rx_length_errors ; unsigned long rx_over_errors ; unsigned long rx_crc_errors ; unsigned long rx_frame_errors ; unsigned long rx_fifo_errors ; unsigned long rx_missed_errors ; unsigned long tx_aborted_errors ; unsigned long tx_carrier_errors ; unsigned long tx_fifo_errors ; unsigned long tx_heartbeat_errors ; unsigned long tx_window_errors ; unsigned long rx_compressed ; unsigned long tx_compressed ; }; struct neigh_parms; struct netdev_hw_addr_list { struct list_head list ; int count ; }; struct hh_cache { u16 hh_len ; u16 __pad ; seqlock_t hh_lock ; unsigned long hh_data[16U] ; }; struct header_ops { int (*create)(struct sk_buff * , struct net_device * , unsigned short , void const * , void const * , unsigned int ) ; int (*parse)(struct sk_buff const * , unsigned char * ) ; int (*cache)(struct neighbour const * 
, struct hh_cache * , __be16 ) ; void (*cache_update)(struct hh_cache * , struct net_device const * , unsigned char const * ) ; }; struct napi_struct { struct list_head poll_list ; unsigned long state ; int weight ; unsigned int gro_count ; int (*poll)(struct napi_struct * , int ) ; spinlock_t poll_lock ; int poll_owner ; struct net_device *dev ; struct sk_buff *gro_list ; struct sk_buff *skb ; struct hrtimer timer ; struct list_head dev_list ; struct hlist_node napi_hash_node ; unsigned int napi_id ; }; enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff ** ); struct Qdisc; struct netdev_queue { struct net_device *dev ; struct Qdisc *qdisc ; struct Qdisc *qdisc_sleeping ; struct kobject kobj ; int numa_node ; spinlock_t _xmit_lock ; int xmit_lock_owner ; unsigned long trans_start ; unsigned long trans_timeout ; unsigned long state ; struct dql dql ; unsigned long tx_maxrate ; }; struct rps_map { unsigned int len ; struct callback_head rcu ; u16 cpus[0U] ; }; struct rps_dev_flow { u16 cpu ; u16 filter ; unsigned int last_qtail ; }; struct rps_dev_flow_table { unsigned int mask ; struct callback_head rcu ; struct rps_dev_flow flows[0U] ; }; struct netdev_rx_queue { struct rps_map *rps_map ; struct rps_dev_flow_table *rps_flow_table ; struct kobject kobj ; struct net_device *dev ; }; struct xps_map { unsigned int len ; unsigned int alloc_len ; struct callback_head rcu ; u16 queues[0U] ; }; struct xps_dev_maps { struct callback_head rcu ; struct xps_map *cpu_map[0U] ; }; struct netdev_tc_txq { u16 count ; u16 offset ; }; struct netdev_fcoe_hbainfo { char manufacturer[64U] ; char serial_number[64U] ; char hardware_version[64U] ; char driver_version[64U] ; char optionrom_version[64U] ; char firmware_version[64U] ; char model[256U] ; char model_description[256U] ; }; struct netdev_phys_item_id { unsigned char id[32U] ; unsigned char id_len ; }; struct net_device_ops { int (*ndo_init)(struct net_device * ) ; void (*ndo_uninit)(struct net_device * ) ; int (*ndo_open)(struct net_device * ) ; int (*ndo_stop)(struct net_device * ) ; netdev_tx_t (*ndo_start_xmit)(struct sk_buff * , struct net_device * ) ; u16 (*ndo_select_queue)(struct net_device * , struct sk_buff * , void * , u16 (*)(struct net_device * , struct sk_buff * ) ) ; void (*ndo_change_rx_flags)(struct net_device * , int ) ; void (*ndo_set_rx_mode)(struct net_device * ) ; int (*ndo_set_mac_address)(struct net_device * , void * ) ; int (*ndo_validate_addr)(struct net_device * ) ; int (*ndo_do_ioctl)(struct net_device * , struct ifreq * , int ) ; int (*ndo_set_config)(struct net_device * , struct ifmap * ) ; int (*ndo_change_mtu)(struct net_device * , int ) ; int (*ndo_neigh_setup)(struct net_device * , struct neigh_parms * ) ; void (*ndo_tx_timeout)(struct net_device * ) ; struct rtnl_link_stats64 *(*ndo_get_stats64)(struct net_device * , struct rtnl_link_stats64 * ) ; struct net_device_stats *(*ndo_get_stats)(struct net_device * ) ; int (*ndo_vlan_rx_add_vid)(struct net_device * , __be16 , u16 ) ; int (*ndo_vlan_rx_kill_vid)(struct net_device * , __be16 , u16 ) ; void (*ndo_poll_controller)(struct net_device * ) ; int (*ndo_netpoll_setup)(struct net_device * , struct netpoll_info * ) ; void (*ndo_netpoll_cleanup)(struct net_device * ) ; int (*ndo_busy_poll)(struct napi_struct * ) ; int (*ndo_set_vf_mac)(struct net_device * , int , u8 * ) ; int 
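/* SR-IOV virtual-function configuration callbacks (ndo_set_vf_*, ndo_get_vf_*) continue below. */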
(*ndo_set_vf_vlan)(struct net_device * , int , u16 , u8 ) ; int (*ndo_set_vf_rate)(struct net_device * , int , int , int ) ; int (*ndo_set_vf_spoofchk)(struct net_device * , int , bool ) ; int (*ndo_get_vf_config)(struct net_device * , int , struct ifla_vf_info * ) ; int (*ndo_set_vf_link_state)(struct net_device * , int , int ) ; int (*ndo_get_vf_stats)(struct net_device * , int , struct ifla_vf_stats * ) ; int (*ndo_set_vf_port)(struct net_device * , int , struct nlattr ** ) ; int (*ndo_get_vf_port)(struct net_device * , int , struct sk_buff * ) ; int (*ndo_set_vf_rss_query_en)(struct net_device * , int , bool ) ; int (*ndo_setup_tc)(struct net_device * , u8 ) ; int (*ndo_fcoe_enable)(struct net_device * ) ; int (*ndo_fcoe_disable)(struct net_device * ) ; int (*ndo_fcoe_ddp_setup)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_ddp_done)(struct net_device * , u16 ) ; int (*ndo_fcoe_ddp_target)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_get_hbainfo)(struct net_device * , struct netdev_fcoe_hbainfo * ) ; int (*ndo_fcoe_get_wwn)(struct net_device * , u64 * , int ) ; int (*ndo_rx_flow_steer)(struct net_device * , struct sk_buff const * , u16 , u32 ) ; int (*ndo_add_slave)(struct net_device * , struct net_device * ) ; int (*ndo_del_slave)(struct net_device * , struct net_device * ) ; netdev_features_t (*ndo_fix_features)(struct net_device * , netdev_features_t ) ; int (*ndo_set_features)(struct net_device * , netdev_features_t ) ; int (*ndo_neigh_construct)(struct neighbour * ) ; void (*ndo_neigh_destroy)(struct neighbour * ) ; int (*ndo_fdb_add)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * , u16 , u16 ) ; int (*ndo_fdb_del)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * , u16 ) ; int (*ndo_fdb_dump)(struct sk_buff * , struct netlink_callback * , struct net_device * , struct net_device * , int ) ; int (*ndo_bridge_setlink)(struct net_device * , struct nlmsghdr * , u16 ) ; int (*ndo_bridge_getlink)(struct sk_buff * , u32 , u32 , struct net_device * , u32 , int ) ; int (*ndo_bridge_dellink)(struct net_device * , struct nlmsghdr * , u16 ) ; int (*ndo_change_carrier)(struct net_device * , bool ) ; int (*ndo_get_phys_port_id)(struct net_device * , struct netdev_phys_item_id * ) ; int (*ndo_get_phys_port_name)(struct net_device * , char * , size_t ) ; void (*ndo_add_vxlan_port)(struct net_device * , sa_family_t , __be16 ) ; void (*ndo_del_vxlan_port)(struct net_device * , sa_family_t , __be16 ) ; void *(*ndo_dfwd_add_station)(struct net_device * , struct net_device * ) ; void (*ndo_dfwd_del_station)(struct net_device * , void * ) ; netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff * , struct net_device * , void * ) ; int (*ndo_get_lock_subclass)(struct net_device * ) ; netdev_features_t (*ndo_features_check)(struct sk_buff * , struct net_device * , netdev_features_t ) ; int (*ndo_set_tx_maxrate)(struct net_device * , int , u32 ) ; int (*ndo_get_iflink)(struct net_device const * ) ; }; struct __anonstruct_adj_list_452 { struct list_head upper ; struct list_head lower ; }; struct __anonstruct_all_adj_list_453 { struct list_head upper ; struct list_head lower ; }; struct iw_handler_def; struct iw_public_data; struct switchdev_ops; struct vlan_info; struct tipc_bearer; struct in_device; struct dn_dev; struct inet6_dev; struct tcf_proto; struct cpu_rmap; struct pcpu_lstats; struct pcpu_sw_netstats; struct pcpu_dstats; struct pcpu_vstats; union 
__anonunion____missing_field_name_454 { void *ml_priv ; struct pcpu_lstats *lstats ; struct pcpu_sw_netstats *tstats ; struct pcpu_dstats *dstats ; struct pcpu_vstats *vstats ; }; struct garp_port; struct mrp_port; struct rtnl_link_ops; struct net_device { char name[16U] ; struct hlist_node name_hlist ; char *ifalias ; unsigned long mem_end ; unsigned long mem_start ; unsigned long base_addr ; int irq ; atomic_t carrier_changes ; unsigned long state ; struct list_head dev_list ; struct list_head napi_list ; struct list_head unreg_list ; struct list_head close_list ; struct list_head ptype_all ; struct list_head ptype_specific ; struct __anonstruct_adj_list_452 adj_list ; struct __anonstruct_all_adj_list_453 all_adj_list ; netdev_features_t features ; netdev_features_t hw_features ; netdev_features_t wanted_features ; netdev_features_t vlan_features ; netdev_features_t hw_enc_features ; netdev_features_t mpls_features ; int ifindex ; int group ; struct net_device_stats stats ; atomic_long_t rx_dropped ; atomic_long_t tx_dropped ; struct iw_handler_def const *wireless_handlers ; struct iw_public_data *wireless_data ; struct net_device_ops const *netdev_ops ; struct ethtool_ops const *ethtool_ops ; struct switchdev_ops const *switchdev_ops ; struct header_ops const *header_ops ; unsigned int flags ; unsigned int priv_flags ; unsigned short gflags ; unsigned short padded ; unsigned char operstate ; unsigned char link_mode ; unsigned char if_port ; unsigned char dma ; unsigned int mtu ; unsigned short type ; unsigned short hard_header_len ; unsigned short needed_headroom ; unsigned short needed_tailroom ; unsigned char perm_addr[32U] ; unsigned char addr_assign_type ; unsigned char addr_len ; unsigned short neigh_priv_len ; unsigned short dev_id ; unsigned short dev_port ; spinlock_t addr_list_lock ; unsigned char name_assign_type ; bool uc_promisc ; struct netdev_hw_addr_list uc ; struct netdev_hw_addr_list mc ; struct netdev_hw_addr_list dev_addrs ; struct kset *queues_kset ; unsigned int promiscuity ; unsigned int allmulti ; struct vlan_info *vlan_info ; struct dsa_switch_tree *dsa_ptr ; struct tipc_bearer *tipc_ptr ; void *atalk_ptr ; struct in_device *ip_ptr ; struct dn_dev *dn_ptr ; struct inet6_dev *ip6_ptr ; void *ax25_ptr ; struct wireless_dev *ieee80211_ptr ; struct wpan_dev *ieee802154_ptr ; struct mpls_dev *mpls_ptr ; unsigned long last_rx ; unsigned char *dev_addr ; struct netdev_rx_queue *_rx ; unsigned int num_rx_queues ; unsigned int real_num_rx_queues ; unsigned long gro_flush_timeout ; rx_handler_func_t *rx_handler ; void *rx_handler_data ; struct tcf_proto *ingress_cl_list ; struct netdev_queue *ingress_queue ; struct list_head nf_hooks_ingress ; unsigned char broadcast[32U] ; struct cpu_rmap *rx_cpu_rmap ; struct hlist_node index_hlist ; struct netdev_queue *_tx ; unsigned int num_tx_queues ; unsigned int real_num_tx_queues ; struct Qdisc *qdisc ; unsigned long tx_queue_len ; spinlock_t tx_global_lock ; int watchdog_timeo ; struct xps_dev_maps *xps_maps ; unsigned long trans_start ; struct timer_list watchdog_timer ; int *pcpu_refcnt ; struct list_head todo_list ; struct list_head link_watch_list ; unsigned char reg_state ; bool dismantle ; unsigned short rtnl_link_state ; void (*destructor)(struct net_device * ) ; struct netpoll_info *npinfo ; possible_net_t nd_net ; union __anonunion____missing_field_name_454 __annonCompField105 ; struct garp_port *garp_port ; struct mrp_port *mrp_port ; struct device dev ; struct attribute_group const *sysfs_groups[4U] ; struct 
attribute_group const *sysfs_rx_queue_group ; struct rtnl_link_ops const *rtnl_link_ops ; unsigned int gso_max_size ; u16 gso_max_segs ; u16 gso_min_segs ; struct dcbnl_rtnl_ops const *dcbnl_ops ; u8 num_tc ; struct netdev_tc_txq tc_to_txq[16U] ; u8 prio_tc_map[16U] ; unsigned int fcoe_ddp_xid ; struct netprio_map *priomap ; struct phy_device *phydev ; struct lock_class_key *qdisc_tx_busylock ; }; struct packet_type { __be16 type ; struct net_device *dev ; int (*func)(struct sk_buff * , struct net_device * , struct packet_type * , struct net_device * ) ; bool (*id_match)(struct packet_type * , struct sock * ) ; void *af_packet_priv ; struct list_head list ; }; struct pcpu_sw_netstats { u64 rx_packets ; u64 rx_bytes ; u64 tx_packets ; u64 tx_bytes ; struct u64_stats_sync syncp ; }; struct page_counter { atomic_long_t count ; unsigned long limit ; struct page_counter *parent ; unsigned long watermark ; unsigned long failcnt ; }; struct sock_filter { __u16 code ; __u8 jt ; __u8 jf ; __u32 k ; }; struct bpf_insn { __u8 code ; unsigned char dst_reg : 4 ; unsigned char src_reg : 4 ; __s16 off ; __s32 imm ; }; enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC = 0, BPF_PROG_TYPE_SOCKET_FILTER = 1, BPF_PROG_TYPE_KPROBE = 2, BPF_PROG_TYPE_SCHED_CLS = 3, BPF_PROG_TYPE_SCHED_ACT = 4 } ; struct bpf_prog_aux; struct sock_fprog_kern { u16 len ; struct sock_filter *filter ; }; union __anonunion____missing_field_name_465 { struct sock_filter insns[0U] ; struct bpf_insn insnsi[0U] ; }; struct bpf_prog { u16 pages ; bool jited ; bool gpl_compatible ; u32 len ; enum bpf_prog_type type ; struct bpf_prog_aux *aux ; struct sock_fprog_kern *orig_prog ; unsigned int (*bpf_func)(struct sk_buff const * , struct bpf_insn const * ) ; union __anonunion____missing_field_name_465 __annonCompField110 ; }; struct sk_filter { atomic_t refcnt ; struct callback_head rcu ; struct bpf_prog *prog ; }; struct pollfd { int fd ; short events ; short revents ; }; struct poll_table_struct { void (*_qproc)(struct file * , wait_queue_head_t * , struct poll_table_struct * ) ; unsigned long _key ; }; struct nla_policy { u16 type ; u16 len ; }; struct rtnl_link_ops { struct list_head list ; char const *kind ; size_t priv_size ; void (*setup)(struct net_device * ) ; int maxtype ; struct nla_policy const *policy ; int (*validate)(struct nlattr ** , struct nlattr ** ) ; int (*newlink)(struct net * , struct net_device * , struct nlattr ** , struct nlattr ** ) ; int (*changelink)(struct net_device * , struct nlattr ** , struct nlattr ** ) ; void (*dellink)(struct net_device * , struct list_head * ) ; size_t (*get_size)(struct net_device const * ) ; int (*fill_info)(struct sk_buff * , struct net_device const * ) ; size_t (*get_xstats_size)(struct net_device const * ) ; int (*fill_xstats)(struct sk_buff * , struct net_device const * ) ; unsigned int (*get_num_tx_queues)(void) ; unsigned int (*get_num_rx_queues)(void) ; int slave_maxtype ; struct nla_policy const *slave_policy ; int (*slave_validate)(struct nlattr ** , struct nlattr ** ) ; int (*slave_changelink)(struct net_device * , struct net_device * , struct nlattr ** , struct nlattr ** ) ; size_t (*get_slave_size)(struct net_device const * , struct net_device const * ) ; int (*fill_slave_info)(struct sk_buff * , struct net_device const * , struct net_device const * ) ; struct net *(*get_link_net)(struct net_device const * ) ; }; struct neigh_table; struct neigh_parms { possible_net_t net ; struct net_device *dev ; struct list_head list ; int (*neigh_setup)(struct neighbour * ) ; void 
(*neigh_cleanup)(struct neighbour * ) ; struct neigh_table *tbl ; void *sysctl_table ; int dead ; atomic_t refcnt ; struct callback_head callback_head ; int reachable_time ; int data[13U] ; unsigned long data_state[1U] ; }; struct neigh_statistics { unsigned long allocs ; unsigned long destroys ; unsigned long hash_grows ; unsigned long res_failed ; unsigned long lookups ; unsigned long hits ; unsigned long rcv_probes_mcast ; unsigned long rcv_probes_ucast ; unsigned long periodic_gc_runs ; unsigned long forced_gc_runs ; unsigned long unres_discards ; }; struct neigh_ops; struct neighbour { struct neighbour *next ; struct neigh_table *tbl ; struct neigh_parms *parms ; unsigned long confirmed ; unsigned long updated ; rwlock_t lock ; atomic_t refcnt ; struct sk_buff_head arp_queue ; unsigned int arp_queue_len_bytes ; struct timer_list timer ; unsigned long used ; atomic_t probes ; __u8 flags ; __u8 nud_state ; __u8 type ; __u8 dead ; seqlock_t ha_lock ; unsigned char ha[32U] ; struct hh_cache hh ; int (*output)(struct neighbour * , struct sk_buff * ) ; struct neigh_ops const *ops ; struct callback_head rcu ; struct net_device *dev ; u8 primary_key[0U] ; }; struct neigh_ops { int family ; void (*solicit)(struct neighbour * , struct sk_buff * ) ; void (*error_report)(struct neighbour * , struct sk_buff * ) ; int (*output)(struct neighbour * , struct sk_buff * ) ; int (*connected_output)(struct neighbour * , struct sk_buff * ) ; }; struct pneigh_entry { struct pneigh_entry *next ; possible_net_t net ; struct net_device *dev ; u8 flags ; u8 key[0U] ; }; struct neigh_hash_table { struct neighbour **hash_buckets ; unsigned int hash_shift ; __u32 hash_rnd[4U] ; struct callback_head rcu ; }; struct neigh_table { int family ; int entry_size ; int key_len ; __be16 protocol ; __u32 (*hash)(void const * , struct net_device const * , __u32 * ) ; bool (*key_eq)(struct neighbour const * , void const * ) ; int (*constructor)(struct neighbour * ) ; int (*pconstructor)(struct pneigh_entry * ) ; void (*pdestructor)(struct pneigh_entry * ) ; void (*proxy_redo)(struct sk_buff * ) ; char *id ; struct neigh_parms parms ; struct list_head parms_list ; int gc_interval ; int gc_thresh1 ; int gc_thresh2 ; int gc_thresh3 ; unsigned long last_flush ; struct delayed_work gc_work ; struct timer_list proxy_timer ; struct sk_buff_head proxy_queue ; atomic_t entries ; rwlock_t lock ; unsigned long last_rand ; struct neigh_statistics *stats ; struct neigh_hash_table *nht ; struct pneigh_entry **phash_buckets ; }; struct dn_route; union __anonunion____missing_field_name_476 { struct dst_entry *next ; struct rtable *rt_next ; struct rt6_info *rt6_next ; struct dn_route *dn_next ; }; struct dst_entry { struct callback_head callback_head ; struct dst_entry *child ; struct net_device *dev ; struct dst_ops *ops ; unsigned long _metrics ; unsigned long expires ; struct dst_entry *path ; struct dst_entry *from ; struct xfrm_state *xfrm ; int (*input)(struct sk_buff * ) ; int (*output)(struct sock * , struct sk_buff * ) ; unsigned short flags ; unsigned short pending_confirm ; short error ; short obsolete ; unsigned short header_len ; unsigned short trailer_len ; __u32 tclassid ; long __pad_to_align_refcnt[2U] ; atomic_t __refcnt ; int __use ; unsigned long lastuse ; union __anonunion____missing_field_name_476 __annonCompField111 ; }; struct __anonstruct_socket_lock_t_477 { spinlock_t slock ; int owned ; wait_queue_head_t wq ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_socket_lock_t_477 socket_lock_t; struct proto; 
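/* Socket-layer types follow: struct sock_common, struct sock, struct proto and the request/timewait-sock helpers, flattened by CIL like the rest of this file, presumably so the verifier sees the complete layouts the qla2xxx target code depends on. */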
typedef __u32 __portpair; typedef __u64 __addrpair; struct __anonstruct____missing_field_name_479 { __be32 skc_daddr ; __be32 skc_rcv_saddr ; }; union __anonunion____missing_field_name_478 { __addrpair skc_addrpair ; struct __anonstruct____missing_field_name_479 __annonCompField112 ; }; union __anonunion____missing_field_name_480 { unsigned int skc_hash ; __u16 skc_u16hashes[2U] ; }; struct __anonstruct____missing_field_name_482 { __be16 skc_dport ; __u16 skc_num ; }; union __anonunion____missing_field_name_481 { __portpair skc_portpair ; struct __anonstruct____missing_field_name_482 __annonCompField115 ; }; union __anonunion____missing_field_name_483 { struct hlist_node skc_bind_node ; struct hlist_nulls_node skc_portaddr_node ; }; union __anonunion____missing_field_name_484 { struct hlist_node skc_node ; struct hlist_nulls_node skc_nulls_node ; }; struct sock_common { union __anonunion____missing_field_name_478 __annonCompField113 ; union __anonunion____missing_field_name_480 __annonCompField114 ; union __anonunion____missing_field_name_481 __annonCompField116 ; unsigned short skc_family ; unsigned char volatile skc_state ; unsigned char skc_reuse : 4 ; unsigned char skc_reuseport : 1 ; unsigned char skc_ipv6only : 1 ; unsigned char skc_net_refcnt : 1 ; int skc_bound_dev_if ; union __anonunion____missing_field_name_483 __annonCompField117 ; struct proto *skc_prot ; possible_net_t skc_net ; struct in6_addr skc_v6_daddr ; struct in6_addr skc_v6_rcv_saddr ; atomic64_t skc_cookie ; int skc_dontcopy_begin[0U] ; union __anonunion____missing_field_name_484 __annonCompField118 ; int skc_tx_queue_mapping ; atomic_t skc_refcnt ; int skc_dontcopy_end[0U] ; }; struct cg_proto; struct __anonstruct_sk_backlog_485 { atomic_t rmem_alloc ; int len ; struct sk_buff *head ; struct sk_buff *tail ; }; struct sock { struct sock_common __sk_common ; socket_lock_t sk_lock ; struct sk_buff_head sk_receive_queue ; struct __anonstruct_sk_backlog_485 sk_backlog ; int sk_forward_alloc ; __u32 sk_rxhash ; u16 sk_incoming_cpu ; __u32 sk_txhash ; unsigned int sk_napi_id ; unsigned int sk_ll_usec ; atomic_t sk_drops ; int sk_rcvbuf ; struct sk_filter *sk_filter ; struct socket_wq *sk_wq ; struct xfrm_policy *sk_policy[2U] ; unsigned long sk_flags ; struct dst_entry *sk_rx_dst ; struct dst_entry *sk_dst_cache ; spinlock_t sk_dst_lock ; atomic_t sk_wmem_alloc ; atomic_t sk_omem_alloc ; int sk_sndbuf ; struct sk_buff_head sk_write_queue ; unsigned char sk_shutdown : 2 ; unsigned char sk_no_check_tx : 1 ; unsigned char sk_no_check_rx : 1 ; unsigned char sk_userlocks : 4 ; unsigned char sk_protocol ; unsigned short sk_type ; int sk_wmem_queued ; gfp_t sk_allocation ; u32 sk_pacing_rate ; u32 sk_max_pacing_rate ; netdev_features_t sk_route_caps ; netdev_features_t sk_route_nocaps ; int sk_gso_type ; unsigned int sk_gso_max_size ; u16 sk_gso_max_segs ; int sk_rcvlowat ; unsigned long sk_lingertime ; struct sk_buff_head sk_error_queue ; struct proto *sk_prot_creator ; rwlock_t sk_callback_lock ; int sk_err ; int sk_err_soft ; u32 sk_ack_backlog ; u32 sk_max_ack_backlog ; __u32 sk_priority ; __u32 sk_cgrp_prioidx ; struct pid *sk_peer_pid ; struct cred const *sk_peer_cred ; long sk_rcvtimeo ; long sk_sndtimeo ; struct timer_list sk_timer ; ktime_t sk_stamp ; u16 sk_tsflags ; u32 sk_tskey ; struct socket *sk_socket ; void *sk_user_data ; struct page_frag sk_frag ; struct sk_buff *sk_send_head ; __s32 sk_peek_off ; int sk_write_pending ; void *sk_security ; __u32 sk_mark ; u32 sk_classid ; struct cg_proto *sk_cgrp ; void 
(*sk_state_change)(struct sock * ) ; void (*sk_data_ready)(struct sock * ) ; void (*sk_write_space)(struct sock * ) ; void (*sk_error_report)(struct sock * ) ; int (*sk_backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*sk_destruct)(struct sock * ) ; }; struct request_sock_ops; struct timewait_sock_ops; struct inet_hashinfo; struct raw_hashinfo; struct udp_table; union __anonunion_h_488 { struct inet_hashinfo *hashinfo ; struct udp_table *udp_table ; struct raw_hashinfo *raw_hash ; }; struct proto { void (*close)(struct sock * , long ) ; int (*connect)(struct sock * , struct sockaddr * , int ) ; int (*disconnect)(struct sock * , int ) ; struct sock *(*accept)(struct sock * , int , int * ) ; int (*ioctl)(struct sock * , int , unsigned long ) ; int (*init)(struct sock * ) ; void (*destroy)(struct sock * ) ; void (*shutdown)(struct sock * , int ) ; int (*setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_ioctl)(struct sock * , unsigned int , unsigned long ) ; int (*sendmsg)(struct sock * , struct msghdr * , size_t ) ; int (*recvmsg)(struct sock * , struct msghdr * , size_t , int , int , int * ) ; int (*sendpage)(struct sock * , struct page * , int , size_t , int ) ; int (*bind)(struct sock * , struct sockaddr * , int ) ; int (*backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*release_cb)(struct sock * ) ; void (*hash)(struct sock * ) ; void (*unhash)(struct sock * ) ; void (*rehash)(struct sock * ) ; int (*get_port)(struct sock * , unsigned short ) ; void (*clear_sk)(struct sock * , int ) ; unsigned int inuse_idx ; bool (*stream_memory_free)(struct sock const * ) ; void (*enter_memory_pressure)(struct sock * ) ; atomic_long_t *memory_allocated ; struct percpu_counter *sockets_allocated ; int *memory_pressure ; long *sysctl_mem ; int *sysctl_wmem ; int *sysctl_rmem ; int max_header ; bool no_autobind ; struct kmem_cache *slab ; unsigned int obj_size ; int slab_flags ; struct percpu_counter *orphan_count ; struct request_sock_ops *rsk_prot ; struct timewait_sock_ops *twsk_prot ; union __anonunion_h_488 h ; struct module *owner ; char name[32U] ; struct list_head node ; int (*init_cgroup)(struct mem_cgroup * , struct cgroup_subsys * ) ; void (*destroy_cgroup)(struct mem_cgroup * ) ; struct cg_proto *(*proto_cgroup)(struct mem_cgroup * ) ; }; struct cg_proto { struct page_counter memory_allocated ; struct percpu_counter sockets_allocated ; int memory_pressure ; long sysctl_mem[3U] ; unsigned long flags ; struct mem_cgroup *memcg ; }; struct request_sock_ops { int family ; int obj_size ; struct kmem_cache *slab ; char *slab_name ; int (*rtx_syn_ack)(struct sock * , struct request_sock * ) ; void (*send_ack)(struct sock * , struct sk_buff * , struct request_sock * ) ; void (*send_reset)(struct sock * , struct sk_buff * ) ; void (*destructor)(struct request_sock * ) ; void (*syn_ack_timeout)(struct request_sock const * ) ; }; struct request_sock { struct sock_common __req_common ; struct request_sock *dl_next ; struct sock *rsk_listener ; u16 mss ; u8 num_retrans ; unsigned char cookie_ts : 1 ; unsigned char num_timeout : 7 ; u32 window_clamp ; u32 rcv_wnd ; u32 ts_recent ; struct timer_list rsk_timer ; struct request_sock_ops const *rsk_ops ; struct sock *sk ; u32 *saved_syn ; u32 secid ; u32 peer_secid ; }; struct timewait_sock_ops { struct 
kmem_cache *twsk_slab ; char *twsk_slab_name ; unsigned int twsk_obj_size ; int (*twsk_unique)(struct sock * , struct sock * , void * ) ; void (*twsk_destructor)(struct sock * ) ; }; struct ipv6_stable_secret { bool initialized ; struct in6_addr secret ; }; struct ipv6_devconf { __s32 forwarding ; __s32 hop_limit ; __s32 mtu6 ; __s32 accept_ra ; __s32 accept_redirects ; __s32 autoconf ; __s32 dad_transmits ; __s32 rtr_solicits ; __s32 rtr_solicit_interval ; __s32 rtr_solicit_delay ; __s32 force_mld_version ; __s32 mldv1_unsolicited_report_interval ; __s32 mldv2_unsolicited_report_interval ; __s32 use_tempaddr ; __s32 temp_valid_lft ; __s32 temp_prefered_lft ; __s32 regen_max_retry ; __s32 max_desync_factor ; __s32 max_addresses ; __s32 accept_ra_defrtr ; __s32 accept_ra_pinfo ; __s32 accept_ra_rtr_pref ; __s32 rtr_probe_interval ; __s32 accept_ra_rt_info_max_plen ; __s32 proxy_ndp ; __s32 accept_source_route ; __s32 accept_ra_from_local ; __s32 optimistic_dad ; __s32 use_optimistic ; __s32 mc_forwarding ; __s32 disable_ipv6 ; __s32 accept_dad ; __s32 force_tllao ; __s32 ndisc_notify ; __s32 suppress_frag_ndisc ; __s32 accept_ra_mtu ; struct ipv6_stable_secret stable_secret ; void *sysctl ; }; struct ip6_sf_list { struct ip6_sf_list *sf_next ; struct in6_addr sf_addr ; unsigned long sf_count[2U] ; unsigned char sf_gsresp ; unsigned char sf_oldin ; unsigned char sf_crcount ; }; struct ifmcaddr6 { struct in6_addr mca_addr ; struct inet6_dev *idev ; struct ifmcaddr6 *next ; struct ip6_sf_list *mca_sources ; struct ip6_sf_list *mca_tomb ; unsigned int mca_sfmode ; unsigned char mca_crcount ; unsigned long mca_sfcount[2U] ; struct timer_list mca_timer ; unsigned int mca_flags ; int mca_users ; atomic_t mca_refcnt ; spinlock_t mca_lock ; unsigned long mca_cstamp ; unsigned long mca_tstamp ; }; struct ifacaddr6 { struct in6_addr aca_addr ; struct inet6_dev *aca_idev ; struct rt6_info *aca_rt ; struct ifacaddr6 *aca_next ; int aca_users ; atomic_t aca_refcnt ; unsigned long aca_cstamp ; unsigned long aca_tstamp ; }; struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry ; struct ipstats_mib *ipv6 ; struct icmpv6_mib_device *icmpv6dev ; struct icmpv6msg_mib_device *icmpv6msgdev ; }; struct inet6_dev { struct net_device *dev ; struct list_head addr_list ; struct ifmcaddr6 *mc_list ; struct ifmcaddr6 *mc_tomb ; spinlock_t mc_lock ; unsigned char mc_qrv ; unsigned char mc_gq_running ; unsigned char mc_ifc_count ; unsigned char mc_dad_count ; unsigned long mc_v1_seen ; unsigned long mc_qi ; unsigned long mc_qri ; unsigned long mc_maxdelay ; struct timer_list mc_gq_timer ; struct timer_list mc_ifc_timer ; struct timer_list mc_dad_timer ; struct ifacaddr6 *ac_list ; rwlock_t lock ; atomic_t refcnt ; __u32 if_flags ; int dead ; u8 rndid[8U] ; struct timer_list regen_timer ; struct list_head tempaddr_list ; struct in6_addr token ; struct neigh_parms *nd_parms ; struct ipv6_devconf cnf ; struct ipv6_devstat stats ; struct timer_list rs_timer ; __u8 rs_probes ; __u8 addr_gen_mode ; unsigned long tstamp ; struct callback_head rcu ; }; union __anonunion____missing_field_name_510 { __be32 a4 ; __be32 a6[4U] ; struct in6_addr in6 ; }; struct inetpeer_addr_base { union __anonunion____missing_field_name_510 __annonCompField120 ; }; struct inetpeer_addr { struct inetpeer_addr_base addr ; __u16 family ; }; union __anonunion____missing_field_name_511 { struct list_head gc_list ; struct callback_head gc_rcu ; }; struct __anonstruct____missing_field_name_513 { atomic_t rid ; }; union 
__anonunion____missing_field_name_512 { struct __anonstruct____missing_field_name_513 __annonCompField122 ; struct callback_head rcu ; struct inet_peer *gc_next ; }; struct inet_peer { struct inet_peer *avl_left ; struct inet_peer *avl_right ; struct inetpeer_addr daddr ; __u32 avl_height ; u32 metrics[16U] ; u32 rate_tokens ; unsigned long rate_last ; union __anonunion____missing_field_name_511 __annonCompField121 ; union __anonunion____missing_field_name_512 __annonCompField123 ; __u32 dtime ; atomic_t refcnt ; }; struct inet_peer_base { struct inet_peer *root ; seqlock_t lock ; int total ; }; struct uncached_list; struct rtable { struct dst_entry dst ; int rt_genid ; unsigned int rt_flags ; __u16 rt_type ; __u8 rt_is_input ; __u8 rt_uses_gateway ; int rt_iif ; __be32 rt_gateway ; u32 rt_pmtu ; struct list_head rt_uncached ; struct uncached_list *rt_uncached_list ; }; struct inet_ehash_bucket { struct hlist_nulls_head chain ; }; struct inet_bind_hashbucket { spinlock_t lock ; struct hlist_head chain ; }; struct inet_listen_hashbucket { spinlock_t lock ; struct hlist_nulls_head head ; }; struct inet_hashinfo { struct inet_ehash_bucket *ehash ; spinlock_t *ehash_locks ; unsigned int ehash_mask ; unsigned int ehash_locks_mask ; struct inet_bind_hashbucket *bhash ; unsigned int bhash_size ; struct kmem_cache *bind_bucket_cachep ; struct inet_listen_hashbucket listening_hash[32U] ; }; enum transport_state_table { TRANSPORT_NO_STATE = 0, TRANSPORT_NEW_CMD = 1, TRANSPORT_WRITE_PENDING = 3, TRANSPORT_PROCESSING = 5, TRANSPORT_COMPLETE = 6, TRANSPORT_ISTATE_PROCESSING = 11, TRANSPORT_COMPLETE_QF_WP = 18, TRANSPORT_COMPLETE_QF_OK = 19 } ; typedef unsigned int sense_reason_t; struct se_cmd; struct se_device; struct t10_alua_tg_pt_gp; struct t10_alua { u16 alua_tg_pt_gps_counter ; u32 alua_tg_pt_gps_count ; spinlock_t lba_map_lock ; u32 lba_map_segment_size ; u32 lba_map_segment_multiplier ; struct list_head lba_map_list ; spinlock_t tg_pt_gps_lock ; struct se_device *t10_dev ; struct t10_alua_tg_pt_gp *default_tg_pt_gp ; struct config_group alua_tg_pt_gps_group ; struct list_head tg_pt_gps_list ; }; struct t10_alua_lu_gp { u16 lu_gp_id ; int lu_gp_valid_id ; u32 lu_gp_members ; atomic_t lu_gp_ref_cnt ; spinlock_t lu_gp_lock ; struct config_group lu_gp_group ; struct list_head lu_gp_node ; struct list_head lu_gp_mem_list ; }; struct t10_alua_lu_gp_member { bool lu_gp_assoc ; atomic_t lu_gp_mem_ref_cnt ; spinlock_t lu_gp_mem_lock ; struct t10_alua_lu_gp *lu_gp ; struct se_device *lu_gp_mem_dev ; struct list_head lu_gp_mem_list ; }; struct se_lun; struct se_node_acl; struct t10_alua_tg_pt_gp { u16 tg_pt_gp_id ; int tg_pt_gp_valid_id ; int tg_pt_gp_alua_supported_states ; int tg_pt_gp_alua_pending_state ; int tg_pt_gp_alua_previous_state ; int tg_pt_gp_alua_access_status ; int tg_pt_gp_alua_access_type ; int tg_pt_gp_nonop_delay_msecs ; int tg_pt_gp_trans_delay_msecs ; int tg_pt_gp_implicit_trans_secs ; int tg_pt_gp_pref ; int tg_pt_gp_write_metadata ; u32 tg_pt_gp_members ; atomic_t tg_pt_gp_alua_access_state ; atomic_t tg_pt_gp_ref_cnt ; spinlock_t tg_pt_gp_lock ; struct mutex tg_pt_gp_md_mutex ; struct se_device *tg_pt_gp_dev ; struct config_group tg_pt_gp_group ; struct list_head tg_pt_gp_list ; struct list_head tg_pt_gp_lun_list ; struct se_lun *tg_pt_gp_alua_lun ; struct se_node_acl *tg_pt_gp_alua_nacl ; struct delayed_work tg_pt_gp_transition_work ; struct completion *tg_pt_gp_transition_complete ; }; struct t10_wwn { char vendor[8U] ; char model[16U] ; char revision[4U] ; char unit_serial[254U] 
; spinlock_t t10_vpd_lock ; struct se_device *t10_dev ; struct config_group t10_wwn_group ; struct list_head t10_vpd_list ; }; struct se_dev_entry; struct t10_pr_registration { char pr_reg_isid[16U] ; unsigned char pr_iport[256U] ; unsigned char pr_tport[256U] ; u16 pr_aptpl_rpti ; u16 pr_reg_tpgt ; int pr_reg_all_tg_pt ; int pr_reg_aptpl ; int pr_res_holder ; int pr_res_type ; int pr_res_scope ; bool isid_present_at_reg ; u64 pr_res_mapped_lun ; u64 pr_aptpl_target_lun ; u16 tg_pt_sep_rtpi ; u32 pr_res_generation ; u64 pr_reg_bin_isid ; u64 pr_res_key ; atomic_t pr_res_holders ; struct se_node_acl *pr_reg_nacl ; struct se_dev_entry *pr_reg_deve ; struct list_head pr_reg_list ; struct list_head pr_reg_abort_list ; struct list_head pr_reg_aptpl_list ; struct list_head pr_reg_atp_list ; struct list_head pr_reg_atp_mem_list ; }; struct t10_reservation { int pr_all_tg_pt ; int pr_aptpl_active ; u32 pr_generation ; spinlock_t registration_lock ; spinlock_t aptpl_reg_lock ; struct se_node_acl *pr_res_holder ; struct list_head registration_list ; struct list_head aptpl_reg_list ; }; struct se_tmr_req { u8 function ; u8 response ; int call_transport ; u64 ref_task_tag ; void *fabric_tmr_ptr ; struct se_cmd *task_cmd ; struct se_device *tmr_dev ; struct se_lun *tmr_lun ; struct list_head tmr_list ; }; enum target_prot_op { TARGET_PROT_NORMAL = 0, TARGET_PROT_DIN_INSERT = 1, TARGET_PROT_DOUT_INSERT = 2, TARGET_PROT_DIN_STRIP = 4, TARGET_PROT_DOUT_STRIP = 8, TARGET_PROT_DIN_PASS = 16, TARGET_PROT_DOUT_PASS = 32 } ; enum target_prot_type { TARGET_DIF_TYPE0_PROT = 0, TARGET_DIF_TYPE1_PROT = 1, TARGET_DIF_TYPE2_PROT = 2, TARGET_DIF_TYPE3_PROT = 3 } ; struct se_session; struct target_core_fabric_ops; struct se_cmd { u8 scsi_status ; u8 scsi_asc ; u8 scsi_ascq ; u16 scsi_sense_length ; u64 tag ; int alua_nonop_delay ; enum dma_data_direction data_direction ; int sam_task_attr ; unsigned int map_tag ; enum transport_state_table t_state ; unsigned char cmd_wait_set : 1 ; unsigned char unknown_data_length : 1 ; u32 se_cmd_flags ; u32 se_ordered_id ; u32 data_length ; u32 residual_count ; u64 orig_fe_lun ; u64 pr_res_key ; void *sense_buffer ; struct list_head se_delayed_node ; struct list_head se_qf_node ; struct se_device *se_dev ; struct se_lun *se_lun ; struct se_session *se_sess ; struct se_tmr_req *se_tmr_req ; struct list_head se_cmd_list ; struct completion cmd_wait_comp ; struct kref cmd_kref ; struct target_core_fabric_ops const *se_tfo ; sense_reason_t (*execute_cmd)(struct se_cmd * ) ; sense_reason_t (*transport_complete_callback)(struct se_cmd * , bool ) ; void *protocol_data ; unsigned char *t_task_cdb ; unsigned char __t_task_cdb[32U] ; unsigned long long t_task_lba ; unsigned int t_task_nolb ; unsigned int transport_state ; spinlock_t t_state_lock ; struct completion t_transport_stop_comp ; struct work_struct work ; struct scatterlist *t_data_sg ; struct scatterlist *t_data_sg_orig ; unsigned int t_data_nents ; unsigned int t_data_nents_orig ; void *t_data_vmap ; struct scatterlist *t_bidi_data_sg ; unsigned int t_bidi_data_nents ; struct list_head state_list ; bool state_active ; struct completion task_stop_comp ; void *priv ; int lun_ref_active ; enum target_prot_op prot_op ; enum target_prot_type prot_type ; u8 prot_checks ; u32 prot_length ; u32 reftag_seed ; struct scatterlist *t_prot_sg ; unsigned int t_prot_nents ; sense_reason_t pi_err ; sector_t bad_sector ; bool prot_pto ; }; struct se_portal_group; struct se_node_acl { char initiatorname[224U] ; bool dynamic_node_acl ; bool acl_stop 
; u32 queue_depth ; u32 acl_index ; enum target_prot_type saved_prot_type ; char acl_tag[64U] ; atomic_t acl_pr_ref_count ; struct hlist_head lun_entry_hlist ; struct se_session *nacl_sess ; struct se_portal_group *se_tpg ; struct mutex lun_entry_mutex ; spinlock_t nacl_sess_lock ; struct config_group acl_group ; struct config_group acl_attrib_group ; struct config_group acl_auth_group ; struct config_group acl_param_group ; struct config_group acl_fabric_stat_group ; struct config_group *acl_default_groups[5U] ; struct list_head acl_list ; struct list_head acl_sess_list ; struct completion acl_free_comp ; struct kref acl_kref ; }; struct se_session { unsigned char sess_tearing_down : 1 ; u64 sess_bin_isid ; enum target_prot_op sup_prot_ops ; enum target_prot_type sess_prot_type ; struct se_node_acl *se_node_acl ; struct se_portal_group *se_tpg ; void *fabric_sess_ptr ; struct list_head sess_list ; struct list_head sess_acl_list ; struct list_head sess_cmd_list ; struct list_head sess_wait_list ; spinlock_t sess_cmd_lock ; struct kref sess_kref ; void *sess_cmd_map ; struct percpu_ida sess_tag_pool ; }; struct se_ml_stat_grps { struct config_group stat_group ; struct config_group scsi_auth_intr_group ; struct config_group scsi_att_intr_port_group ; }; struct se_lun_acl { char initiatorname[224U] ; u64 mapped_lun ; struct se_node_acl *se_lun_nacl ; struct se_lun *se_lun ; struct config_group se_lun_group ; struct se_ml_stat_grps ml_stat_grps ; }; struct se_dev_entry { u64 mapped_lun ; u64 pr_res_key ; u64 creation_time ; u32 lun_flags ; u32 attach_count ; atomic_long_t total_cmds ; atomic_long_t read_bytes ; atomic_long_t write_bytes ; atomic_t ua_count ; struct kref pr_kref ; struct completion pr_comp ; struct se_lun_acl *se_lun_acl ; spinlock_t ua_lock ; struct se_lun *se_lun ; unsigned long deve_flags ; struct list_head alua_port_list ; struct list_head lun_link ; struct list_head ua_list ; struct hlist_node link ; struct callback_head callback_head ; }; struct se_dev_attrib { int emulate_model_alias ; int emulate_dpo ; int emulate_fua_write ; int emulate_fua_read ; int emulate_write_cache ; int emulate_ua_intlck_ctrl ; int emulate_tas ; int emulate_tpu ; int emulate_tpws ; int emulate_caw ; int emulate_3pc ; int pi_prot_format ; enum target_prot_type pi_prot_type ; enum target_prot_type hw_pi_prot_type ; int enforce_pr_isids ; int force_pr_aptpl ; int is_nonrot ; int emulate_rest_reord ; u32 hw_block_size ; u32 block_size ; u32 hw_max_sectors ; u32 optimal_sectors ; u32 hw_queue_depth ; u32 queue_depth ; u32 max_unmap_lba_count ; u32 max_unmap_block_desc_count ; u32 unmap_granularity ; u32 unmap_granularity_alignment ; u32 max_write_same_len ; u32 max_bytes_per_io ; struct se_device *da_dev ; struct config_group da_group ; }; struct se_port_stat_grps { struct config_group stat_group ; struct config_group scsi_port_group ; struct config_group scsi_tgt_port_group ; struct config_group scsi_transport_group ; }; struct scsi_port_stats { atomic_long_t cmd_pdus ; atomic_long_t tx_data_octets ; atomic_long_t rx_data_octets ; }; struct se_lun { u64 unpacked_lun ; u32 lun_link_magic ; u32 lun_access ; u32 lun_flags ; u32 lun_index ; u16 lun_rtpi ; atomic_t lun_acl_count ; struct se_device *lun_se_dev ; struct list_head lun_deve_list ; spinlock_t lun_deve_lock ; int lun_tg_pt_secondary_stat ; int lun_tg_pt_secondary_write_md ; atomic_t lun_tg_pt_secondary_offline ; struct mutex lun_tg_pt_md_mutex ; struct list_head lun_tg_pt_gp_link ; struct t10_alua_tg_pt_gp *lun_tg_pt_gp ; spinlock_t 
lun_tg_pt_gp_lock ; struct se_portal_group *lun_tpg ; struct scsi_port_stats lun_stats ; struct config_group lun_group ; struct se_port_stat_grps port_stat_grps ; struct completion lun_ref_comp ; struct percpu_ref lun_ref ; struct list_head lun_dev_link ; struct hlist_node link ; struct callback_head callback_head ; }; struct se_dev_stat_grps { struct config_group stat_group ; struct config_group scsi_dev_group ; struct config_group scsi_tgt_dev_group ; struct config_group scsi_lu_group ; }; struct se_hba; struct target_backend_ops; struct se_device { u32 dev_link_magic ; u16 dev_rpti_counter ; u32 dev_cur_ordered_id ; u32 dev_flags ; u32 queue_depth ; u64 dev_res_bin_isid ; u32 dev_index ; u64 creation_time ; atomic_long_t num_resets ; atomic_long_t num_cmds ; atomic_long_t read_bytes ; atomic_long_t write_bytes ; atomic_t simple_cmds ; atomic_t dev_ordered_id ; atomic_t dev_ordered_sync ; atomic_t dev_qf_count ; u32 export_count ; spinlock_t delayed_cmd_lock ; spinlock_t execute_task_lock ; spinlock_t dev_reservation_lock ; unsigned int dev_reservation_flags ; spinlock_t se_port_lock ; spinlock_t se_tmr_lock ; spinlock_t qf_cmd_lock ; struct semaphore caw_sem ; struct se_node_acl *dev_reserved_node_acl ; struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem ; struct t10_pr_registration *dev_pr_res_holder ; struct list_head dev_sep_list ; struct list_head dev_tmr_list ; struct workqueue_struct *tmr_wq ; struct work_struct qf_work_queue ; struct list_head delayed_cmd_list ; struct list_head state_list ; struct list_head qf_cmd_list ; struct list_head g_dev_node ; struct se_hba *se_hba ; struct t10_wwn t10_wwn ; struct t10_alua t10_alua ; struct t10_reservation t10_pr ; struct se_dev_attrib dev_attrib ; struct config_group dev_group ; struct config_group dev_pr_group ; struct se_dev_stat_grps dev_stat_grps ; unsigned char dev_alias[512U] ; unsigned char udev_path[512U] ; struct target_backend_ops const *transport ; struct list_head dev_list ; struct se_lun xcopy_lun ; int prot_length ; u32 hba_index ; struct callback_head callback_head ; }; struct target_backend; struct se_hba { u16 hba_tpgt ; u32 hba_id ; u32 hba_flags ; u32 dev_count ; u32 hba_index ; void *hba_ptr ; struct list_head hba_node ; spinlock_t device_lock ; struct config_group hba_group ; struct mutex hba_access_mutex ; struct target_backend *backend ; }; struct se_wwn; struct se_portal_group { int proto_id ; u32 num_node_acls ; atomic_t tpg_pr_ref_count ; struct mutex acl_node_mutex ; spinlock_t session_lock ; struct mutex tpg_lun_mutex ; struct list_head se_tpg_node ; struct list_head acl_node_list ; struct hlist_head tpg_lun_hlist ; struct se_lun *tpg_virt_lun0 ; struct list_head tpg_sess_list ; struct target_core_fabric_ops const *se_tpg_tfo ; struct se_wwn *se_tpg_wwn ; struct config_group tpg_group ; struct config_group *tpg_default_groups[7U] ; struct config_group tpg_lun_group ; struct config_group tpg_np_group ; struct config_group tpg_acl_group ; struct config_group tpg_attrib_group ; struct config_group tpg_auth_group ; struct config_group tpg_param_group ; }; struct target_fabric_configfs; struct se_wwn { struct target_fabric_configfs *wwn_tf ; struct config_group wwn_group ; struct config_group *wwn_default_groups[2U] ; struct config_group fabric_stat_group ; }; struct qla_tgt { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; int irq_cmd_count ; int datasegs_per_cmd ; int datasegs_per_cont ; int sg_tablesize ; unsigned char tgt_enable_64bit_addr : 1 ; unsigned char link_reinit_iocb_pending : 1 ; int tgt_stop ; int 
tgt_stopped ; int sess_count ; struct list_head sess_list ; struct list_head del_sess_list ; struct delayed_work sess_del_work ; spinlock_t sess_work_lock ; struct list_head sess_works_list ; struct work_struct sess_work ; struct imm_ntfy_from_isp link_reinit_iocb ; wait_queue_head_t waitQ ; int notify_ack_expected ; int abts_resp_expected ; int modify_lun_expected ; int ctio_srr_id ; int imm_srr_id ; spinlock_t srr_lock ; struct list_head srr_ctio_list ; struct list_head srr_imm_list ; struct work_struct srr_work ; atomic_t tgt_global_resets_count ; struct list_head tgt_list_entry ; }; struct qla_tgt_sess { uint16_t loop_id ; port_id_t s_id ; unsigned char conf_compl_supported : 1 ; unsigned char deleted : 1 ; unsigned char local : 1 ; struct se_session *se_sess ; struct scsi_qla_host *vha ; struct qla_tgt *tgt ; struct list_head sess_list_entry ; unsigned long expires ; struct list_head del_list_entry ; uint8_t port_name[8U] ; struct work_struct free_work ; }; struct qla_tgt_cmd { struct se_cmd se_cmd ; struct qla_tgt_sess *sess ; int state ; struct work_struct free_work ; struct work_struct work ; unsigned char sense_buffer[96U] ; unsigned char conf_compl_supported : 1 ; unsigned char sg_mapped : 1 ; unsigned char free_sg : 1 ; unsigned char aborted : 1 ; unsigned char write_data_transferred : 1 ; unsigned char ctx_dsd_alloced : 1 ; unsigned char q_full : 1 ; unsigned char term_exchg : 1 ; unsigned char cmd_sent_to_fw : 1 ; unsigned char cmd_in_wq : 1 ; struct scatterlist *sg ; int sg_cnt ; int bufflen ; int offset ; uint32_t unpacked_lun ; enum dma_data_direction dma_data_direction ; uint32_t reset_count ; uint16_t loop_id ; struct qla_tgt *tgt ; struct scsi_qla_host *vha ; struct list_head cmd_list ; struct atio_from_isp atio ; struct scatterlist *prot_sg ; uint32_t prot_sg_cnt ; uint32_t blk_sz ; struct crc_context *ctx ; uint64_t jiffies_at_alloc ; uint64_t jiffies_at_free ; uint32_t cmd_flags ; }; union __anonunion_orig_iocb_519 { struct atio_from_isp atio ; struct imm_ntfy_from_isp imm_ntfy ; struct abts_recv_from_24xx abts ; }; struct qla_tgt_mgmt_cmd { uint8_t tmr_func ; uint8_t fc_tm_rsp ; struct qla_tgt_sess *sess ; struct se_cmd se_cmd ; struct work_struct free_work ; unsigned int flags ; uint32_t reset_count ; union __anonunion_orig_iocb_519 orig_iocb ; }; struct attribute_container { struct list_head node ; struct klist containers ; struct class *class ; struct attribute_group const *grp ; struct device_attribute **attrs ; int (*match)(struct attribute_container * , struct device * ) ; unsigned long flags ; }; struct transport_container; struct transport_container { struct attribute_container ac ; struct attribute_group const *statistics ; }; struct scsi_transport_template { struct transport_container host_attrs ; struct transport_container target_attrs ; struct transport_container device_attrs ; int (*user_scan)(struct Scsi_Host * , uint , uint , u64 ) ; int device_size ; int device_private_offset ; int target_size ; int target_private_offset ; int host_size ; unsigned char create_work_queue : 1 ; void (*eh_strategy_handler)(struct Scsi_Host * ) ; enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd * ) ; int (*it_nexus_response)(struct Scsi_Host * , u64 , int ) ; int (*tsk_mgmt_response)(struct Scsi_Host * , u64 , u64 , int ) ; }; typedef bool ldv_func_ret_type; typedef bool ldv_func_ret_type___0; typedef bool ldv_func_ret_type___1; typedef bool ldv_func_ret_type___2; typedef int ldv_func_ret_type___3; typedef int ldv_func_ret_type___4; typedef int 
ldv_func_ret_type___5; typedef bool ldv_func_ret_type___6; typedef bool ldv_func_ret_type___7; typedef bool ldv_func_ret_type___8; typedef bool ldv_func_ret_type___9; typedef bool ldv_func_ret_type___10; typedef struct Scsi_Host *ldv_func_ret_type___11; typedef int ldv_func_ret_type___12; typedef int ldv_func_ret_type___13; enum hrtimer_restart; struct fc_rport_identifiers { u64 node_name ; u64 port_name ; u32 port_id ; u32 roles ; }; struct nvram_24xx { uint8_t id[4U] ; uint16_t nvram_version ; uint16_t reserved_0 ; uint16_t version ; uint16_t reserved_1 ; __le16 frame_payload_size ; uint16_t execution_throttle ; uint16_t exchange_count ; uint16_t hard_address ; uint8_t port_name[8U] ; uint8_t node_name[8U] ; uint16_t login_retry_count ; uint16_t link_down_on_nos ; uint16_t interrupt_delay_timer ; uint16_t login_timeout ; uint32_t firmware_options_1 ; uint32_t firmware_options_2 ; uint32_t firmware_options_3 ; uint16_t seriallink_options[4U] ; uint16_t reserved_2[16U] ; uint16_t reserved_3[16U] ; uint16_t reserved_4[16U] ; uint16_t reserved_5[16U] ; uint16_t reserved_6[16U] ; uint16_t reserved_7[16U] ; uint32_t host_p ; uint8_t alternate_port_name[8U] ; uint8_t alternate_node_name[8U] ; uint8_t boot_port_name[8U] ; uint16_t boot_lun_number ; uint16_t reserved_8 ; uint8_t alt1_boot_port_name[8U] ; uint16_t alt1_boot_lun_number ; uint16_t reserved_9 ; uint8_t alt2_boot_port_name[8U] ; uint16_t alt2_boot_lun_number ; uint16_t reserved_10 ; uint8_t alt3_boot_port_name[8U] ; uint16_t alt3_boot_lun_number ; uint16_t reserved_11 ; uint32_t efi_parameters ; uint8_t reset_delay ; uint8_t reserved_12 ; uint16_t reserved_13 ; uint16_t boot_id_number ; uint16_t reserved_14 ; uint16_t max_luns_per_target ; uint16_t reserved_15 ; uint16_t port_down_retry_count ; uint16_t link_down_timeout ; uint16_t fcode_parameter ; uint16_t reserved_16[3U] ; uint8_t prev_drv_ver_major ; uint8_t prev_drv_ver_submajob ; uint8_t prev_drv_ver_minor ; uint8_t prev_drv_ver_subminor ; uint16_t prev_bios_ver_major ; uint16_t prev_bios_ver_minor ; uint16_t prev_efi_ver_major ; uint16_t prev_efi_ver_minor ; uint16_t prev_fw_ver_major ; uint8_t prev_fw_ver_minor ; uint8_t prev_fw_ver_subminor ; uint16_t reserved_17[8U] ; uint16_t reserved_18[16U] ; uint16_t reserved_19[16U] ; uint16_t reserved_20[16U] ; uint8_t model_name[16U] ; uint16_t reserved_21[2U] ; uint16_t pcie_table_sig ; uint16_t pcie_table_offset ; uint16_t subsystem_vendor_id ; uint16_t subsystem_device_id ; uint32_t checksum ; }; struct init_cb_24xx { uint16_t version ; uint16_t reserved_1 ; uint16_t frame_payload_size ; uint16_t execution_throttle ; uint16_t exchange_count ; uint16_t hard_address ; uint8_t port_name[8U] ; uint8_t node_name[8U] ; uint16_t response_q_inpointer ; uint16_t request_q_outpointer ; uint16_t login_retry_count ; uint16_t prio_request_q_outpointer ; uint16_t response_q_length ; uint16_t request_q_length ; uint16_t link_down_on_nos ; uint16_t prio_request_q_length ; uint32_t request_q_address[2U] ; uint32_t response_q_address[2U] ; uint32_t prio_request_q_address[2U] ; uint16_t msix ; uint16_t msix_atio ; uint8_t reserved_2[4U] ; uint16_t atio_q_inpointer ; uint16_t atio_q_length ; uint32_t atio_q_address[2U] ; uint16_t interrupt_delay_timer ; uint16_t login_timeout ; uint32_t firmware_options_1 ; uint32_t firmware_options_2 ; uint32_t firmware_options_3 ; uint16_t qos ; uint16_t rid ; uint8_t reserved_3[20U] ; }; struct mid_conf_entry_24xx { uint16_t reserved_1 ; uint8_t options ; uint8_t hard_address ; uint8_t port_name[8U] ; uint8_t 
node_name[8U] ; }; struct mid_init_cb_24xx { struct init_cb_24xx init_cb ; uint16_t count ; uint16_t options ; struct mid_conf_entry_24xx entries[256U] ; }; struct nvram_81xx { uint8_t id[4U] ; uint16_t nvram_version ; uint16_t reserved_0 ; uint16_t version ; uint16_t reserved_1 ; uint16_t frame_payload_size ; uint16_t execution_throttle ; uint16_t exchange_count ; uint16_t reserved_2 ; uint8_t port_name[8U] ; uint8_t node_name[8U] ; uint16_t login_retry_count ; uint16_t reserved_3 ; uint16_t interrupt_delay_timer ; uint16_t login_timeout ; uint32_t firmware_options_1 ; uint32_t firmware_options_2 ; uint32_t firmware_options_3 ; uint16_t reserved_4[4U] ; uint8_t enode_mac[6U] ; uint16_t reserved_5[5U] ; uint16_t reserved_6[24U] ; uint16_t ex_version ; uint8_t prio_fcf_matching_flags ; uint8_t reserved_6_1[3U] ; uint16_t pri_fcf_vlan_id ; uint8_t pri_fcf_fabric_name[8U] ; uint16_t reserved_6_2[7U] ; uint8_t spma_mac_addr[6U] ; uint16_t reserved_6_3[14U] ; uint16_t reserved_7[32U] ; uint32_t host_p ; uint8_t alternate_port_name[8U] ; uint8_t alternate_node_name[8U] ; uint8_t boot_port_name[8U] ; uint16_t boot_lun_number ; uint16_t reserved_8 ; uint8_t alt1_boot_port_name[8U] ; uint16_t alt1_boot_lun_number ; uint16_t reserved_9 ; uint8_t alt2_boot_port_name[8U] ; uint16_t alt2_boot_lun_number ; uint16_t reserved_10 ; uint8_t alt3_boot_port_name[8U] ; uint16_t alt3_boot_lun_number ; uint16_t reserved_11 ; uint32_t efi_parameters ; uint8_t reset_delay ; uint8_t reserved_12 ; uint16_t reserved_13 ; uint16_t boot_id_number ; uint16_t reserved_14 ; uint16_t max_luns_per_target ; uint16_t reserved_15 ; uint16_t port_down_retry_count ; uint16_t link_down_timeout ; uint16_t fcode_parameter ; uint16_t reserved_16[3U] ; uint8_t reserved_17[4U] ; uint16_t reserved_18[5U] ; uint8_t reserved_19[2U] ; uint16_t reserved_20[8U] ; uint8_t reserved_21[16U] ; uint16_t reserved_22[3U] ; uint8_t enhanced_features ; uint8_t reserved_23 ; uint16_t reserved_24[4U] ; uint16_t reserved_25[32U] ; uint8_t model_name[16U] ; uint16_t feature_mask_l ; uint16_t feature_mask_h ; uint16_t reserved_26[2U] ; uint16_t subsystem_vendor_id ; uint16_t subsystem_device_id ; uint32_t checksum ; }; struct init_cb_81xx { uint16_t version ; uint16_t reserved_1 ; uint16_t frame_payload_size ; uint16_t execution_throttle ; uint16_t exchange_count ; uint16_t reserved_2 ; uint8_t port_name[8U] ; uint8_t node_name[8U] ; uint16_t response_q_inpointer ; uint16_t request_q_outpointer ; uint16_t login_retry_count ; uint16_t prio_request_q_outpointer ; uint16_t response_q_length ; uint16_t request_q_length ; uint16_t reserved_3 ; uint16_t prio_request_q_length ; uint32_t request_q_address[2U] ; uint32_t response_q_address[2U] ; uint32_t prio_request_q_address[2U] ; uint8_t reserved_4[8U] ; uint16_t atio_q_inpointer ; uint16_t atio_q_length ; uint32_t atio_q_address[2U] ; uint16_t interrupt_delay_timer ; uint16_t login_timeout ; uint32_t firmware_options_1 ; uint32_t firmware_options_2 ; uint32_t firmware_options_3 ; uint8_t reserved_5[8U] ; uint8_t enode_mac[6U] ; uint8_t reserved_6[10U] ; }; struct __anonstruct_nvram_t_326 { uint8_t id[4U] ; uint8_t nvram_version ; uint8_t reserved_0 ; uint8_t parameter_block_version ; uint8_t reserved_1 ; uint8_t firmware_options[2U] ; uint16_t frame_payload_size ; uint16_t max_iocb_allocation ; uint16_t execution_throttle ; uint8_t retry_count ; uint8_t retry_delay ; uint8_t port_name[8U] ; uint16_t hard_address ; uint8_t inquiry_data ; uint8_t login_timeout ; uint8_t node_name[8U] ; uint8_t 
add_firmware_options[2U] ; uint8_t response_accumulation_timer ; uint8_t interrupt_delay_timer ; uint8_t special_options[2U] ; uint8_t reserved_2[22U] ; uint8_t seriallink_options[4U] ; uint8_t host_p[2U] ; uint8_t boot_node_name[8U] ; uint8_t boot_lun_number ; uint8_t reset_delay ; uint8_t port_down_retry_count ; uint8_t boot_id_number ; uint16_t max_luns_per_target ; uint8_t fcode_boot_port_name[8U] ; uint8_t alternate_port_name[8U] ; uint8_t alternate_node_name[8U] ; uint8_t efi_parameters ; uint8_t link_down_timeout ; uint8_t adapter_id[16U] ; uint8_t alt1_boot_node_name[8U] ; uint16_t alt1_boot_lun_number ; uint8_t alt2_boot_node_name[8U] ; uint16_t alt2_boot_lun_number ; uint8_t alt3_boot_node_name[8U] ; uint16_t alt3_boot_lun_number ; uint8_t alt4_boot_node_name[8U] ; uint16_t alt4_boot_lun_number ; uint8_t alt5_boot_node_name[8U] ; uint16_t alt5_boot_lun_number ; uint8_t alt6_boot_node_name[8U] ; uint16_t alt6_boot_lun_number ; uint8_t alt7_boot_node_name[8U] ; uint16_t alt7_boot_lun_number ; uint8_t reserved_3[2U] ; uint8_t model_number[16U] ; uint8_t oem_specific[16U] ; uint8_t adapter_features[2U] ; uint8_t reserved_4[16U] ; uint16_t subsystem_vendor_id_2200 ; uint16_t subsystem_device_id_2200 ; uint8_t reserved_5 ; uint8_t checksum ; }; typedef struct __anonstruct_nvram_t_326 nvram_t; struct __anonstruct_sw_info_t_346 { port_id_t d_id ; uint8_t node_name[8U] ; uint8_t port_name[8U] ; uint8_t fabric_port_name[8U] ; uint16_t fp_speed ; uint8_t fc4_type ; }; typedef struct __anonstruct_sw_info_t_346 sw_info_t; struct paravirt_callee_save { void *func ; }; struct pv_irq_ops { struct paravirt_callee_save save_fl ; struct paravirt_callee_save restore_fl ; struct paravirt_callee_save irq_disable ; struct paravirt_callee_save irq_enable ; void (*safe_halt)(void) ; void (*halt)(void) ; void (*adjust_exception_frame)(void) ; }; enum hrtimer_restart; enum fc_tgtid_binding_type { FC_TGTID_BIND_NONE = 0, FC_TGTID_BIND_BY_WWPN = 1, FC_TGTID_BIND_BY_WWNN = 2, FC_TGTID_BIND_BY_ID = 3 } ; struct fc_host_attrs { u64 node_name ; u64 port_name ; u64 permanent_port_name ; u32 supported_classes ; u8 supported_fc4s[32U] ; u32 supported_speeds ; u32 maxframe_size ; u16 max_npiv_vports ; char serial_number[80U] ; char manufacturer[80U] ; char model[256U] ; char model_description[256U] ; char hardware_version[64U] ; char driver_version[64U] ; char firmware_version[64U] ; char optionrom_version[64U] ; u32 port_id ; enum fc_port_type port_type ; enum fc_port_state port_state ; u8 active_fc4s[32U] ; u32 speed ; u64 fabric_name ; char symbolic_name[256U] ; char system_hostname[256U] ; u32 dev_loss_tmo ; enum fc_tgtid_binding_type tgtid_bind_type ; struct list_head rports ; struct list_head rport_bindings ; struct list_head vports ; u32 next_rport_number ; u32 next_target_id ; u32 next_vport_number ; u16 npiv_vports_inuse ; char work_q_name[20U] ; struct workqueue_struct *work_q ; char devloss_work_q_name[20U] ; struct workqueue_struct *devloss_work_q ; struct request_queue *rqst_q ; }; struct port_database_24xx { uint16_t flags ; uint8_t current_login_state ; uint8_t last_login_state ; uint8_t hard_address[3U] ; uint8_t reserved_1 ; uint8_t port_id[3U] ; uint8_t sequence_id ; uint16_t port_timer ; uint16_t nport_handle ; uint16_t receive_data_size ; uint16_t reserved_2 ; uint8_t prli_svc_param_word_0[2U] ; uint8_t prli_svc_param_word_3[2U] ; uint8_t port_name[8U] ; uint8_t node_name[8U] ; uint8_t reserved_3[24U] ; }; struct sts_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; 
uint8_t entry_status ; uint32_t handle ; uint16_t comp_status ; uint16_t ox_id ; uint32_t residual_len ; uint16_t reserved_1 ; uint16_t state_flags ; uint16_t retry_delay ; uint16_t scsi_status ; uint32_t rsp_residual_count ; uint32_t sense_len ; uint32_t rsp_data_len ; uint8_t data[28U] ; }; struct logio_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t comp_status ; uint16_t nport_handle ; uint16_t control_flags ; uint8_t vp_index ; uint8_t reserved_1 ; uint8_t port_id[3U] ; uint8_t rsp_size ; uint32_t io_parameter[11U] ; }; struct tsk_mgmt_entry { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t reserved_1 ; uint16_t delay ; uint16_t timeout ; struct scsi_lun lun ; uint32_t control_flags ; uint8_t reserved_2[20U] ; uint8_t port_id[3U] ; uint8_t vp_index ; uint8_t reserved_3[12U] ; }; struct abort_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t options ; uint32_t handle_to_abort ; uint16_t req_que_no ; uint8_t reserved_1[30U] ; uint8_t port_id[3U] ; uint8_t vp_index ; uint8_t reserved_2[12U] ; }; struct vp_ctrl_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t vp_idx_failed ; uint16_t comp_status ; uint16_t command ; uint16_t vp_count ; uint8_t vp_idx_map[16U] ; uint16_t flags ; uint16_t id ; uint16_t reserved_4 ; uint16_t hopct ; uint8_t reserved_5[24U] ; }; struct vp_config_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t handle ; uint16_t flags ; uint16_t comp_status ; uint8_t command ; uint8_t vp_count ; uint8_t vp_index1 ; uint8_t vp_index2 ; uint8_t options_idx1 ; uint8_t hard_address_idx1 ; uint16_t reserved_vp1 ; uint8_t port_name_idx1[8U] ; uint8_t node_name_idx1[8U] ; uint8_t options_idx2 ; uint8_t hard_address_idx2 ; uint16_t reserved_vp2 ; uint8_t port_name_idx2[8U] ; uint8_t node_name_idx2[8U] ; uint16_t id ; uint16_t reserved_4 ; uint16_t hopct ; uint8_t reserved_5[2U] ; }; struct vp_rpt_id_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t vp_count ; uint16_t vp_idx ; uint8_t port_id[3U] ; uint8_t format ; uint8_t vp_idx_map[16U] ; uint8_t reserved_4[32U] ; }; struct verify_chip_entry_84xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_defined ; uint8_t entry_status ; uint32_t handle ; uint16_t options ; uint16_t reserved_1 ; uint16_t data_seg_cnt ; uint16_t reserved_2[3U] ; uint32_t fw_ver ; uint32_t exchange_address ; uint32_t reserved_3[3U] ; uint32_t fw_size ; uint32_t fw_seq_size ; uint32_t relative_offset ; uint32_t dseg_address[2U] ; uint32_t dseg_length ; }; struct verify_chip_rsp_84xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_defined ; uint8_t entry_status ; uint32_t handle ; uint16_t comp_status ; uint16_t failure_code ; uint16_t reserved_1[4U] ; uint32_t fw_ver ; uint32_t exchange_address ; uint32_t reserved_2[6U] ; }; struct msg_echo_lb { dma_addr_t send_dma ; dma_addr_t rcv_dma ; uint16_t req_sg_cnt ; uint16_t rsp_sg_cnt ; uint16_t options ; uint32_t transfer_size ; uint32_t iteration_count ; }; struct __anonstruct_port_database_t_324 { uint8_t options ; uint8_t control ; uint8_t master_state ; uint8_t slave_state ; uint8_t reserved[2U] ; uint8_t 
hard_address ; uint8_t reserved_1 ; uint8_t port_id[4U] ; uint8_t node_name[8U] ; uint8_t port_name[8U] ; uint16_t execution_throttle ; uint16_t execution_count ; uint8_t reset_count ; uint8_t reserved_2 ; uint16_t resource_allocation ; uint16_t current_allocation ; uint16_t queue_head ; uint16_t queue_tail ; uint16_t transmit_execution_list_next ; uint16_t transmit_execution_list_previous ; uint16_t common_features ; uint16_t total_concurrent_sequences ; uint16_t RO_by_information_category ; uint8_t recipient ; uint8_t initiator ; uint16_t receive_data_size ; uint16_t concurrent_sequences ; uint16_t open_sequences_per_exchange ; uint16_t lun_abort_flags ; uint16_t lun_stop_flags ; uint16_t stop_queue_head ; uint16_t stop_queue_tail ; uint16_t port_retry_timer ; uint16_t next_sequence_id ; uint16_t frame_count ; uint16_t PRLI_payload_length ; uint8_t prli_svc_param_word_0[2U] ; uint8_t prli_svc_param_word_3[2U] ; uint16_t loop_id ; uint16_t extended_lun_info_list_pointer ; uint16_t extended_lun_stop_list_pointer ; }; typedef struct __anonstruct_port_database_t_324 port_database_t; struct link_statistics { uint32_t link_fail_cnt ; uint32_t loss_sync_cnt ; uint32_t loss_sig_cnt ; uint32_t prim_seq_err_cnt ; uint32_t inval_xmit_word_cnt ; uint32_t inval_crc_cnt ; uint32_t lip_cnt ; uint32_t unused1[26U] ; uint32_t tx_frames ; uint32_t rx_frames ; uint32_t discarded_frames ; uint32_t dropped_frames ; uint32_t unused2[1U] ; uint32_t nos_rcvd ; }; struct __anonstruct_sts_entry_t_338 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t scsi_status ; uint16_t comp_status ; uint16_t state_flags ; uint16_t status_flags ; uint16_t rsp_info_len ; uint16_t req_sense_length ; uint32_t residual_length ; uint8_t rsp_info[8U] ; uint8_t req_sense_data[32U] ; }; typedef struct __anonstruct_sts_entry_t_338 sts_entry_t; struct qla_port_24xx_data { uint8_t port_name[8U] ; uint16_t loop_id ; uint16_t reserved ; }; union __anonunion_p_520 { struct tsk_mgmt_entry tsk ; struct sts_entry_24xx sts ; }; struct tsk_mgmt_cmd { union __anonunion_p_520 p ; }; union __anonunion_p_521 { struct verify_chip_entry_84xx req ; struct verify_chip_rsp_84xx rsp ; }; struct cs84xx_mgmt_cmd { union __anonunion_p_521 p ; }; enum hrtimer_restart; struct cmd_bidir { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t timeout ; uint16_t wr_dseg_count ; uint16_t rd_dseg_count ; struct scsi_lun lun ; uint16_t control_flags ; uint16_t fcp_cmnd_dseg_len ; uint32_t fcp_cmnd_dseg_address[2U] ; uint16_t reserved[2U] ; uint32_t rd_byte_count ; uint32_t wr_byte_count ; uint8_t port_id[3U] ; uint8_t vp_index ; uint32_t fcp_data_dseg_address[2U] ; uint16_t fcp_data_dseg_len ; }; struct cmd_type_6 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t timeout ; uint16_t dseg_count ; uint16_t fcp_rsp_dsd_len ; struct scsi_lun lun ; uint16_t control_flags ; uint16_t fcp_cmnd_dseg_len ; uint32_t fcp_cmnd_dseg_address[2U] ; uint32_t fcp_rsp_dseg_address[2U] ; uint32_t byte_count ; uint8_t port_id[3U] ; uint8_t vp_index ; uint32_t fcp_data_dseg_address[2U] ; uint32_t fcp_data_dseg_len ; }; struct cmd_type_7 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t timeout ; uint16_t dseg_count ; uint16_t reserved_1 ; struct scsi_lun lun ; 
uint16_t task_mgmt_flags ; uint8_t task ; uint8_t crn ; uint8_t fcp_cdb[16U] ; uint32_t byte_count ; uint8_t port_id[3U] ; uint8_t vp_index ; uint32_t dseg_0_address[2U] ; uint32_t dseg_0_len ; }; struct cmd_type_crc_2 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t timeout ; uint16_t dseg_count ; uint16_t fcp_rsp_dseg_len ; struct scsi_lun lun ; uint16_t control_flags ; uint16_t fcp_cmnd_dseg_len ; uint32_t fcp_cmnd_dseg_address[2U] ; uint32_t fcp_rsp_dseg_address[2U] ; uint32_t byte_count ; uint8_t port_id[3U] ; uint8_t vp_index ; uint32_t crc_context_address[2U] ; uint16_t crc_context_len ; uint16_t reserved_1 ; }; struct mrk_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint8_t modifier ; uint8_t reserved_1 ; uint8_t reserved_2 ; uint8_t vp_index ; uint16_t reserved_3 ; uint8_t lun[8U] ; uint8_t reserved_4[40U] ; }; struct ct_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t comp_status ; uint16_t nport_handle ; uint16_t cmd_dsd_count ; uint8_t vp_index ; uint8_t reserved_1 ; uint16_t timeout ; uint16_t reserved_2 ; uint16_t rsp_dsd_count ; uint8_t reserved_3[10U] ; uint32_t rsp_byte_count ; uint32_t cmd_byte_count ; uint32_t dseg_0_address[2U] ; uint32_t dseg_0_len ; uint32_t dseg_1_address[2U] ; uint32_t dseg_1_len ; }; struct els_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t reserved_1 ; uint16_t nport_handle ; uint16_t tx_dsd_count ; uint8_t vp_index ; uint8_t sof_type ; uint32_t rx_xchg_address ; uint16_t rx_dsd_count ; uint8_t opcode ; uint8_t reserved_2 ; uint8_t port_id[3U] ; uint8_t reserved_3 ; uint16_t reserved_4 ; uint16_t control_flags ; uint32_t rx_byte_count ; uint32_t tx_byte_count ; uint32_t tx_address[2U] ; uint32_t tx_len ; uint32_t rx_address[2U] ; uint32_t rx_len ; }; struct __anonstruct_cont_entry_t_333 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t reserved ; uint32_t dseg_0_address ; uint32_t dseg_0_length ; uint32_t dseg_1_address ; uint32_t dseg_1_length ; uint32_t dseg_2_address ; uint32_t dseg_2_length ; uint32_t dseg_3_address ; uint32_t dseg_3_length ; uint32_t dseg_4_address ; uint32_t dseg_4_length ; uint32_t dseg_5_address ; uint32_t dseg_5_length ; uint32_t dseg_6_address ; uint32_t dseg_6_length ; }; typedef struct __anonstruct_cont_entry_t_333 cont_entry_t; struct __anonstruct_cont_a64_entry_t_334 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t dseg_0_address[2U] ; uint32_t dseg_0_length ; uint32_t dseg_1_address[2U] ; uint32_t dseg_1_length ; uint32_t dseg_2_address[2U] ; uint32_t dseg_2_length ; uint32_t dseg_3_address[2U] ; uint32_t dseg_3_length ; uint32_t dseg_4_address[2U] ; uint32_t dseg_4_length ; }; typedef struct __anonstruct_cont_a64_entry_t_334 cont_a64_entry_t; struct __anonstruct_mrk_entry_t_342 { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t sys_define_2 ; target_id_t target ; uint8_t modifier ; uint8_t reserved_1 ; uint16_t sequence_number ; uint16_t lun ; uint8_t reserved_2[48U] ; }; typedef struct __anonstruct_mrk_entry_t_342 mrk_entry_t; struct mbx_entry { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define1 ; uint8_t entry_status 
; uint32_t handle ; target_id_t loop_id ; uint16_t status ; uint16_t state_flags ; uint16_t status_flags ; uint32_t sys_define2[2U] ; uint16_t mb0 ; uint16_t mb1 ; uint16_t mb2 ; uint16_t mb3 ; uint16_t mb6 ; uint16_t mb7 ; uint16_t mb9 ; uint16_t mb10 ; uint32_t reserved_2[2U] ; uint8_t node_name[8U] ; uint8_t port_name[8U] ; }; struct tsk_mgmt_entry_fx00 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; __le32 handle ; uint32_t reserved_0 ; __le16 tgt_id ; uint16_t reserved_1 ; uint16_t reserved_3 ; uint16_t reserved_4 ; struct scsi_lun lun ; __le32 control_flags ; uint8_t reserved_2[32U] ; }; struct abort_iocb_entry_fx00 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; __le32 handle ; __le32 reserved_0 ; __le16 tgt_id_sts ; __le16 options ; __le32 abort_handle ; __le32 reserved_2 ; __le16 req_que_no ; uint8_t reserved_1[38U] ; }; struct fxdisc_entry_fx00 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; __le32 handle ; __le32 reserved_0 ; __le16 func_num ; __le16 req_xfrcnt ; __le16 req_dsdcnt ; __le16 rsp_xfrcnt ; __le16 rsp_dsdcnt ; uint8_t flags ; uint8_t reserved_1 ; __le32 dseg_rq_address[2U] ; __le32 dseg_rq_len ; __le32 dseg_rsp_address[2U] ; __le32 dseg_rsp_len ; __le32 dataword ; __le32 adapid ; __le32 adapid_hi ; __le32 dataword_extra ; }; struct fw_dif_context { uint32_t ref_tag ; uint16_t app_tag ; uint8_t ref_tag_mask[4U] ; uint8_t app_tag_mask[2U] ; }; struct qla2_sgx { dma_addr_t dma_addr ; uint32_t dma_len ; uint32_t tot_bytes ; struct scatterlist *cur_sg ; uint32_t bytes_consumed ; uint32_t num_bytes ; uint32_t tot_partial ; uint32_t num_sg ; srb_t *sp ; }; enum hrtimer_restart; struct msix_entry { u32 vector ; u16 entry ; }; struct els_sts_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t comp_status ; uint16_t nport_handle ; uint16_t reserved_1 ; uint8_t vp_index ; uint8_t sof_type ; uint32_t rx_xchg_address ; uint16_t reserved_2 ; uint8_t opcode ; uint8_t reserved_3 ; uint8_t port_id[3U] ; uint8_t reserved_4 ; uint16_t reserved_5 ; uint16_t control_flags ; uint32_t total_byte_count ; uint32_t error_subcode_1 ; uint32_t error_subcode_2 ; }; struct sd_dif_tuple { __be16 guard_tag ; __be16 app_tag ; __be32 ref_tag ; }; struct __anonstruct_sts_cont_entry_t_339 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint8_t data[60U] ; }; typedef struct __anonstruct_sts_cont_entry_t_339 sts_cont_entry_t; struct __anonstruct_sts21_entry_t_340 { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t handle[15U] ; }; typedef struct __anonstruct_sts21_entry_t_340 sts21_entry_t; struct __anonstruct_sts22_entry_t_341 { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint16_t handle[30U] ; }; typedef struct __anonstruct_sts22_entry_t_341 sts22_entry_t; struct qla_init_msix_entry { char const *name ; irqreturn_t (*handler)(int , void * ) ; }; typedef bool ldv_func_ret_type___14; enum hrtimer_restart; struct new_utsname { char sysname[65U] ; char nodename[65U] ; char release[65U] ; char version[65U] ; char machine[65U] ; char domainname[65U] ; }; struct uts_namespace { struct kref kref ; struct new_utsname name ; struct user_namespace *user_ns ; struct ns_common ns ; }; struct va_list; typedef __builtin_va_list __gnuc_va_list; typedef __gnuc_va_list va_list; struct va_format 
{ char const *fmt ; va_list *va ; }; enum hrtimer_restart; struct qla2xxx_fce_chain { uint32_t type ; uint32_t chain_size ; uint32_t size ; uint32_t addr_l ; uint32_t addr_h ; uint32_t eregs[8U] ; }; struct qla2xxx_mq_chain { uint32_t type ; uint32_t chain_size ; uint32_t count ; uint32_t qregs[128U] ; }; struct qla2xxx_mqueue_header { uint32_t queue ; uint32_t number ; uint32_t size ; }; struct qla2xxx_mqueue_chain { uint32_t type ; uint32_t chain_size ; }; struct __anonstruct_aq_521 { int length ; void *ring ; }; struct __anonstruct_aqp_522 { int length ; void *ring ; }; enum hrtimer_restart; struct fc_vport_identifiers { u64 node_name ; u64 port_name ; u32 roles ; bool disable ; enum fc_port_type vport_type ; char symbolic_name[64U] ; }; struct qla_fdt_layout { uint8_t sig[4U] ; uint16_t version ; uint16_t len ; uint16_t checksum ; uint8_t unused1[2U] ; uint8_t model[16U] ; uint16_t man_id ; uint16_t id ; uint8_t flags ; uint8_t erase_cmd ; uint8_t alt_erase_cmd ; uint8_t wrt_enable_cmd ; uint8_t wrt_enable_bits ; uint8_t wrt_sts_reg_cmd ; uint8_t unprotect_sec_cmd ; uint8_t read_man_id_cmd ; uint32_t block_size ; uint32_t alt_block_size ; uint32_t flash_size ; uint32_t wrt_enable_data ; uint8_t read_id_addr_len ; uint8_t wrt_disable_bits ; uint8_t read_dev_id_len ; uint8_t chip_erase_cmd ; uint16_t read_timeout ; uint8_t protect_sec_cmd ; uint8_t unused2[65U] ; }; struct qla_flt_location { uint8_t sig[4U] ; uint16_t start_lo ; uint16_t start_hi ; uint8_t version ; uint8_t unused[5U] ; uint16_t checksum ; }; struct qla_flt_header { uint16_t version ; uint16_t length ; uint16_t checksum ; uint16_t unused ; }; struct qla_flt_region { uint32_t code ; uint32_t size ; uint32_t start ; uint32_t end ; }; struct qla_npiv_header { uint8_t sig[2U] ; uint16_t version ; uint16_t entries ; uint16_t unused[4U] ; uint16_t checksum ; }; enum hrtimer_restart; struct fc_starget_attrs { u64 node_name ; u64 port_name ; u32 port_id ; }; struct sysfs_entry { char *name ; struct bin_attribute *attr ; int is4GBp_only ; }; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; struct __anonstruct_mem_290 { uint32_t start_addr ; }; struct __anonstruct_config_291 { uint32_t id ; uint32_t param0 ; uint32_t param1 ; }; struct __anonstruct_info_292 { uint32_t type ; uint32_t context ; }; union __anonunion_u_289 { struct __anonstruct_mem_290 mem ; struct __anonstruct_config_291 config ; struct __anonstruct_info_292 info ; }; struct qla84_mgmt_param { union __anonunion_u_289 u ; }; struct qla84_msg_mgmt { uint16_t cmd ; uint16_t rsrvd ; struct qla84_mgmt_param mgmtp ; uint32_t len ; uint8_t payload[0U] ; }; struct qla_bsg_a84_mgmt { struct qla84_msg_mgmt mgmt ; }; struct qla_scsi_addr { uint16_t bus ; uint16_t target ; }; union __anonunion_dest_addr_293 { uint8_t wwnn[8U] ; uint8_t wwpn[8U] ; uint8_t id[4U] ; struct qla_scsi_addr scsi_addr ; }; struct qla_ext_dest_addr { union __anonunion_dest_addr_293 dest_addr ; uint16_t dest_type ; uint16_t lun ; uint16_t padding[2U] ; }; struct qla_port_param { struct qla_ext_dest_addr fc_scsi_addr ; uint16_t mode ; uint16_t speed ; }; struct qla_field_address { uint16_t offset ; uint16_t device ; uint16_t option ; }; struct qla_field_info { uint8_t version[36U] ; }; struct qla_image_version { struct qla_field_address field_address ; struct qla_field_info field_info ; }; struct qla_image_version_list { uint32_t count ; struct qla_image_version version[0U] ; }; struct qla_status_reg { struct qla_field_address field_address ; uint8_t status_reg ; uint8_t reserved[7U] ; }; 
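/* The declarations below cover the qla2xxx BSG/ioctl request structures (I2C, SerDes and 84xx chip access), the qla82xx firmware minidump template entries, and the ISPFx00 IOCB/status formats. */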
struct qla_i2c_access { uint16_t device ; uint16_t offset ; uint16_t option ; uint16_t length ; uint8_t buffer[64U] ; }; struct qla_serdes_reg { uint16_t cmd ; uint16_t addr ; uint16_t val ; }; struct qla_serdes_reg_ex { uint16_t cmd ; uint32_t addr ; uint32_t val ; }; struct access_chip_84xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_defined ; uint8_t entry_status ; uint32_t handle ; uint16_t options ; uint16_t reserved1 ; uint16_t dseg_count ; uint16_t reserved2[3U] ; uint32_t parameter1 ; uint32_t parameter2 ; uint32_t parameter3 ; uint32_t reserved3[3U] ; uint32_t total_byte_cnt ; uint32_t reserved4 ; uint32_t dseg_address[2U] ; uint32_t dseg_length ; }; struct qla_mt_iocb_rqst_fx00 { __le32 reserved_0 ; __le16 func_type ; uint8_t flags ; uint8_t reserved_1 ; __le32 dataword ; __le32 adapid ; __le32 adapid_hi ; __le32 dataword_extra ; __le16 req_len ; __le16 reserved_2 ; __le16 rsp_len ; __le16 reserved_3 ; }; typedef __u64 __le64; enum hrtimer_restart; struct ratelimit_state { raw_spinlock_t lock ; int interval ; int burst ; int printed ; int missed ; unsigned long begin ; }; struct crb_128M_2M_sub_block_map { unsigned int valid ; unsigned int start_128M ; unsigned int end_128M ; unsigned int start_2M ; }; struct crb_128M_2M_block_map { struct crb_128M_2M_sub_block_map sub_block[16U] ; }; struct crb_addr_pair { long addr ; long data ; }; struct qla82xx_uri_table_desc { uint32_t findex ; uint32_t num_entries ; uint32_t entry_size ; uint32_t reserved[5U] ; }; struct qla82xx_uri_data_desc { uint32_t findex ; uint32_t size ; uint32_t reserved[5U] ; }; struct qla82xx_md_template_hdr { uint32_t entry_type ; uint32_t first_entry_offset ; uint32_t size_of_template ; uint32_t capture_debug_level ; uint32_t num_of_entries ; uint32_t version ; uint32_t driver_timestamp ; uint32_t template_checksum ; uint32_t driver_capture_mask ; uint32_t driver_info[3U] ; uint32_t saved_state_array[16U] ; uint32_t capture_size_array[8U] ; uint32_t markers_array[8U] ; uint32_t num_of_free_entries ; uint32_t free_entry_offset ; uint32_t total_table_size ; uint32_t bkup_table_offset ; }; struct __anonstruct_d_ctrl_294 { uint8_t entry_capture_mask ; uint8_t entry_code ; uint8_t driver_code ; uint8_t driver_flags ; }; struct qla82xx_md_entry_hdr { uint32_t entry_type ; uint32_t entry_size ; uint32_t entry_capture_size ; struct __anonstruct_d_ctrl_294 d_ctrl ; }; typedef struct qla82xx_md_entry_hdr qla82xx_md_entry_hdr_t; struct __anonstruct_crb_strd_295 { uint8_t addr_stride ; uint8_t state_index_a ; uint16_t poll_timeout ; }; struct __anonstruct_crb_ctrl_296 { uint8_t opcode ; uint8_t state_index_v ; uint8_t shl ; uint8_t shr ; }; struct qla82xx_md_entry_crb { qla82xx_md_entry_hdr_t h ; uint32_t addr ; struct __anonstruct_crb_strd_295 crb_strd ; uint32_t data_size ; uint32_t op_count ; struct __anonstruct_crb_ctrl_296 crb_ctrl ; uint32_t value_1 ; uint32_t value_2 ; uint32_t value_3 ; }; struct __anonstruct_addr_ctrl_297 { uint16_t tag_value_stride ; uint16_t init_tag_value ; }; struct __anonstruct_cache_ctrl_298 { uint16_t write_value ; uint8_t poll_mask ; uint8_t poll_wait ; }; struct __anonstruct_read_ctrl_299 { uint8_t read_addr_stride ; uint8_t read_addr_cnt ; uint16_t rsvd_1 ; }; struct qla82xx_md_entry_cache { qla82xx_md_entry_hdr_t h ; uint32_t tag_reg_addr ; struct __anonstruct_addr_ctrl_297 addr_ctrl ; uint32_t data_size ; uint32_t op_count ; uint32_t control_addr ; struct __anonstruct_cache_ctrl_298 cache_ctrl ; uint32_t read_addr ; struct __anonstruct_read_ctrl_299 read_ctrl ; }; struct 
qla82xx_md_entry_rdocm { qla82xx_md_entry_hdr_t h ; uint32_t rsvd_0 ; uint32_t rsvd_1 ; uint32_t data_size ; uint32_t op_count ; uint32_t rsvd_2 ; uint32_t rsvd_3 ; uint32_t read_addr ; uint32_t read_addr_stride ; uint32_t read_addr_cntrl ; }; struct qla82xx_md_entry_rdmem { qla82xx_md_entry_hdr_t h ; uint32_t rsvd[6U] ; uint32_t read_addr ; uint32_t read_data_size ; }; struct qla82xx_md_entry_rdrom { qla82xx_md_entry_hdr_t h ; uint32_t rsvd[6U] ; uint32_t read_addr ; uint32_t read_data_size ; }; struct qla82xx_md_entry_mux { qla82xx_md_entry_hdr_t h ; uint32_t select_addr ; uint32_t rsvd_0 ; uint32_t data_size ; uint32_t op_count ; uint32_t select_value ; uint32_t select_value_stride ; uint32_t read_addr ; uint32_t rsvd_1 ; }; struct __anonstruct_q_strd_300 { uint16_t queue_id_stride ; uint16_t rsvd_0 ; }; struct __anonstruct_rd_strd_301 { uint8_t read_addr_stride ; uint8_t read_addr_cnt ; uint16_t rsvd_3 ; }; struct qla82xx_md_entry_queue { qla82xx_md_entry_hdr_t h ; uint32_t select_addr ; struct __anonstruct_q_strd_300 q_strd ; uint32_t data_size ; uint32_t op_count ; uint32_t rsvd_1 ; uint32_t rsvd_2 ; uint32_t read_addr ; struct __anonstruct_rd_strd_301 rd_strd ; }; struct crb_addr_pair___0 { long addr ; long data ; }; typedef __kernel_long_t __kernel_suseconds_t; struct timeval { __kernel_time_t tv_sec ; __kernel_suseconds_t tv_usec ; }; enum hrtimer_restart; struct cmd_type_7_fx00 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint8_t reserved_0 ; uint8_t port_path_ctrl ; uint16_t reserved_1 ; __le16 tgt_idx ; uint16_t timeout ; __le16 dseg_count ; uint8_t scsi_rsp_dsd_len ; uint8_t reserved_2 ; struct scsi_lun lun ; uint8_t cntrl_flags ; uint8_t task_mgmt_flags ; uint8_t task ; uint8_t crn ; uint8_t fcp_cdb[16U] ; __le32 byte_count ; uint32_t dseg_0_address[2U] ; uint32_t dseg_0_len ; }; struct sts_entry_fx00 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint32_t reserved_3 ; __le16 comp_status ; uint16_t reserved_0 ; __le32 residual_len ; uint16_t reserved_1 ; uint16_t state_flags ; uint16_t reserved_2 ; __le16 scsi_status ; uint32_t sense_len ; uint8_t data[32U] ; }; struct multi_sts_entry_fx00 { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; __le32 handles[15U] ; }; struct ioctl_iocb_entry_fx00 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint32_t reserved_0 ; uint16_t comp_func_num ; __le16 fw_iotcl_flags ; __le32 dataword_r ; uint32_t adapid ; uint32_t dataword_r_extra ; __le32 seq_no ; uint8_t reserved_2[20U] ; uint32_t residuallen ; __le32 status ; }; struct qlafx00_tgt_node_info { uint8_t tgt_node_wwpn[8U] ; uint8_t tgt_node_wwnn[8U] ; uint32_t tgt_node_state ; uint8_t reserved[128U] ; uint32_t reserved_1[8U] ; uint64_t reserved_2[4U] ; }; struct port_info_data { uint8_t port_state ; uint8_t port_type ; uint16_t port_identifier ; uint32_t up_port_state ; uint8_t fw_ver_num[32U] ; uint8_t portal_attrib ; uint16_t host_option ; uint8_t reset_delay ; uint8_t pdwn_retry_cnt ; uint16_t max_luns2tgt ; uint8_t risc_ver ; uint8_t pconn_option ; uint16_t risc_option ; uint16_t max_frame_len ; uint16_t max_iocb_alloc ; uint16_t exec_throttle ; uint8_t retry_cnt ; uint8_t retry_delay ; uint8_t port_name[8U] ; uint8_t port_id[3U] ; uint8_t link_status ; uint8_t plink_rate ; uint32_t link_config ; uint16_t adap_haddr ; uint8_t tgt_disc ; uint8_t log_tout ; uint8_t 
node_name[8U] ; uint16_t erisc_opt1 ; uint8_t resp_acc_tmr ; uint8_t intr_del_tmr ; uint8_t erisc_opt2 ; uint8_t alt_port_name[8U] ; uint8_t alt_node_name[8U] ; uint8_t link_down_tout ; uint8_t conn_type ; uint8_t fc_fw_mode ; uint32_t uiReserved[48U] ; }; struct host_system_info { uint32_t os_type ; char sysname[128U] ; char nodename[64U] ; char release[64U] ; char version[64U] ; char machine[64U] ; char domainname[64U] ; char hostdriver[64U] ; uint32_t reserved[64U] ; }; struct register_host_info { struct host_system_info hsi ; uint64_t utc ; uint32_t reserved[64U] ; }; struct config_info_data { uint8_t model_num[16U] ; uint8_t model_description[80U] ; uint8_t reserved0[160U] ; uint8_t symbolic_name[64U] ; uint8_t serial_num[32U] ; uint8_t hw_version[16U] ; uint8_t fw_version[16U] ; uint8_t uboot_version[16U] ; uint8_t fru_serial_num[32U] ; uint8_t fc_port_count ; uint8_t iscsi_port_count ; uint8_t reserved1[2U] ; uint8_t mode ; uint8_t log_level ; uint8_t reserved2[2U] ; uint32_t log_size ; uint8_t tgt_pres_mode ; uint8_t iqn_flags ; uint8_t lun_mapping ; uint64_t adapter_id ; uint32_t cluster_key_len ; uint8_t cluster_key[16U] ; uint64_t cluster_master_id ; uint64_t cluster_slave_id ; uint8_t cluster_flags ; uint32_t enabled_capabilities ; uint32_t nominal_temp_value ; }; struct qla_mt_iocb_rsp_fx00 { uint32_t reserved_1 ; uint16_t func_type ; __le16 ioctl_flags ; __le32 ioctl_data ; uint32_t adapid ; uint32_t adapid_hi ; uint32_t reserved_2 ; __le32 seq_number ; uint8_t reserved_3[20U] ; int32_t res_count ; __le32 status ; }; enum hrtimer_restart; struct qla8044_reset_entry_hdr { uint16_t cmd ; uint16_t size ; uint16_t count ; uint16_t delay ; }; struct qla8044_poll { uint32_t test_mask ; uint32_t test_value ; }; struct qla8044_rmw { uint32_t test_mask ; uint32_t xor_value ; uint32_t or_value ; uint8_t shl ; uint8_t shr ; uint8_t index_a ; uint8_t rsvd ; }; struct qla8044_entry { uint32_t arg1 ; uint32_t arg2 ; }; struct qla8044_quad_entry { uint32_t dr_addr ; uint32_t dr_value ; uint32_t ar_addr ; uint32_t ar_value ; }; struct __anonstruct_d_ctrl_302 { uint8_t entry_capture_mask ; uint8_t entry_code ; uint8_t driver_code ; uint8_t driver_flags ; }; struct qla8044_minidump_entry_hdr { uint32_t entry_type ; uint32_t entry_size ; uint32_t entry_capture_size ; struct __anonstruct_d_ctrl_302 d_ctrl ; }; struct __anonstruct_crb_strd_303 { uint8_t addr_stride ; uint8_t state_index_a ; uint16_t poll_timeout ; }; struct __anonstruct_crb_ctrl_304 { uint8_t opcode ; uint8_t state_index_v ; uint8_t shl ; uint8_t shr ; }; struct qla8044_minidump_entry_crb { struct qla8044_minidump_entry_hdr h ; uint32_t addr ; struct __anonstruct_crb_strd_303 crb_strd ; uint32_t data_size ; uint32_t op_count ; struct __anonstruct_crb_ctrl_304 crb_ctrl ; uint32_t value_1 ; uint32_t value_2 ; uint32_t value_3 ; }; struct __anonstruct_addr_ctrl_305 { uint16_t tag_value_stride ; uint16_t init_tag_value ; }; struct __anonstruct_cache_ctrl_306 { uint16_t write_value ; uint8_t poll_mask ; uint8_t poll_wait ; }; struct __anonstruct_read_ctrl_307 { uint8_t read_addr_stride ; uint8_t read_addr_cnt ; uint16_t rsvd_1 ; }; struct qla8044_minidump_entry_cache { struct qla8044_minidump_entry_hdr h ; uint32_t tag_reg_addr ; struct __anonstruct_addr_ctrl_305 addr_ctrl ; uint32_t data_size ; uint32_t op_count ; uint32_t control_addr ; struct __anonstruct_cache_ctrl_306 cache_ctrl ; uint32_t read_addr ; struct __anonstruct_read_ctrl_307 read_ctrl ; }; struct qla8044_minidump_entry_rdocm { struct qla8044_minidump_entry_hdr h ; 
uint32_t rsvd_0 ; uint32_t rsvd_1 ; uint32_t data_size ; uint32_t op_count ; uint32_t rsvd_2 ; uint32_t rsvd_3 ; uint32_t read_addr ; uint32_t read_addr_stride ; }; struct qla8044_minidump_entry_rdmem { struct qla8044_minidump_entry_hdr h ; uint32_t rsvd[6U] ; uint32_t read_addr ; uint32_t read_data_size ; }; struct qla8044_minidump_entry_rdmem_pex_dma { struct qla8044_minidump_entry_hdr h ; uint32_t desc_card_addr ; uint16_t dma_desc_cmd ; uint8_t rsvd[2U] ; uint32_t start_dma_cmd ; uint8_t rsvd2[12U] ; uint32_t read_addr ; uint32_t read_data_size ; }; struct qla8044_minidump_entry_rdrom { struct qla8044_minidump_entry_hdr h ; uint32_t rsvd[6U] ; uint32_t read_addr ; uint32_t read_data_size ; }; struct qla8044_minidump_entry_mux { struct qla8044_minidump_entry_hdr h ; uint32_t select_addr ; uint32_t rsvd_0 ; uint32_t data_size ; uint32_t op_count ; uint32_t select_value ; uint32_t select_value_stride ; uint32_t read_addr ; uint32_t rsvd_1 ; }; struct __anonstruct_q_strd_308 { uint16_t queue_id_stride ; uint16_t rsvd_0 ; }; struct __anonstruct_rd_strd_309 { uint8_t read_addr_stride ; uint8_t read_addr_cnt ; uint16_t rsvd_3 ; }; struct qla8044_minidump_entry_queue { struct qla8044_minidump_entry_hdr h ; uint32_t select_addr ; struct __anonstruct_q_strd_308 q_strd ; uint32_t data_size ; uint32_t op_count ; uint32_t rsvd_1 ; uint32_t rsvd_2 ; uint32_t read_addr ; struct __anonstruct_rd_strd_309 rd_strd ; }; struct qla8044_minidump_entry_pollrd { struct qla8044_minidump_entry_hdr h ; uint32_t select_addr ; uint32_t read_addr ; uint32_t select_value ; uint16_t select_value_stride ; uint16_t op_count ; uint32_t poll_wait ; uint32_t poll_mask ; uint32_t data_size ; uint32_t rsvd_1 ; }; struct qla8044_minidump_entry_rddfe { struct qla8044_minidump_entry_hdr h ; uint32_t addr_1 ; uint32_t value ; uint8_t stride ; uint8_t stride2 ; uint16_t count ; uint32_t poll ; uint32_t mask ; uint32_t modify_mask ; uint32_t data_size ; uint32_t rsvd ; }; struct qla8044_minidump_entry_rdmdio { struct qla8044_minidump_entry_hdr h ; uint32_t addr_1 ; uint32_t addr_2 ; uint32_t value_1 ; uint8_t stride_1 ; uint8_t stride_2 ; uint16_t count ; uint32_t poll ; uint32_t mask ; uint32_t value_2 ; uint32_t data_size ; }; struct qla8044_minidump_entry_pollwr { struct qla8044_minidump_entry_hdr h ; uint32_t addr_1 ; uint32_t addr_2 ; uint32_t value_1 ; uint32_t value_2 ; uint32_t poll ; uint32_t mask ; uint32_t data_size ; uint32_t rsvd ; }; struct qla8044_minidump_entry_rdmux2 { struct qla8044_minidump_entry_hdr h ; uint32_t select_addr_1 ; uint32_t select_addr_2 ; uint32_t select_value_1 ; uint32_t select_value_2 ; uint32_t op_count ; uint32_t select_value_mask ; uint32_t read_addr ; uint8_t select_value_stride ; uint8_t data_size ; uint8_t rsvd[2U] ; }; struct qla8044_minidump_entry_pollrdmwr { struct qla8044_minidump_entry_hdr h ; uint32_t addr_1 ; uint32_t addr_2 ; uint32_t value_1 ; uint32_t value_2 ; uint32_t poll_wait ; uint32_t poll_mask ; uint32_t modify_mask ; uint32_t data_size ; }; struct qla8044_minidump_template_hdr { uint32_t entry_type ; uint32_t first_entry_offset ; uint32_t size_of_template ; uint32_t capture_debug_level ; uint32_t num_of_entries ; uint32_t version ; uint32_t driver_timestamp ; uint32_t checksum ; uint32_t driver_capture_mask ; uint32_t driver_info_word2 ; uint32_t driver_info_word3 ; uint32_t driver_info_word4 ; uint32_t saved_state_array[16U] ; uint32_t capture_size_array[8U] ; uint32_t ocm_window_reg[16U] ; }; struct __anonstruct_cmd_310 { uint32_t read_data_size ; uint8_t rsvd[2U] ; 
uint16_t dma_desc_cmd ; }; struct qla8044_pex_dma_descriptor { struct __anonstruct_cmd_310 cmd ; uint64_t src_addr ; uint64_t dma_bus_addr ; uint8_t rsvd[24U] ; }; typedef __u64 __be64; struct _ddebug { char const *modname ; char const *function ; char const *filename ; char const *format ; unsigned int lineno : 18 ; unsigned char flags ; }; struct __wait_queue; typedef struct __wait_queue wait_queue_t; struct __wait_queue { unsigned int flags ; void *private ; int (*func)(wait_queue_t * , unsigned int , int , void * ) ; struct list_head task_list ; }; enum hrtimer_restart; struct se_tpg_np { struct se_portal_group *tpg_np_parent ; struct config_group tpg_np_group ; }; struct target_core_fabric_ops { struct module *module ; char const *name ; size_t node_acl_size ; char *(*get_fabric_name)(void) ; char *(*tpg_get_wwn)(struct se_portal_group * ) ; u16 (*tpg_get_tag)(struct se_portal_group * ) ; u32 (*tpg_get_default_depth)(struct se_portal_group * ) ; int (*tpg_check_demo_mode)(struct se_portal_group * ) ; int (*tpg_check_demo_mode_cache)(struct se_portal_group * ) ; int (*tpg_check_demo_mode_write_protect)(struct se_portal_group * ) ; int (*tpg_check_prod_mode_write_protect)(struct se_portal_group * ) ; int (*tpg_check_demo_mode_login_only)(struct se_portal_group * ) ; int (*tpg_check_prot_fabric_only)(struct se_portal_group * ) ; u32 (*tpg_get_inst_index)(struct se_portal_group * ) ; int (*check_stop_free)(struct se_cmd * ) ; void (*release_cmd)(struct se_cmd * ) ; int (*shutdown_session)(struct se_session * ) ; void (*close_session)(struct se_session * ) ; u32 (*sess_get_index)(struct se_session * ) ; u32 (*sess_get_initiator_sid)(struct se_session * , unsigned char * , u32 ) ; int (*write_pending)(struct se_cmd * ) ; int (*write_pending_status)(struct se_cmd * ) ; void (*set_default_node_attributes)(struct se_node_acl * ) ; int (*get_cmd_state)(struct se_cmd * ) ; int (*queue_data_in)(struct se_cmd * ) ; int (*queue_status)(struct se_cmd * ) ; void (*queue_tm_rsp)(struct se_cmd * ) ; void (*aborted_task)(struct se_cmd * ) ; struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs * , struct config_group * , char const * ) ; void (*fabric_drop_wwn)(struct se_wwn * ) ; struct se_portal_group *(*fabric_make_tpg)(struct se_wwn * , struct config_group * , char const * ) ; void (*fabric_drop_tpg)(struct se_portal_group * ) ; int (*fabric_post_link)(struct se_portal_group * , struct se_lun * ) ; void (*fabric_pre_unlink)(struct se_portal_group * , struct se_lun * ) ; struct se_tpg_np *(*fabric_make_np)(struct se_portal_group * , struct config_group * , char const * ) ; void (*fabric_drop_np)(struct se_tpg_np * ) ; int (*fabric_init_nodeacl)(struct se_node_acl * , char const * ) ; void (*fabric_cleanup_nodeacl)(struct se_node_acl * ) ; struct configfs_attribute **tfc_discovery_attrs ; struct configfs_attribute **tfc_wwn_attrs ; struct configfs_attribute **tfc_tpg_base_attrs ; struct configfs_attribute **tfc_tpg_np_base_attrs ; struct configfs_attribute **tfc_tpg_attrib_attrs ; struct configfs_attribute **tfc_tpg_auth_attrs ; struct configfs_attribute **tfc_tpg_param_attrs ; struct configfs_attribute **tfc_tpg_nacl_base_attrs ; struct configfs_attribute **tfc_tpg_nacl_attrib_attrs ; struct configfs_attribute **tfc_tpg_nacl_auth_attrs ; struct configfs_attribute **tfc_tpg_nacl_param_attrs ; }; struct __anonstruct_isp2x_508 { uint32_t sys_define_2 ; target_id_t target ; uint8_t target_id ; uint8_t reserved_1 ; uint16_t flags ; uint16_t resp_code ; uint16_t status ; uint16_t task_flags ; 
uint16_t seq_id ; uint16_t srr_rx_id ; uint32_t srr_rel_offs ; uint16_t srr_ui ; uint16_t srr_flags ; uint16_t srr_reject_code ; uint8_t srr_reject_vendor_uniq ; uint8_t srr_reject_code_expl ; uint8_t reserved_2[24U] ; }; struct __anonstruct_isp24_509 { uint32_t handle ; uint16_t nport_handle ; uint16_t reserved_1 ; uint16_t flags ; uint16_t srr_rx_id ; uint16_t status ; uint8_t status_subcode ; uint8_t fw_handle ; uint32_t exchange_address ; uint32_t srr_rel_offs ; uint16_t srr_ui ; uint16_t srr_flags ; uint8_t reserved_4[19U] ; uint8_t vp_index ; uint8_t srr_reject_vendor_uniq ; uint8_t srr_reject_code_expl ; uint8_t srr_reject_code ; uint8_t reserved_5[5U] ; }; union __anonunion_u_507 { struct __anonstruct_isp2x_508 isp2x ; struct __anonstruct_isp24_509 isp24 ; }; struct nack_to_isp { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; union __anonunion_u_507 u ; uint8_t reserved[2U] ; uint16_t ox_id ; }; struct ctio_to_2xxx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; target_id_t target ; uint16_t rx_id ; uint16_t flags ; uint16_t status ; uint16_t timeout ; uint16_t dseg_count ; uint32_t relative_offset ; uint32_t residual ; uint16_t reserved_1[3U] ; uint16_t scsi_status ; uint32_t transfer_length ; uint32_t dseg_0_address ; uint32_t dseg_0_length ; uint32_t dseg_1_address ; uint32_t dseg_1_length ; uint32_t dseg_2_address ; uint32_t dseg_2_length ; }; struct __anonstruct_status0_515 { uint16_t reserved1 ; __le16 flags ; uint32_t residual ; __le16 ox_id ; uint16_t scsi_status ; uint32_t relative_offset ; uint32_t reserved2 ; uint32_t transfer_length ; uint32_t reserved3 ; uint32_t dseg_0_address[2U] ; uint32_t dseg_0_length ; }; struct __anonstruct_status1_516 { uint16_t sense_length ; uint16_t flags ; uint32_t residual ; __le16 ox_id ; uint16_t scsi_status ; uint16_t response_len ; uint16_t reserved ; uint8_t sense_data[24U] ; }; union __anonunion_u_514 { struct __anonstruct_status0_515 status0 ; struct __anonstruct_status1_516 status1 ; }; struct ctio7_to_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t timeout ; uint16_t dseg_count ; uint8_t vp_index ; uint8_t add_flags ; uint8_t initiator_id[3U] ; uint8_t reserved ; uint32_t exchange_addr ; union __anonunion_u_514 u ; }; struct ctio7_from_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t status ; uint16_t timeout ; uint16_t dseg_count ; uint8_t vp_index ; uint8_t reserved1[5U] ; uint32_t exchange_address ; uint16_t reserved2 ; uint16_t flags ; uint32_t residual ; uint16_t ox_id ; uint16_t reserved3 ; uint32_t relative_offset ; uint8_t reserved4[24U] ; }; struct ctio_crc2_to_fw { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; __le16 timeout ; uint16_t dseg_count ; uint8_t vp_index ; uint8_t add_flags ; uint8_t initiator_id[3U] ; uint8_t reserved1 ; uint32_t exchange_addr ; uint16_t reserved2 ; __le16 flags ; uint32_t residual ; __le16 ox_id ; uint16_t scsi_status ; __le32 relative_offset ; uint32_t reserved5 ; __le32 transfer_length ; uint32_t reserved6 ; __le32 crc_context_address[2U] ; uint16_t crc_context_len ; uint16_t reserved_1 ; }; struct ctio_crc_from_fw { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t status ; 
uint16_t timeout ; uint16_t dseg_count ; uint32_t reserved1 ; uint16_t state_flags ; uint32_t exchange_address ; uint16_t reserved2 ; uint16_t flags ; uint32_t resid_xfer_length ; uint16_t ox_id ; uint8_t reserved3[12U] ; uint16_t runt_guard ; uint8_t actual_dif[8U] ; uint8_t expected_dif[8U] ; }; struct ba_acc_le { uint16_t reserved ; uint8_t seq_id_last ; uint8_t seq_id_valid ; uint16_t rx_id ; uint16_t ox_id ; uint16_t high_seq_cnt ; uint16_t low_seq_cnt ; }; struct ba_rjt_le { uint8_t vendor_uniq ; uint8_t reason_expl ; uint8_t reason_code ; uint8_t reserved ; }; union __anonunion_payload_517 { struct ba_acc_le ba_acct ; struct ba_rjt_le ba_rjt ; }; struct abts_resp_to_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t reserved_1 ; uint16_t nport_handle ; uint16_t control_flags ; uint8_t vp_index ; unsigned char reserved_3 : 4 ; unsigned char sof_type : 4 ; uint32_t exchange_address ; struct fcp_hdr_le fcp_hdr_le ; union __anonunion_payload_517 payload ; uint32_t reserved_4 ; uint32_t exchange_addr_to_abort ; }; struct abts_resp_from_24xx_fw { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t compl_status ; uint16_t nport_handle ; uint16_t reserved_1 ; uint8_t reserved_2 ; unsigned char reserved_3 : 4 ; unsigned char sof_type : 4 ; uint32_t exchange_address ; struct fcp_hdr_le fcp_hdr_le ; uint8_t reserved_4[8U] ; uint32_t error_subcode1 ; uint32_t error_subcode2 ; uint32_t exchange_addr_to_abort ; }; struct qla_tgt_sess_op { struct scsi_qla_host *vha ; struct atio_from_isp atio ; struct work_struct work ; }; union __anonunion____missing_field_name_518 { struct abts_recv_from_24xx abts ; struct imm_ntfy_from_isp tm_iocb ; struct atio_from_isp tm_iocb2 ; }; struct qla_tgt_sess_work_param { struct list_head sess_works_list_entry ; int type ; union __anonunion____missing_field_name_518 __annonCompField127 ; }; struct qla_tgt_prm { struct qla_tgt_cmd *cmd ; struct qla_tgt *tgt ; void *pkt ; struct scatterlist *sg ; unsigned char *sense_buffer ; int seg_cnt ; int req_cnt ; uint16_t rq_result ; uint16_t scsi_status ; int sense_buffer_len ; int residual ; int add_status_pkt ; struct scatterlist *prot_sg ; uint16_t prot_seg_cnt ; uint16_t tot_dsds ; }; struct qla_tgt_srr_imm { struct list_head srr_list_entry ; int srr_id ; struct imm_ntfy_from_isp imm_ntfy ; }; struct qla_tgt_srr_ctio { struct list_head srr_list_entry ; int srr_id ; struct qla_tgt_cmd *cmd ; }; enum hrtimer_restart; struct qla27xx_fwdt_template { uint32_t template_type ; uint32_t entry_offset ; uint32_t template_size ; uint32_t reserved_1 ; uint32_t entry_count ; uint32_t template_version ; uint32_t capture_timestamp ; uint32_t template_checksum ; uint32_t reserved_2 ; uint32_t driver_info[3U] ; uint32_t saved_state[16U] ; uint32_t reserved_3[8U] ; uint32_t firmware_version[5U] ; }; struct __anonstruct_hdr_520 { uint32_t entry_type ; uint32_t entry_size ; uint32_t reserved_1 ; uint8_t capture_flags ; uint8_t reserved_2[2U] ; uint8_t driver_flags ; }; struct __anonstruct_t0_522 { }; struct __anonstruct_t255_523 { }; struct __anonstruct_t256_524 { uint32_t base_addr ; uint8_t reg_width ; uint16_t reg_count ; uint8_t pci_offset ; }; struct __anonstruct_t257_525 { uint32_t base_addr ; uint32_t write_data ; uint8_t pci_offset ; uint8_t reserved[3U] ; }; struct __anonstruct_t258_526 { uint32_t base_addr ; uint8_t reg_width ; uint16_t reg_count ; uint8_t pci_offset ; uint8_t banksel_offset ; uint8_t 
reserved[3U] ; uint32_t bank ; }; struct __anonstruct_t259_527 { uint32_t base_addr ; uint32_t write_data ; uint8_t reserved[2U] ; uint8_t pci_offset ; uint8_t banksel_offset ; uint32_t bank ; }; struct __anonstruct_t260_528 { uint8_t pci_offset ; uint8_t reserved[3U] ; }; struct __anonstruct_t261_529 { uint8_t pci_offset ; uint8_t reserved[3U] ; uint32_t write_data ; }; struct __anonstruct_t262_530 { uint8_t ram_area ; uint8_t reserved[3U] ; uint32_t start_addr ; uint32_t end_addr ; }; struct __anonstruct_t263_531 { uint32_t num_queues ; uint8_t queue_type ; uint8_t reserved[3U] ; }; struct __anonstruct_t264_532 { uint32_t fce_trace_size ; uint64_t write_pointer ; uint64_t base_pointer ; uint32_t fce_enable_mb0 ; uint32_t fce_enable_mb2 ; uint32_t fce_enable_mb3 ; uint32_t fce_enable_mb4 ; uint32_t fce_enable_mb5 ; uint32_t fce_enable_mb6 ; }; struct __anonstruct_t265_533 { }; struct __anonstruct_t266_534 { }; struct __anonstruct_t267_535 { uint8_t pci_offset ; uint8_t reserved[3U] ; uint32_t data ; }; struct __anonstruct_t268_536 { uint8_t buf_type ; uint8_t reserved[3U] ; uint32_t buf_size ; uint64_t start_addr ; }; struct __anonstruct_t269_537 { uint32_t scratch_size ; }; struct __anonstruct_t270_538 { uint32_t addr ; uint32_t count ; }; struct __anonstruct_t271_539 { uint32_t addr ; uint32_t data ; }; struct __anonstruct_t272_540 { uint32_t addr ; uint32_t count ; }; struct __anonstruct_t273_541 { uint32_t addr ; uint32_t count ; }; struct __anonstruct_t274_542 { uint32_t num_queues ; uint8_t queue_type ; uint8_t reserved[3U] ; }; struct __anonstruct_t275_543 { uint32_t length ; uint8_t buffer[] ; }; union __anonunion____missing_field_name_521 { struct __anonstruct_t0_522 t0 ; struct __anonstruct_t255_523 t255 ; struct __anonstruct_t256_524 t256 ; struct __anonstruct_t257_525 t257 ; struct __anonstruct_t258_526 t258 ; struct __anonstruct_t259_527 t259 ; struct __anonstruct_t260_528 t260 ; struct __anonstruct_t261_529 t261 ; struct __anonstruct_t262_530 t262 ; struct __anonstruct_t263_531 t263 ; struct __anonstruct_t264_532 t264 ; struct __anonstruct_t265_533 t265 ; struct __anonstruct_t266_534 t266 ; struct __anonstruct_t267_535 t267 ; struct __anonstruct_t268_536 t268 ; struct __anonstruct_t269_537 t269 ; struct __anonstruct_t270_538 t270 ; struct __anonstruct_t271_539 t271 ; struct __anonstruct_t272_540 t272 ; struct __anonstruct_t273_541 t273 ; struct __anonstruct_t274_542 t274 ; struct __anonstruct_t275_543 t275 ; }; struct qla27xx_fwdt_entry { struct __anonstruct_hdr_520 hdr ; union __anonunion____missing_field_name_521 __annonCompField128 ; }; struct qla27xx_fwdt_entry_call { uint type ; int (*call)(struct scsi_qla_host * , struct qla27xx_fwdt_entry * , void * , ulong * ) ; }; __inline static long ldv__builtin_expect(long exp , long c ) ; extern struct module __this_module ; __inline static void set_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return; } } __inline static void clear_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr)); return; } } __inline static int test_and_set_bit(long nr , unsigned long volatile *addr ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %2, %0; setc 
%1": "+m" (*addr), "=qm" (c): "Ir" (nr): "memory"); return ((int )((signed char )c) != 0); } } __inline static int test_and_clear_bit(long nr , unsigned long volatile *addr ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %2, %0; setc %1": "+m" (*addr), "=qm" (c): "Ir" (nr): "memory"); return ((int )((signed char )c) != 0); } } __inline static int constant_test_bit(long nr , unsigned long const volatile *addr ) { { return ((int )((unsigned long )*(addr + (unsigned long )(nr >> 6)) >> ((int )nr & 63)) & 1); } } extern int sprintf(char * , char const * , ...) ; extern int snprintf(char * , size_t , char const * , ...) ; bool ldv_is_err(void const *ptr ) ; long ldv_ptr_err(void const *ptr ) ; __inline static void INIT_LIST_HEAD(struct list_head *list ) { { list->next = list; list->prev = list; return; } } extern void __list_add(struct list_head * , struct list_head * , struct list_head * ) ; __inline static void list_add_tail(struct list_head *new , struct list_head *head ) { { __list_add(new, head->prev, head); return; } } extern void __list_del_entry(struct list_head * ) ; extern void list_del(struct list_head * ) ; __inline static void list_del_init(struct list_head *entry ) { { __list_del_entry(entry); INIT_LIST_HEAD(entry); return; } } __inline static int list_empty(struct list_head const *head ) { { return ((unsigned long )((struct list_head const *)head->next) == (unsigned long )head); } } __inline static void __list_splice(struct list_head const *list , struct list_head *prev , struct list_head *next ) { struct list_head *first ; struct list_head *last ; { first = list->next; last = list->prev; first->prev = prev; prev->next = first; last->next = next; next->prev = last; return; } } __inline static void list_splice(struct list_head const *list , struct list_head *head ) { int tmp ; { tmp = list_empty(list); if (tmp == 0) { __list_splice(list, head, head->next); } else { } return; } } __inline static void list_splice_init(struct list_head *list , struct list_head *head ) { int tmp ; { tmp = list_empty((struct list_head const *)list); if (tmp == 0) { __list_splice((struct list_head const *)list, head, head->next); INIT_LIST_HEAD(list); } else { } return; } } extern void __bad_percpu_size(void) ; extern struct task_struct *current_task ; __inline static struct task_struct *get_current(void) { struct task_struct *pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& current_task)); goto ldv_3129; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_3129; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_3129; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_3129; default: __bad_percpu_size(); } ldv_3129: ; return (pfo_ret__); } } extern void *memcpy(void * , void const * , size_t ) ; extern void *memset(void * , int , size_t ) ; extern char *strcpy(char * , char const * ) ; extern char *strcat(char * , char const * ) ; extern int __bitmap_weight(unsigned long const * , unsigned int ) ; __inline static int bitmap_weight(unsigned long const *src , unsigned int nbits ) { int tmp___0 ; { tmp___0 = __bitmap_weight(src, nbits); return (tmp___0); } } extern int nr_cpu_ids ; extern struct cpumask const * const cpu_online_mask ; __inline static unsigned int cpumask_weight(struct cpumask const *srcp ) { int tmp ; { tmp = bitmap_weight((unsigned long const *)(& srcp->bits), 
(unsigned int )nr_cpu_ids); return ((unsigned int )tmp); } } __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; __inline static void rep_nop(void) { { __asm__ volatile ("rep; nop": : : "memory"); return; } } __inline static void cpu_relax(void) { { rep_nop(); return; } } extern void __xchg_wrong_size(void) ; __inline static int atomic_read(atomic_t const *v ) { int __var ; { __var = 0; return ((int )*((int const volatile *)(& v->counter))); } } __inline static void atomic_set(atomic_t *v , int i ) { { v->counter = i; return; } } __inline static void atomic_inc(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; incl %0": "+m" (v->counter)); return; } } __inline static void atomic_dec(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0": "+m" (v->counter)); return; } } __inline static int atomic_dec_and_test(atomic_t *v ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0; sete %1": "+m" (v->counter), "=qm" (c): : "memory"); return ((int )((signed char )c) != 0); } } extern int __preempt_count ; __inline static int preempt_count(void) { int pfo_ret__ ; { switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (__preempt_count)); goto ldv_6002; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6002; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6002; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6002; default: __bad_percpu_size(); } ldv_6002: ; return (pfo_ret__ & 2147483647); } } extern void lockdep_init_map(struct lockdep_map * , char const * , struct lock_class_key * , int ) ; extern void __raw_spin_lock_init(raw_spinlock_t * , char const * , struct lock_class_key * ) ; extern unsigned long _raw_spin_lock_irqsave(raw_spinlock_t * ) ; extern void _raw_spin_unlock_irqrestore(raw_spinlock_t * , unsigned long ) ; __inline static raw_spinlock_t *spinlock_check(spinlock_t *lock ) { { return (& lock->__annonCompField18.rlock); } } __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) { { _raw_spin_unlock_irqrestore(& lock->__annonCompField18.rlock, flags); return; } } extern void __init_waitqueue_head(wait_queue_head_t * , char const * , struct lock_class_key * ) ; extern void __mutex_init(struct mutex * , char const * , struct lock_class_key * ) ; extern void mutex_lock_nested(struct mutex * , unsigned int ) ; extern void mutex_unlock(struct mutex * ) ; __inline static void init_completion(struct completion *x ) { struct lock_class_key __key ; { x->done = 0U; __init_waitqueue_head(& x->wait, "&x->wait", & __key); return; } } extern void complete(struct completion * ) ; extern unsigned long volatile jiffies ; extern int mod_timer(struct timer_list * , unsigned long ) ; int ldv_mod_timer_11(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; extern void add_timer(struct timer_list * ) ; extern int del_timer_sync(struct timer_list * ) ; int ldv_del_timer_sync_12(struct timer_list *ldv_func_arg1 ) ; extern void __init_work(struct work_struct * , int ) ; extern struct workqueue_struct *__alloc_workqueue_key(char const * , unsigned int , int , struct lock_class_key * , char const * , ...) 
; extern void destroy_workqueue(struct workqueue_struct * ) ; void ldv_destroy_workqueue_13(struct workqueue_struct *ldv_func_arg1 ) ; void ldv_destroy_workqueue_16(struct workqueue_struct *ldv_func_arg1 ) ; void ldv_destroy_workqueue_18(struct workqueue_struct *ldv_func_arg1 ) ; void ldv_destroy_workqueue_22(struct workqueue_struct *ldv_func_arg1 ) ; extern bool queue_work_on(int , struct workqueue_struct * , struct work_struct * ) ; bool ldv_queue_work_on_5(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_7(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; extern bool queue_delayed_work_on(int , struct workqueue_struct * , struct delayed_work * , unsigned long ) ; bool ldv_queue_delayed_work_on_6(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_9(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; extern void flush_workqueue(struct workqueue_struct * ) ; void ldv_flush_workqueue_8(struct workqueue_struct *ldv_func_arg1 ) ; void ldv_flush_workqueue_14(struct workqueue_struct *ldv_func_arg1 ) ; void ldv_flush_workqueue_15(struct workqueue_struct *ldv_func_arg1 ) ; extern bool cancel_work_sync(struct work_struct * ) ; bool ldv_cancel_work_sync_17(struct work_struct *ldv_func_arg1 ) ; bool ldv_cancel_work_sync_19(struct work_struct *ldv_func_arg1 ) ; bool ldv_cancel_work_sync_20(struct work_struct *ldv_func_arg1 ) ; bool ldv_cancel_work_sync_21(struct work_struct *ldv_func_arg1 ) ; bool ldv_cancel_work_sync_23(struct work_struct *ldv_func_arg1 ) ; __inline static bool queue_work(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_5(8192, wq, work); return (tmp); } } __inline static unsigned short readw(void const volatile *addr ) { unsigned short ret ; { __asm__ volatile ("movw %1,%0": "=r" (ret): "m" (*((unsigned short volatile *)addr)): "memory"); return (ret); } } __inline static unsigned int readl(void const volatile *addr ) { unsigned int ret ; { __asm__ volatile ("movl %1,%0": "=r" (ret): "m" (*((unsigned int volatile *)addr)): "memory"); return (ret); } } __inline static void writew(unsigned short val , void volatile *addr ) { { __asm__ volatile ("movw %0,%1": : "r" (val), "m" (*((unsigned short volatile *)addr)): "memory"); return; } } __inline static void writel(unsigned int val , void volatile *addr ) { { __asm__ volatile ("movl %0,%1": : "r" (val), "m" (*((unsigned int volatile *)addr)): "memory"); return; } } extern void *ioremap_nocache(resource_size_t , unsigned long ) ; __inline static void *ioremap(resource_size_t offset , unsigned long size ) { void *tmp ; { tmp = ioremap_nocache(offset, size); return (tmp); } } extern void iounmap(void volatile * ) ; __inline static char const *kobject_name(struct kobject const *kobj ) { { return ((char const *)kobj->name); } } extern int kobject_uevent_env(struct kobject * , enum kobject_action , char ** ) ; extern void schedule(void) ; extern void set_user_nice(struct task_struct * , long ) ; extern int wake_up_process(struct task_struct * ) ; extern struct kmem_cache *kmem_cache_create(char const * , size_t , size_t , unsigned long , void (*)(void * ) ) ; extern void kmem_cache_destroy(struct kmem_cache * ) ; extern void kfree(void const * ) ; extern void *__kmalloc(size_t , gfp_t ) ; 
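/*
 * The kmalloc()/kzalloc() inlines just below are the usual kernel wrappers
 * around __kmalloc(). The ldv_malloc()/ldv_zalloc() definitions that follow
 * them are the LDV environment model of allocation: __VERIFIER_nondet_int()
 * nondeterministically chooses between returning NULL (modelling allocation
 * failure) and returning a malloc()/calloc() pointer that __VERIFIER_assume()
 * constrains to be non-NULL, so the verifier explores both the success and
 * the failure path of every allocation site; ldv_init_zalloc() has no failure
 * branch and is always assumed to succeed. A hypothetical caller is therefore
 * checked on both branches, e.g.:
 *
 *     struct foo *p = ldv_malloc(sizeof(*p));   // 'struct foo' is illustrative only
 *     if (p == NULL)
 *             return -ENOMEM;                   // failure branch is reachable
 *     // ... success branch continues ...
 */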
__inline static void *kmalloc(size_t size , gfp_t flags ) { void *tmp___2 ; { tmp___2 = __kmalloc(size, flags); return (tmp___2); } } __inline static void *kzalloc(size_t size , gfp_t flags ) { void *tmp ; { tmp = kmalloc(size, flags | 32768U); return (tmp); } } extern void *malloc(size_t ) ; extern void *calloc(size_t , size_t ) ; extern int __VERIFIER_nondet_int(void) ; extern unsigned long __VERIFIER_nondet_ulong(void) ; extern void *__VERIFIER_nondet_pointer(void) ; extern void __VERIFIER_assume(int ) ; void *ldv_malloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = malloc(size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } void *ldv_zalloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = calloc(1UL, size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } void *ldv_init_zalloc(size_t size ) { void *p ; void *tmp ; { tmp = calloc(1UL, size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } void *ldv_memset(void *s , int c , size_t n ) { void *tmp ; { tmp = memset(s, c, n); return (tmp); } } int ldv_undef_int(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); return (tmp); } } void *ldv_undef_ptr(void) { void *tmp ; { tmp = __VERIFIER_nondet_pointer(); return (tmp); } } unsigned long ldv_undef_ulong(void) { unsigned long tmp ; { tmp = __VERIFIER_nondet_ulong(); return (tmp); } } __inline static void ldv_stop(void) { { LDV_STOP: ; goto LDV_STOP; } } __inline static long ldv__builtin_expect(long exp , long c ) { { return (exp); } } struct kobject *sysfs_fw_dump_attr_group0 ; int ldv_timer_state_22 = 0; struct work_struct *ldv_work_struct_9_2 ; int ldv_state_variable_47 ; int ldv_state_variable_20 ; struct work_struct *ldv_work_struct_3_1 ; struct scsi_qla_host *qla2300_isp_ops_group0 ; int ldv_work_12_3 ; struct timer_list *ldv_timer_list_26 ; struct fc_port *qla24xx_isp_ops_group2 ; int ldv_work_1_1 ; struct device_attribute *dev_attr_beacon_group0 ; struct scsi_qla_host *qla81xx_isp_ops_group0 ; int ldv_work_9_3 ; struct timer_list *ldv_timer_list_29 ; int ldv_state_variable_54 ; int ldv_state_variable_14 ; struct kobject *sysfs_fw_dump_template_attr_group0 ; int ldv_state_variable_37 ; int ldv_state_variable_17 ; int ldv_state_variable_51 ; struct work_struct *ldv_work_struct_10_1 ; struct scsi_qla_host *qlafx00_isp_ops_group0 ; int ldv_work_7_2 ; int ldv_state_variable_66 ; int ldv_state_variable_19 ; struct timer_list *ldv_timer_list_18 ; struct work_struct *ldv_work_struct_4_3 ; int ldv_state_variable_27 ; int ldv_state_variable_9 ; struct work_struct *ldv_work_struct_7_1 ; struct work_struct *ldv_work_struct_2_2 ; int ldv_state_variable_42 ; int ldv_state_variable_83 ; int ldv_work_3_3 ; struct work_struct *ldv_work_struct_11_0 ; struct work_struct *ldv_work_struct_4_0 ; int ldv_state_variable_7 ; struct fc_port *qla83xx_isp_ops_group2 ; int ldv_state_variable_55 ; struct fc_port *qla27xx_isp_ops_group2 ; struct fc_rport *qla2xxx_transport_vport_functions_group2 ; int ldv_work_1_3 ; struct work_struct *ldv_work_struct_2_1 ; int ldv_work_4_0 ; struct work_struct *ldv_work_struct_3_2 ; struct scsi_target *qla2xxx_transport_vport_functions_group1 ; int ldv_state_variable_80 ; struct work_struct *ldv_work_struct_12_3 ; struct Scsi_Host 
*qla2xxx_transport_vport_functions_group0 ; struct work_struct *ldv_work_struct_7_2 ; int ldv_state_variable_64 ; struct pci_dev *qla2xxx_pci_driver_group1 ; int ldv_state_variable_26 ; int ldv_state_variable_28 ; struct work_struct *ldv_work_struct_7_0 ; struct timer_list *ldv_timer_list_20 ; int LDV_IN_INTERRUPT = 1; struct timer_list *ldv_timer_list_17 ; struct work_struct *ldv_work_struct_6_0 ; struct fc_port *qla8044_isp_ops_group2 ; int ldv_timer_state_18 = 0; int ldv_work_8_3 ; int ldv_state_variable_58 ; struct bin_attribute *sysfs_fw_dump_attr_group2 ; int ldv_work_5_2 ; int ldv_work_7_1 ; int ldv_state_variable_78 ; int ldv_state_variable_76 ; int ldv_state_variable_31 ; int ldv_work_6_2 ; int ldv_state_variable_68 ; int ldv_work_2_1 ; struct fc_vport *qla2xxx_transport_functions_group2 ; int ldv_state_variable_8 ; int ldv_state_variable_46 ; int ldv_state_variable_15 ; struct work_struct *ldv_work_struct_1_3 ; int ldv_work_8_0 ; int ldv_state_variable_75 ; struct qla_hw_data *qla8044_isp_ops_group1 ; int ldv_state_variable_74 ; struct file *sysfs_fw_dump_template_attr_group1 ; int ldv_state_variable_21 ; int ldv_state_variable_33 ; struct work_struct *ldv_work_struct_8_0 ; struct fc_port *qla25xx_isp_ops_group2 ; int ldv_state_variable_69 ; struct qla_hw_data *qla81xx_isp_ops_group1 ; struct timer_list *ldv_timer_list_21 ; int ldv_work_3_0 ; struct work_struct *ldv_work_struct_11_1 ; int ldv_work_10_0 ; int ldv_state_variable_88 ; int ldv_state_variable_65 ; int ldv_work_12_2 ; struct qla_hw_data *qla82xx_isp_ops_group1 ; struct kobject *sysfs_optrom_attr_group0 ; int ldv_state_variable_70 ; struct qla_hw_data *qlafx00_isp_ops_group1 ; int ldv_state_variable_62 ; int ldv_state_variable_41 ; int ldv_work_5_3 ; int ldv_state_variable_40 ; int ldv_timer_state_17 = 0; struct timer_list *ldv_timer_list_19 ; int ldv_work_6_1 ; struct device *dev_attr_zio_group1 ; struct device *dev_attr_zio_timer_group1 ; struct work_struct *ldv_work_struct_1_0 ; int ldv_state_variable_10 ; int ldv_work_7_0 ; struct work_struct *ldv_work_struct_12_0 ; int ldv_work_4_1 ; struct kobject *sysfs_vpd_attr_group0 ; struct inode *dfs_fce_ops_group1 ; int ldv_state_variable_63 ; int ldv_work_10_1 ; struct work_struct *ldv_work_struct_7_3 ; struct kobject *sysfs_nvram_attr_group0 ; int ldv_state_variable_2 ; int ldv_state_variable_25 ; int ldv_timer_state_29 = 0; int ldv_work_2_0 ; struct file *sysfs_optrom_attr_group1 ; struct scsi_device *qla2xxx_driver_template_group2 ; struct work_struct *ldv_work_struct_10_0 ; struct pci_dev *qla2xxx_err_handler_group0 ; int ldv_work_4_2 ; int ldv_state_variable_11 ; int ldv_work_1_2 ; int ldv_state_variable_79 ; struct timer_list *ldv_timer_list_27 ; int ldv_state_variable_18 ; struct work_struct *ldv_work_struct_5_0 ; struct device *dev_attr_beacon_group1 ; struct work_struct *ldv_work_struct_9_1 ; struct timer_list *ldv_timer_list_13 ; int ldv_work_2_2 ; int ldv_timer_state_30 = 0; int ldv_state_variable_32 ; int ldv_work_11_3 ; struct qla_hw_data *qla25xx_isp_ops_group1 ; struct fc_bsg_job *qla2xxx_transport_vport_functions_group3 ; int ldv_work_11_2 ; int pci_counter ; struct scsi_qla_host *qla24xx_isp_ops_group0 ; int ldv_state_variable_30 ; int ldv_work_8_1 ; int ldv_state_variable_0 ; int ldv_state_variable_81 ; int ldv_state_variable_45 ; int ldv_timer_state_23 = 0; struct qla_hw_data *qla2100_isp_ops_group1 ; int ldv_state_variable_12 ; struct timer_list *ldv_timer_list_22 ; int ldv_state_variable_87 ; int ldv_timer_state_16 = 0; int ldv_state_variable_22 ; 
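/* The ldv_state_variable_N, ldv_work_*, ldv_timer_state_N, ldv_timer_list_N
 * and *_groupN globals in this run of declarations belong to LDV's generated
 * environment model: they record the state of each registered callback group
 * (isp_ops tables, sysfs attributes, timers, work structs) and are used by
 * the activate_work_N(), choose_timer_N(), reg_timer_N() and disable_*()
 * harness functions declared further below to invoke the driver's callbacks
 * only in admissible orders. */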
struct timer_list *ldv_timer_list_16 ; struct fc_port *qla2300_isp_ops_group2 ; int ldv_state_variable_73 ; int ldv_timer_state_28 = 0; int ldv_state_variable_29 ; struct timer_list *ldv_timer_list_24 ; struct fc_port *qlafx00_isp_ops_group2 ; struct Scsi_Host *qla2xxx_transport_functions_group0 ; struct device *dev_attr_allow_cna_fw_dump_group1 ; struct work_struct *ldv_work_struct_8_1 ; struct work_struct *ldv_work_struct_2_0 ; int ldv_state_variable_61 ; int ldv_work_6_0 ; int ldv_work_9_0 ; struct scsi_qla_host *qla82xx_isp_ops_group0 ; struct work_struct *ldv_work_struct_6_1 ; int ref_cnt ; struct work_struct *ldv_work_struct_10_3 ; struct work_struct *ldv_work_struct_8_3 ; struct scsi_qla_host *qla27xx_isp_ops_group0 ; struct work_struct *ldv_work_struct_3_3 ; int ldv_state_variable_23 ; int ldv_state_variable_72 ; struct work_struct *ldv_work_struct_1_1 ; int ldv_state_variable_59 ; struct file *sysfs_vpd_attr_group1 ; int ldv_state_variable_6 ; int ldv_timer_state_26 = 0; int ldv_work_5_0 ; int ldv_state_variable_50 ; struct work_struct *ldv_work_struct_4_2 ; int ldv_state_variable_84 ; int ldv_state_variable_86 ; int ldv_state_variable_44 ; int ldv_state_variable_38 ; int ldv_state_variable_39 ; struct work_struct *ldv_work_struct_5_1 ; int ldv_state_variable_56 ; int ldv_state_variable_3 ; struct file *sysfs_nvram_attr_group1 ; struct timer_list *ldv_timer_list_28 ; struct work_struct *ldv_work_struct_12_1 ; int ldv_state_variable_52 ; int ldv_work_11_0 ; struct fc_rport *qla2xxx_transport_functions_group3 ; int ldv_work_1_0 ; struct work_struct *ldv_work_struct_11_2 ; struct file *apidev_fops_group2 ; int ldv_state_variable_4 ; struct work_struct *ldv_work_struct_9_0 ; struct work_struct *ldv_work_struct_9_3 ; int ldv_work_10_2 ; struct device_attribute *dev_attr_allow_cna_fw_dump_group0 ; struct device_attribute *dev_attr_zio_timer_group0 ; int ldv_state_variable_60 ; int ldv_state_variable_36 ; int ldv_work_9_2 ; struct qla_hw_data *qla83xx_isp_ops_group1 ; struct work_struct *ldv_work_struct_6_3 ; int ldv_state_variable_48 ; struct work_struct *ldv_work_struct_5_2 ; struct timer_list *ldv_timer_list_23 ; int ldv_work_9_1 ; int ldv_state_variable_5 ; struct work_struct *ldv_work_struct_5_3 ; struct timer_list *ldv_timer_list_25 ; struct qla_hw_data *qla2300_isp_ops_group1 ; int ldv_state_variable_13 ; int ldv_timer_state_19 = 0; int ldv_timer_state_24 = 0; struct qla_hw_data *qla27xx_isp_ops_group1 ; struct fc_port *qla81xx_isp_ops_group2 ; int ldv_work_3_2 ; struct scsi_cmnd *qla2xxx_driver_template_group0 ; struct scsi_qla_host *qla2100_isp_ops_group0 ; struct qla_hw_data *qla24xx_isp_ops_group1 ; struct fc_port *qla82xx_isp_ops_group2 ; struct scsi_qla_host *qla25xx_isp_ops_group0 ; int ldv_state_variable_82 ; struct work_struct *ldv_work_struct_2_3 ; int ldv_timer_state_14 = 0; struct bin_attribute *sysfs_optrom_attr_group2 ; int ldv_work_11_1 ; struct timer_list *ldv_timer_list_15 ; struct device_attribute *dev_attr_zio_group0 ; int ldv_state_variable_49 ; int ldv_timer_state_27 = 0; int ldv_state_variable_24 ; int ldv_work_7_3 ; int ldv_work_12_0 ; struct file *dfs_fce_ops_group2 ; int ldv_state_variable_1 ; struct Scsi_Host *qla2xxx_driver_template_group1 ; int ldv_timer_state_21 = 0; int ldv_state_variable_85 ; int ldv_timer_state_25 = 0; int ldv_work_12_1 ; struct scsi_qla_host *qla83xx_isp_ops_group0 ; struct timer_list *ldv_timer_list_14 ; int ldv_state_variable_71 ; struct work_struct *ldv_work_struct_6_2 ; struct work_struct *ldv_work_struct_10_2 ; struct 
scsi_target *qla2xxx_transport_functions_group1 ; struct fc_port *qla2100_isp_ops_group2 ; int ldv_state_variable_77 ; struct file *sysfs_fw_dump_attr_group1 ; struct work_struct *ldv_work_struct_8_2 ; struct timer_list *ldv_timer_list_30 ; int ldv_work_4_3 ; int ldv_work_3_1 ; int ldv_state_variable_16 ; struct work_struct *ldv_work_struct_12_2 ; int ldv_state_variable_43 ; int ldv_work_5_1 ; struct bin_attribute *sysfs_vpd_attr_group2 ; int ldv_state_variable_57 ; int ldv_work_6_3 ; struct bin_attribute *sysfs_nvram_attr_group2 ; struct work_struct *ldv_work_struct_3_0 ; int ldv_timer_state_20 = 0; struct fc_bsg_job *qla2xxx_transport_functions_group4 ; int ldv_state_variable_67 ; int ldv_state_variable_53 ; int ldv_timer_state_15 = 0; struct work_struct *ldv_work_struct_1_2 ; int ldv_timer_state_13 = 0; int ldv_work_8_2 ; struct work_struct *ldv_work_struct_4_1 ; int ldv_state_variable_34 ; struct bin_attribute *sysfs_fw_dump_template_attr_group2 ; int ldv_work_2_3 ; int ldv_work_10_3 ; void *apidev_fops_group1 ; struct scsi_qla_host *qla8044_isp_ops_group0 ; int ldv_state_variable_35 ; struct work_struct *ldv_work_struct_11_3 ; void activate_work_5(struct work_struct *work , int state ) ; void choose_timer_13(struct timer_list *timer ) ; void ldv_initialize_isp_operations_85(void) ; void ldv_initialize_isp_operations_78(void) ; void work_init_9(void) ; void work_init_5(void) ; void call_and_disable_all_4(int state ) ; void activate_work_1(struct work_struct *work , int state ) ; void ldv_initialize_isp_operations_81(void) ; int reg_timer_21(struct timer_list *timer ) ; void call_and_disable_work_3(struct work_struct *work ) ; void disable_work_7(struct work_struct *work ) ; void disable_work_3(struct work_struct *work ) ; void ldv_initialize_isp_operations_82(void) ; void work_init_1(void) ; void disable_suitable_timer_21(struct timer_list *timer ) ; void invoke_work_4(void) ; void ldv_initialize_device_attribute_53(void) ; void ldv_initialize_bin_attribute_69(void) ; void activate_pending_timer_13(struct timer_list *timer , unsigned long data , int pending_flag ) ; void work_init_8(void) ; void ldv_initialize_fc_function_template_32(void) ; void ldv_pci_driver_76(void) ; void activate_work_2(struct work_struct *work , int state ) ; void ldv_file_operations_31(void) ; void work_init_10(void) ; void invoke_work_5(void) ; void ldv_initialize_scsi_host_template_88(void) ; void ldv_initialize_isp_operations_79(void) ; void ldv_initialize_isp_operations_83(void) ; void disable_work_4(struct work_struct *work ) ; void work_init_4(void) ; void invoke_work_1(void) ; void ldv_initialize_device_attribute_54(void) ; void call_and_disable_all_3(int state ) ; void activate_pending_timer_14(struct timer_list *timer , unsigned long data , int pending_flag ) ; void call_and_disable_work_4(struct work_struct *work ) ; void ldv_initialize_bin_attribute_71(void) ; void ldv_initialize_fc_function_template_33(void) ; void work_init_3(void) ; void call_and_disable_all_7(int state ) ; void call_and_disable_work_1(struct work_struct *work ) ; void disable_suitable_timer_13(struct timer_list *timer ) ; void ldv_initialize_bin_attribute_73(void) ; void work_init_11(void) ; void ldv_file_operations_75(void) ; void ldv_initialize_bin_attribute_72(void) ; void call_and_disable_all_2(int state ) ; void activate_work_3(struct work_struct *work , int state ) ; void work_init_7(void) ; void ldv_initialize_isp_operations_87(void) ; void disable_work_5(struct work_struct *work ) ; void disable_work_1(struct work_struct 
*work ) ; void ldv_initialize_isp_operations_80(void) ; void call_and_disable_work_5(struct work_struct *work ) ; int reg_timer_14(struct timer_list *timer ) ; void invoke_work_2(void) ; void ldv_initialize_device_attribute_55(void) ; void ldv_initialize_pci_error_handlers_77(void) ; void activate_work_4(struct work_struct *work , int state ) ; void ldv_initialize_isp_operations_84(void) ; void call_and_disable_all_5(int state ) ; void work_init_2(void) ; void disable_suitable_timer_14(struct timer_list *timer ) ; void call_and_disable_all_1(int state ) ; void ldv_initialize_bin_attribute_74(void) ; void work_init_6(void) ; void work_init_12(void) ; void ldv_initialize_isp_operations_86(void) ; void activate_pending_timer_21(struct timer_list *timer , unsigned long data , int pending_flag ) ; void activate_work_7(struct work_struct *work , int state ) ; void disable_work_2(struct work_struct *work ) ; void ldv_initialize_device_attribute_34(void) ; void invoke_work_3(void) ; int reg_timer_13(struct timer_list *timer ) ; void choose_timer_14(struct timer_list *timer ) ; void call_and_disable_work_2(struct work_struct *work ) ; __inline static char const *dev_name(struct device const *dev ) { char const *tmp ; { if ((unsigned long )dev->init_name != (unsigned long )((char const */* const */)0)) { return ((char const *)dev->init_name); } else { } tmp = kobject_name(& dev->kobj); return (tmp); } } __inline static void *dev_get_drvdata(struct device const *dev ) { { return ((void *)dev->driver_data); } } __inline static void dev_set_drvdata(struct device *dev , void *data ) { { dev->driver_data = data; return; } } __inline static int pci_channel_offline(struct pci_dev *pdev ) { { return (pdev->error_state != 1U); } } extern void pci_dev_put(struct pci_dev * ) ; extern struct pci_dev *pci_get_domain_bus_and_slot(int , unsigned int , unsigned int ) ; extern int pci_bus_read_config_byte(struct pci_bus * , unsigned int , int , u8 * ) ; extern int pci_bus_read_config_word(struct pci_bus * , unsigned int , int , u16 * ) ; __inline static int pci_read_config_byte(struct pci_dev const *dev , int where , u8 *val ) { int tmp ; { tmp = pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); return (tmp); } } __inline static int pci_read_config_word(struct pci_dev const *dev , int where , u16 *val ) { int tmp ; { tmp = pci_bus_read_config_word(dev->bus, dev->devfn, where, val); return (tmp); } } extern int pcie_capability_read_dword(struct pci_dev * , int , u32 * ) ; extern int pci_enable_device(struct pci_dev * ) ; extern int pci_enable_device_mem(struct pci_dev * ) ; extern void pci_disable_device(struct pci_dev * ) ; extern int pci_select_bars(struct pci_dev * , unsigned long ) ; extern int pci_save_state(struct pci_dev * ) ; extern void pci_restore_state(struct pci_dev * ) ; extern int pci_request_selected_regions(struct pci_dev * , int , char const * ) ; extern void pci_release_selected_regions(struct pci_dev * , int ) ; extern int __pci_register_driver(struct pci_driver * , struct module * , char const * ) ; int ldv___pci_register_driver_28(struct pci_driver *ldv_func_arg1 , struct module *ldv_func_arg2 , char const *ldv_func_arg3 ) ; extern void pci_unregister_driver(struct pci_driver * ) ; void ldv_pci_unregister_driver_30(struct pci_driver *ldv_func_arg1 ) ; extern struct dma_pool *dma_pool_create(char const * , struct device * , size_t , size_t , size_t ) ; extern void dma_pool_destroy(struct dma_pool * ) ; extern void *dma_pool_alloc(struct dma_pool * , gfp_t , dma_addr_t * ) ; extern void 
dma_pool_free(struct dma_pool * , void * , dma_addr_t ) ; __inline static int pci_domain_nr(struct pci_bus *bus ) { struct pci_sysdata *sd ; { sd = (struct pci_sysdata *)bus->sysdata; return (sd->domain); } } __inline static int valid_dma_direction(int dma_direction ) { { return ((dma_direction == 0 || dma_direction == 1) || dma_direction == 2); } } extern void debug_dma_unmap_sg(struct device * , struct scatterlist * , int , int ) ; extern struct dma_map_ops *dma_ops ; __inline static struct dma_map_ops *get_dma_ops(struct device *dev ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )dev == (unsigned long )((struct device *)0), 0L); if (tmp != 0L || (unsigned long )dev->archdata.dma_ops == (unsigned long )((struct dma_map_ops *)0)) { return (dma_ops); } else { return (dev->archdata.dma_ops); } } } __inline static void dma_unmap_sg_attrs(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (70), "i" (12UL)); ldv_27049: ; goto ldv_27049; } else { } debug_dma_unmap_sg(dev, sg, nents, (int )dir); if ((unsigned long )ops->unmap_sg != (unsigned long )((void (*)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_sg))(dev, sg, nents, dir, attrs); } else { } return; } } extern int dma_supported(struct device * , u64 ) ; extern int dma_set_mask(struct device * , u64 ) ; extern void *dma_alloc_attrs(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ) ; extern void dma_free_attrs(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ) ; __inline static int dma_set_coherent_mask(struct device *dev , u64 mask ) { int tmp ; { tmp = dma_supported(dev, mask); if (tmp == 0) { return (-5); } else { } dev->coherent_dma_mask = mask; return (0); } } extern u64 dma_get_required_mask(struct device * ) ; __inline static int pci_set_consistent_dma_mask(struct pci_dev *dev , u64 mask ) { int tmp ; { tmp = dma_set_coherent_mask(& dev->dev, mask); return (tmp); } } __inline static void *pci_get_drvdata(struct pci_dev *pdev ) { void *tmp ; { tmp = dev_get_drvdata((struct device const *)(& pdev->dev)); return (tmp); } } __inline static void pci_set_drvdata(struct pci_dev *pdev , void *data ) { { dev_set_drvdata(& pdev->dev, data); return; } } __inline static char const *pci_name(struct pci_dev const *pdev ) { char const *tmp ; { tmp = dev_name(& pdev->dev); return (tmp); } } __inline static int pci_pcie_cap(struct pci_dev *dev ) { { return ((int )dev->pcie_cap); } } __inline static bool pci_is_pcie(struct pci_dev *dev ) { int tmp ; { tmp = pci_pcie_cap(dev); return (tmp != 0); } } extern mempool_t *mempool_create(int , mempool_alloc_t * , mempool_free_t * , void * ) ; extern void mempool_destroy(mempool_t * ) ; extern void *mempool_alloc(mempool_t * , gfp_t ) ; extern void mempool_free(void * , mempool_t * ) ; extern void *mempool_alloc_slab(gfp_t , void * ) ; extern void mempool_free_slab(void * , void * ) ; __inline static mempool_t *mempool_create_slab_pool(int min_nr , struct kmem_cache *kc ) { mempool_t *tmp ; { tmp = 
mempool_create(min_nr, & mempool_alloc_slab, & mempool_free_slab, (void *)kc); return (tmp); } } extern int request_firmware(struct firmware const ** , char const * , struct device * ) ; extern void release_firmware(struct firmware const * ) ; extern int pci_enable_pcie_error_reporting(struct pci_dev * ) ; extern int pci_disable_pcie_error_reporting(struct pci_dev * ) ; extern int pci_cleanup_aer_uncorrect_error_status(struct pci_dev * ) ; extern int __register_chrdev(unsigned int , unsigned int , unsigned int , char const * , struct file_operations const * ) ; extern void __unregister_chrdev(unsigned int , unsigned int , unsigned int , char const * ) ; __inline static int register_chrdev(unsigned int major , char const *name , struct file_operations const *fops ) { int tmp ; { tmp = __register_chrdev(major, 0U, 256U, name, fops); return (tmp); } } __inline static int ldv_register_chrdev_27(unsigned int major , char const *name , struct file_operations const *fops ) ; __inline static void unregister_chrdev(unsigned int major , char const *name ) { { __unregister_chrdev(major, 0U, 256U, name); return; } } __inline static void ldv_unregister_chrdev_29(unsigned int major , char const *name ) ; extern loff_t noop_llseek(struct file * , loff_t , int ) ; extern void blk_queue_update_dma_alignment(struct request_queue * , int ) ; __inline static void *shost_priv(struct Scsi_Host *shost ) { { return ((void *)(& shost->hostdata)); } } extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template * , int ) ; struct Scsi_Host *ldv_scsi_host_alloc_25(struct scsi_host_template *sht , int privsize ) ; extern int scsi_add_host_with_dma(struct Scsi_Host * , struct device * , struct device * ) ; int ldv_scsi_add_host_with_dma_10(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; extern void scsi_scan_host(struct Scsi_Host * ) ; extern void scsi_remove_host(struct Scsi_Host * ) ; void ldv_scsi_remove_host_24(struct Scsi_Host *shost ) ; void ldv_scsi_remove_host_26(struct Scsi_Host *shost ) ; extern struct Scsi_Host *scsi_host_get(struct Scsi_Host * ) ; extern void scsi_host_put(struct Scsi_Host * ) ; __inline static int scsi_add_host(struct Scsi_Host *host , struct device *dev ) { int tmp ; { tmp = ldv_scsi_add_host_with_dma_10(host, dev, dev); return (tmp); } } __inline static void scsi_host_set_prot(struct Scsi_Host *shost , unsigned int mask ) { { shost->prot_capabilities = mask; return; } } __inline static void scsi_host_set_guard(struct Scsi_Host *shost , unsigned char type ) { { shost->prot_guard_type = type; return; } } __inline static struct scsi_target *scsi_target(struct scsi_device *sdev ) { struct device const *__mptr ; { __mptr = (struct device const *)sdev->sdev_gendev.parent; return ((struct scsi_target *)__mptr + 0xffffffffffffffd8UL); } } extern int scsi_change_queue_depth(struct scsi_device * , int ) ; extern void scsi_dma_unmap(struct scsi_cmnd * ) ; __inline static unsigned char scsi_get_prot_op(struct scsi_cmnd *scmd ) { { return (scmd->prot_op); } } __inline static unsigned int scsi_prot_sg_count(struct scsi_cmnd *cmd ) { { return ((unsigned long )cmd->prot_sdb != (unsigned long )((struct scsi_data_buffer *)0) ? (cmd->prot_sdb)->table.nents : 0U); } } __inline static struct scatterlist *scsi_prot_sglist(struct scsi_cmnd *cmd ) { { return ((unsigned long )cmd->prot_sdb != (unsigned long )((struct scsi_data_buffer *)0) ? 
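/* FC transport helpers (fc_remote_port_chkready, event posting, fc_block_scsi_eh), the port_state_str[] name table, and the start of the driver-wide prototypes for the per-ISP hardware entry points: pci_config, reset_chip, chip_diag, nvram_config, load_risc, fabric login and the async login/logout/ADISC commands. */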
(cmd->prot_sdb)->table.sgl : (struct scatterlist *)0); } } __inline static int fc_remote_port_chkready(struct fc_rport *rport ) { int result ; { switch ((unsigned int )rport->port_state) { case 2U: ; if ((int )rport->roles & 1) { result = 0; } else if ((int )rport->flags & 1) { result = 786432; } else { result = 65536; } goto ldv_41580; case 4U: ; if (((int )rport->flags & 4) != 0) { result = 983040; } else { result = 786432; } goto ldv_41580; default: result = 65536; goto ldv_41580; } ldv_41580: ; return (result); } } extern struct scsi_transport_template *fc_attach_transport(struct fc_function_template * ) ; extern void fc_release_transport(struct scsi_transport_template * ) ; extern void fc_remove_host(struct Scsi_Host * ) ; extern void fc_remote_port_delete(struct fc_rport * ) ; extern int scsi_is_fc_rport(struct device const * ) ; extern u32 fc_get_event_number(void) ; extern void fc_host_post_event(struct Scsi_Host * , u32 , enum fc_host_event_code , u32 ) ; extern int fc_vport_terminate(struct fc_vport * ) ; extern int fc_block_scsi_eh(struct scsi_cmnd * ) ; static char const * const port_state_str[5U] = { "Unknown", "UNCONFIGURED", "DEAD", "LOST", "ONLINE"}; int qla2x00_initialize_adapter(scsi_qla_host_t *vha ) ; int qla2100_pci_config(struct scsi_qla_host *vha ) ; int qla2300_pci_config(struct scsi_qla_host *vha ) ; int qla24xx_pci_config(scsi_qla_host_t *vha ) ; int qla25xx_pci_config(scsi_qla_host_t *vha ) ; void qla2x00_reset_chip(struct scsi_qla_host *vha ) ; void qla24xx_reset_chip(struct scsi_qla_host *vha ) ; int qla2x00_chip_diag(struct scsi_qla_host *vha ) ; int qla24xx_chip_diag(struct scsi_qla_host *vha ) ; void qla2x00_config_rings(struct scsi_qla_host *vha ) ; void qla24xx_config_rings(struct scsi_qla_host *vha ) ; void qla2x00_reset_adapter(struct scsi_qla_host *vha ) ; void qla24xx_reset_adapter(struct scsi_qla_host *vha ) ; int qla2x00_nvram_config(struct scsi_qla_host *vha ) ; int qla24xx_nvram_config(struct scsi_qla_host *vha ) ; int qla81xx_nvram_config(struct scsi_qla_host *vha ) ; void qla2x00_update_fw_options(struct scsi_qla_host *vha ) ; void qla24xx_update_fw_options(scsi_qla_host_t *vha ) ; void qla81xx_update_fw_options(scsi_qla_host_t *vha ) ; int qla2x00_load_risc(struct scsi_qla_host *vha , uint32_t *srisc_addr ) ; int qla24xx_load_risc(scsi_qla_host_t *vha , uint32_t *srisc_addr ) ; int qla81xx_load_risc(scsi_qla_host_t *vha , uint32_t *srisc_addr ) ; int qla2x00_perform_loop_resync(scsi_qla_host_t *ha ) ; int qla2x00_loop_resync(scsi_qla_host_t *vha ) ; int qla2x00_find_new_loop_id(scsi_qla_host_t *vha , fc_port_t *dev ) ; int qla2x00_fabric_login(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t *next_loopid ) ; int qla2x00_local_device_login(scsi_qla_host_t *vha , fc_port_t *fcport ) ; void qla2x00_update_fcports(scsi_qla_host_t *base_vha ) ; int qla2x00_abort_isp(scsi_qla_host_t *vha ) ; void qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha ) ; void qla2x00_quiesce_io(scsi_qla_host_t *vha ) ; void qla2x00_update_fcport(scsi_qla_host_t *vha , fc_port_t *fcport ) ; void qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha ) ; void qla84xx_put_chip(struct scsi_qla_host *vha ) ; int qla2x00_async_login(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; int qla2x00_async_logout(struct scsi_qla_host *vha , fc_port_t *fcport ) ; int qla2x00_async_adisc(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; void qla2x00_async_login_done(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; void 
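/* Prototypes continue: async-command completion handlers, ISP83xx IDC control, the ql2x* module-parameter externs, the DPC work-posting helpers, the ISP83xx IDC work and lock routines, and the driver timer interface. */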
qla2x00_async_logout_done(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; void qla2x00_async_adisc_done(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; int __qla83xx_set_idc_control(scsi_qla_host_t *vha , uint32_t idc_control ) ; int __qla83xx_get_idc_control(scsi_qla_host_t *vha , uint32_t *idc_control ) ; void qla83xx_idc_audit(scsi_qla_host_t *vha , int audit_type ) ; int qla83xx_nic_core_reset(scsi_qla_host_t *vha ) ; void qla83xx_reset_ownership(scsi_qla_host_t *vha ) ; int qla2xxx_mctp_dump(scsi_qla_host_t *vha ) ; char qla2x00_version_str[40U] ; int ql2xlogintimeout ; int qlport_down_retry ; int ql2xplogiabsentdevice ; int ql2xloginretrycount ; int ql2xfdmienable ; int ql2xallocfwdump ; int ql2xextended_error_logging ; int ql2xiidmaenable ; int ql2xmaxqueues ; int ql2xmultique_tag ; int ql2xfwloadbin ; int ql2xetsenable ; int ql2xshiftctondsd ; int ql2xdbwr ; int ql2xasynctmfenable ; int ql2xgffidenable ; int ql2xenabledif ; int ql2xenablehba_err_chk ; int ql2xtargetreset ; int ql2xdontresethba ; uint64_t ql2xmaxlun ; int ql2xmdcapmask ; int ql2xmdenable ; int qla2x00_loop_reset(scsi_qla_host_t *vha ) ; void qla2x00_abort_all_cmds(scsi_qla_host_t *vha , int res ) ; int qla2x00_post_aen_work(struct scsi_qla_host *vha , enum fc_host_event_code code , u32 data ) ; int qla2x00_post_idc_ack_work(struct scsi_qla_host *vha , uint16_t *mb ) ; int qla2x00_post_async_login_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; int qla2x00_post_async_login_done_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; int qla2x00_post_async_logout_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; int qla2x00_post_async_logout_done_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; int qla2x00_post_async_adisc_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht , struct qla_hw_data *ha ) ; void qla2x00_relogin(struct scsi_qla_host *vha ) ; void qla2x00_do_work(struct scsi_qla_host *vha ) ; void qla2x00_free_fcports(struct scsi_qla_host *vha ) ; void qla83xx_schedule_work(scsi_qla_host_t *base_vha , int work_code ) ; void qla83xx_service_idc_aen(struct work_struct *work ) ; void qla83xx_nic_core_unrecoverable_work(struct work_struct *work ) ; void qla83xx_idc_state_handler_work(struct work_struct *work ) ; void qla83xx_nic_core_reset_work(struct work_struct *work ) ; void qla83xx_idc_lock(scsi_qla_host_t *base_vha , uint16_t requester_id ) ; void qla83xx_idc_unlock(scsi_qla_host_t *base_vha , uint16_t requester_id ) ; int qla83xx_idc_state_handler(scsi_qla_host_t *base_vha ) ; int qla83xx_set_drv_presence(scsi_qla_host_t *vha ) ; int __qla83xx_set_drv_presence(scsi_qla_host_t *vha ) ; int qla83xx_clear_drv_presence(scsi_qla_host_t *vha ) ; int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha ) ; int qla2x00_post_uevent_work(struct scsi_qla_host *vha , u32 code ) ; void qla2x00_disable_board_on_pci_error(struct work_struct *work ) ; struct scsi_host_template qla2xxx_driver_template ; struct scsi_transport_template *qla2xxx_transport_vport_template ; void qla2x00_timer(scsi_qla_host_t *vha ) ; __inline void qla2x00_start_timer(scsi_qla_host_t *vha , void *func , unsigned long interval ) ; int qla2x00_send_change_request(scsi_qla_host_t *vha , uint16_t format , uint16_t vp_idx ) ; void 
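/* Prototypes continue: IOCB accounting and builders, the start_scsi variants, abort/target-reset/LUN-reset mailbox commands, interrupt handlers, IRQ setup and NVRAM access. */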
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha ) ; void qla2x00_sp_free_dma(void *vha , void *ptr ) ; void qla2x00_mark_device_lost(scsi_qla_host_t *vha , fc_port_t *fcport , int do_login , int defer ) ; void qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha , int defer ) ; struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *vha ) ; int qla2x00_wait_for_hba_online(scsi_qla_host_t *vha ) ; int qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha ) ; void qla2xxx_wake_dpc(struct scsi_qla_host *vha ) ; int qla2x00_vp_abort_isp(scsi_qla_host_t *vha ) ; uint16_t qla2x00_calc_iocbs_32(uint16_t dsds ) ; uint16_t qla2x00_calc_iocbs_64(uint16_t dsds ) ; void qla2x00_build_scsi_iocbs_32(srb_t *sp , cmd_entry_t *cmd_pkt , uint16_t tot_dsds ) ; void qla2x00_build_scsi_iocbs_64(srb_t *sp , cmd_entry_t *cmd_pkt , uint16_t tot_dsds ) ; int qla2x00_start_scsi(srb_t *sp ) ; int qla24xx_start_scsi(srb_t *sp ) ; int qla24xx_dif_start_scsi(srb_t *sp ) ; int qla2x00_abort_command(srb_t *sp ) ; int qla2x00_abort_target(struct fc_port *fcport , uint64_t l , int tag ) ; int qla2x00_lun_reset(struct fc_port *fcport , uint64_t l , int tag ) ; int qla2x00_get_port_database(scsi_qla_host_t *vha , fc_port_t *fcport , uint8_t opt ) ; int qla2x00_lip_reset(scsi_qla_host_t *vha ) ; int qla2x00_login_fabric(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa , uint16_t *mb , uint8_t opt ) ; int qla24xx_login_fabric(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa , uint16_t *mb , uint8_t opt ) ; int qla2x00_fabric_logout(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa ) ; int qla24xx_fabric_logout(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa ) ; int qla2x00_full_login_lip(scsi_qla_host_t *vha ) ; int qla24xx_abort_command(srb_t *sp ) ; int qla24xx_async_abort_command(srb_t *sp ) ; int qla24xx_abort_target(struct fc_port *fcport , uint64_t l , int tag ) ; int qla24xx_lun_reset(struct fc_port *fcport , uint64_t l , int tag ) ; int qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha , unsigned int t , uint64_t l , enum nexus_wait_type type ) ; int qla2x00_disable_eft_trace(scsi_qla_host_t *vha ) ; int qla2x00_disable_fce_trace(scsi_qla_host_t *vha , uint64_t *wr , uint64_t *rd ) ; int qla81xx_idc_ack(scsi_qla_host_t *vha , uint16_t *mb ) ; irqreturn_t qla2100_intr_handler(int irq , void *dev_id ) ; irqreturn_t qla2300_intr_handler(int irq , void *dev_id ) ; irqreturn_t qla24xx_intr_handler(int irq , void *dev_id ) ; int qla2x00_request_irqs(struct qla_hw_data *ha , struct rsp_que *rsp ) ; void qla2x00_free_irqs(scsi_qla_host_t *vha ) ; uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) ; uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) ; int qla2x00_write_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) ; int qla24xx_write_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) ; uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) ; int qla25xx_write_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) ; bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha , uint16_t reg ) ; int qla2x00_beacon_on(struct scsi_qla_host *vha ) ; int qla2x00_beacon_off(struct scsi_qla_host *vha ) ; void 
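/* LED beacon control, ISP83xx register access, option-ROM and flash access, the firmware-dump routines, CT/FDMI IOCB preparation, sysfs/debugfs attribute setup and ISP25xx request/response queue creation. */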
qla2x00_beacon_blink(struct scsi_qla_host *vha ) ; int qla24xx_beacon_on(struct scsi_qla_host *vha ) ; int qla24xx_beacon_off(struct scsi_qla_host *vha ) ; void qla24xx_beacon_blink(struct scsi_qla_host *vha ) ; void qla83xx_beacon_blink(struct scsi_qla_host *vha ) ; int qla82xx_beacon_on(struct scsi_qla_host *vha ) ; int qla82xx_beacon_off(struct scsi_qla_host *vha ) ; int qla83xx_wr_reg(scsi_qla_host_t *vha , uint32_t reg , uint32_t data ) ; int qla83xx_rd_reg(scsi_qla_host_t *vha , uint32_t reg , uint32_t *data ) ; int qla83xx_restart_nic_firmware(scsi_qla_host_t *vha ) ; int qla83xx_access_control(scsi_qla_host_t *vha , uint16_t options , uint32_t start_addr , uint32_t end_addr , uint16_t *sector_size ) ; uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; int qla2x00_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; int qla24xx_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; void qla8044_watchdog(struct scsi_qla_host *vha ) ; int qla2x00_get_flash_version(scsi_qla_host_t *vha , void *mbuf ) ; int qla24xx_get_flash_version(scsi_qla_host_t *vha , void *mbuf ) ; int qla82xx_get_flash_version(scsi_qla_host_t *vha , void *mbuf ) ; void qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha ) ; void qla2100_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void qla2300_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void qla24xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void qla25xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void qla81xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void qla82xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void qla8044_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void qla27xx_fwdump(scsi_qla_host_t *vha , int hardware_locked ) ; void *qla2x00_prep_ms_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) ; void *qla24xx_prep_ms_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) ; void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) ; void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) ; struct device_attribute *qla2x00_host_attrs[32U] ; struct fc_function_template qla2xxx_transport_functions ; struct fc_function_template qla2xxx_transport_vport_functions ; void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha ) ; void qla2x00_free_sysfs_attr(scsi_qla_host_t *vha , bool stop_beacon ) ; void qla2x00_init_host_attr(scsi_qla_host_t *vha ) ; int qla2x00_dfs_setup(scsi_qla_host_t *vha ) ; int qla2x00_dfs_remove(scsi_qla_host_t *vha ) ; int qla25xx_create_req_que(struct qla_hw_data *ha , uint16_t options , uint8_t vp_idx , uint16_t rid , int rsp_que , uint8_t qos ) ; int qla25xx_create_rsp_que(struct qla_hw_data *ha , uint16_t options , uint8_t vp_idx , uint16_t rid , int req ) ; int qla25xx_delete_queues(struct scsi_qla_host *vha ) ; int qlafx00_pci_config(struct scsi_qla_host *vha ) ; int qlafx00_initialize_adapter(struct scsi_qla_host *vha ) ; void qlafx00_soft_reset(scsi_qla_host_t *vha ) ; int 
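/* Entry points specific to ISPFx00 (qlafx00_*), ISP82xx and ISP8044 adapters. */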
qlafx00_chip_diag(scsi_qla_host_t *vha ) ; void qlafx00_config_rings(struct scsi_qla_host *vha ) ; char *qlafx00_pci_info_str(struct scsi_qla_host *vha , char *str ) ; char *qlafx00_fw_version_str(struct scsi_qla_host *vha , char *str , size_t size ) ; irqreturn_t qlafx00_intr_handler(int irq , void *dev_id ) ; void qlafx00_enable_intrs(struct qla_hw_data *ha ) ; void qlafx00_disable_intrs(struct qla_hw_data *ha ) ; int qlafx00_abort_target(fc_port_t *fcport , uint64_t l , int tag ) ; int qlafx00_lun_reset(fc_port_t *fcport , uint64_t l , int tag ) ; int qlafx00_start_scsi(srb_t *sp ) ; int qlafx00_abort_isp(scsi_qla_host_t *vha ) ; int qlafx00_iospace_config(struct qla_hw_data *ha ) ; int qlafx00_driver_shutdown(scsi_qla_host_t *vha , int tmo ) ; int qlafx00_reset_initialize(scsi_qla_host_t *vha ) ; int qlafx00_fx_disc(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t fx_type ) ; int qlafx00_process_aen(struct scsi_qla_host *vha , struct qla_work_evt *evt ) ; int qlafx00_post_aenfx_work(struct scsi_qla_host *vha , uint32_t evtcode , uint32_t *data , int cnt ) ; void qlafx00_timer_routine(scsi_qla_host_t *vha ) ; int qlafx00_rescan_isp(scsi_qla_host_t *vha ) ; int qlafx00_loop_reset(scsi_qla_host_t *vha ) ; int qla82xx_pci_config(struct scsi_qla_host *vha ) ; int qla82xx_iospace_config(struct qla_hw_data *ha ) ; void qla82xx_reset_chip(struct scsi_qla_host *vha ) ; void qla82xx_config_rings(struct scsi_qla_host *vha ) ; void qla82xx_watchdog(scsi_qla_host_t *vha ) ; int qla82xx_start_firmware(scsi_qla_host_t *vha ) ; int qla82xx_load_risc(scsi_qla_host_t *vha , uint32_t *srisc_addr ) ; uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; int qla82xx_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; int qla82xx_abort_isp(scsi_qla_host_t *vha ) ; int qla82xx_restart_isp(scsi_qla_host_t *vha ) ; int qla82xx_start_scsi(srb_t *sp ) ; irqreturn_t qla82xx_intr_handler(int irq , void *dev_id ) ; void qla82xx_enable_intrs(struct qla_hw_data *ha ) ; void qla82xx_disable_intrs(struct qla_hw_data *ha ) ; void qla82xx_init_flags(struct qla_hw_data *ha ) ; void qla82xx_set_drv_active(scsi_qla_host_t *vha ) ; int qla82xx_wr_32(struct qla_hw_data *ha , ulong off , u32 data ) ; int qla82xx_rd_32(struct qla_hw_data *ha , ulong off ) ; void qla82xx_clear_drv_active(struct qla_hw_data *ha ) ; int qla82xx_idc_lock(struct qla_hw_data *ha ) ; void qla82xx_idc_unlock(struct qla_hw_data *ha ) ; int qla82xx_device_state_handler(scsi_qla_host_t *vha ) ; void qla8xxx_dev_failed_handler(scsi_qla_host_t *vha ) ; void qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha ) ; int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha ) ; void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha ) ; void qla83xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void qla82xx_md_free(scsi_qla_host_t *vha ) ; int qla8044_idc_lock(struct qla_hw_data *ha ) ; void qla8044_idc_unlock(struct qla_hw_data *ha ) ; void qla8044_wr_direct(struct scsi_qla_host *vha , uint32_t const crb_reg , uint32_t const value ) ; int qla8044_device_state_handler(struct scsi_qla_host *vha ) ; void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha ) ; void qla8044_clear_drv_active(struct qla_hw_data *ha ) ; int qla8044_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; irqreturn_t qla8044_intr_handler(int irq , void *dev_id ) ; int qla8044_abort_isp(scsi_qla_host_t *vha ) ; void qlt_host_reset_handler(struct 
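/* Logging helpers (ql_dbg/ql_log and their PCI variants), target-mode (qlt_*) hooks, and inline helpers for the loop-ID bitmap and for releasing a command's DSD pool entries. */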
qla_hw_data *ha ) ; int ql_errlev ; void ql_dbg(uint32_t level , scsi_qla_host_t *vha , int32_t id , char const *fmt , ...) ; void ql_dbg_pci(uint32_t level , struct pci_dev *pdev , int32_t id , char const *fmt , ...) ; void ql_log(uint32_t level , scsi_qla_host_t *vha , int32_t id , char const *fmt , ...) ; void ql_log_pci(uint32_t level , struct pci_dev *pdev , int32_t id , char const *fmt , ...) ; extern void msleep(unsigned int ) ; int qlt_add_target(struct qla_hw_data *ha , struct scsi_qla_host *base_vha ) ; int qlt_remove_target(struct qla_hw_data *ha , struct scsi_qla_host *vha ) ; void qlt_fc_port_deleted(struct scsi_qla_host *vha , fc_port_t *fcport ) ; int qlt_init(void) ; void qlt_exit(void) ; __inline static bool qla_ini_mode_enabled(struct scsi_qla_host *ha ) { { return (((int )(ha->host)->active_mode & 1) != 0); } } void qlt_probe_one_stage1(struct scsi_qla_host *base_vha , struct qla_hw_data *ha ) ; int qlt_mem_alloc(struct qla_hw_data *ha ) ; void qlt_mem_free(struct qla_hw_data *ha ) ; void qlt_83xx_iospace_config(struct qla_hw_data *ha ) ; __inline static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha ) { int i ; { if ((ha->device_type & 134217728U) != 0U) { return; } else { } i = 0; goto ldv_65776; ldv_65775: set_bit((long )i, (unsigned long volatile *)ha->loop_id_map); i = i + 1; ldv_65776: ; if (i <= 128) { goto ldv_65775; } else { } set_bit(254L, (unsigned long volatile *)ha->loop_id_map); set_bit(255L, (unsigned long volatile *)ha->loop_id_map); return; } } __inline static int qla2x00_is_reserved_id(scsi_qla_host_t *vha , uint16_t loop_id ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((ha->device_type & 134217728U) != 0U) { return ((unsigned int )loop_id > 2031U); } else { } return ((((int )ha->max_loop_id < (int )loop_id && (unsigned int )loop_id <= 128U) || (unsigned int )loop_id == 254U) || (unsigned int )loop_id == 255U); } } __inline static void qla2x00_clear_loop_id(fc_port_t *fcport ) { struct qla_hw_data *ha ; int tmp ; { ha = (fcport->vha)->hw; if ((unsigned int )fcport->loop_id == 4096U) { return; } else { tmp = qla2x00_is_reserved_id(fcport->vha, (int )fcport->loop_id); if (tmp != 0) { return; } else { } } clear_bit((long )fcport->loop_id, (unsigned long volatile *)ha->loop_id_map); fcport->loop_id = 4096U; return; } } __inline static void qla2x00_clean_dsd_pool(struct qla_hw_data *ha , srb_t *sp , struct qla_tgt_cmd *tc ) { struct dsd_dma *dsd_ptr ; struct dsd_dma *tdsd_ptr ; struct crc_context *ctx ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { ctx = (struct crc_context *)sp->u.scmd.ctx; } else if ((unsigned long )tc != (unsigned long )((struct qla_tgt_cmd *)0)) { ctx = tc->ctx; } else { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/scsi/qla2xxx/qla_inline.h"), "i" (143), "i" (12UL)); ldv_65795: ; goto ldv_65795; return; } __mptr = (struct list_head const *)ctx->dsd_list.next; dsd_ptr = (struct dsd_dma *)__mptr; __mptr___0 = (struct list_head const *)dsd_ptr->list.next; tdsd_ptr = (struct dsd_dma *)__mptr___0; goto ldv_65803; ldv_65802: dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); list_del(& dsd_ptr->list); kfree((void const *)dsd_ptr); dsd_ptr = tdsd_ptr; __mptr___1 = (struct list_head const 
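/* Further inlines: fcport state transitions (logged through ql_dbg), the reset-active test across base and virtual hosts, and SRB allocation/release from ha->srb_mempool with vref_count accounting. */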
*)tdsd_ptr->list.next; tdsd_ptr = (struct dsd_dma *)__mptr___1; ldv_65803: ; if ((unsigned long )(& dsd_ptr->list) != (unsigned long )(& ctx->dsd_list)) { goto ldv_65802; } else { } INIT_LIST_HEAD(& ctx->dsd_list); return; } } __inline static void qla2x00_set_fcport_state(fc_port_t *fcport , int state ) { int old_state ; { old_state = atomic_read((atomic_t const *)(& fcport->state)); atomic_set(& fcport->state, state); if (old_state != 0 && old_state != state) { ql_dbg(268435456U, fcport->vha, 8317, "FCPort state transitioned from %s to %s - portid=%02x%02x%02x.\n", port_state_str[old_state], port_state_str[state], (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { } return; } } __inline static int qla2x00_reset_active(scsi_qla_host_t *vha ) { scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; { tmp = pci_get_drvdata((vha->hw)->pdev); base_vha = (scsi_qla_host_t *)tmp; tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___0 != 0) { tmp___5 = 1; } else { tmp___1 = constant_test_bit(3L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___1 != 0) { tmp___5 = 1; } else { tmp___2 = constant_test_bit(10L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___2 != 0) { tmp___5 = 1; } else { tmp___3 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___3 != 0) { tmp___5 = 1; } else { tmp___4 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___4 != 0) { tmp___5 = 1; } else { tmp___5 = 0; } } } } } return (tmp___5); } } __inline static srb_t *qla2x00_get_sp(scsi_qla_host_t *vha , fc_port_t *fcport , gfp_t flag ) { srb_t *sp ; struct qla_hw_data *ha ; uint8_t bail ; long tmp ; void *tmp___0 ; { sp = (srb_t *)0; ha = vha->hw; atomic_inc(& vha->vref_count); __asm__ volatile ("mfence": : : "memory"); if (*((unsigned long *)vha + 19UL) != 0UL) { atomic_dec(& vha->vref_count); bail = 1U; } else { bail = 0U; } tmp = ldv__builtin_expect((unsigned int )bail != 0U, 0L); if (tmp != 0L) { return ((srb_t *)0); } else { } tmp___0 = mempool_alloc(ha->srb_mempool, flag); sp = (srb_t *)tmp___0; if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } memset((void *)sp, 0, 376UL); sp->fcport = fcport; sp->iocbs = 1; done: ; if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { atomic_dec(& vha->vref_count); } else { } return (sp); } } __inline static void qla2x00_rel_sp(scsi_qla_host_t *vha , srb_t *sp ) { { mempool_free((void *)sp, (vha->hw)->srb_mempool); atomic_dec(& vha->vref_count); return; } } __inline static int qla2x00_gid_list_size(struct qla_hw_data *ha ) { { if ((ha->device_type & 131072U) != 0U) { return (128); } else { return ((int )((unsigned int )ha->max_fibre_devices * 8U)); } } } extern void vfree(void const * ) ; extern struct task_struct *kthread_create_on_node(int (*)(void * ) , void * , int , char const * , ...) 
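/* Module-scope state: character-device major, SRB/context slab caches, the default ql2x* module-parameter values, the qla2xxx scsi_host_template initializer, and the timer start/restart/stop wrappers (routed through the LDV timer model functions). */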
; extern int kthread_stop(struct task_struct * ) ; extern bool kthread_should_stop(void) ; static int apidev_major ; static struct kmem_cache *srb_cachep ; static struct kmem_cache *ctx_cachep ; int ql_errlev = 3; static int ql2xenableclass2 ; int ql2xlogintimeout = 20; int ql2xloginretrycount = 0; int ql2xallocfwdump = 1; int ql2xshiftctondsd = 6; int ql2xfdmienable = 1; static int ql2xmaxqdepth = 32; int ql2xenabledif = 2; int ql2xenablehba_err_chk = 2; int ql2xiidmaenable = 1; int ql2xmaxqueues = 1; int ql2xdbwr = 1; int ql2xtargetreset = 1; uint64_t ql2xmaxlun = 65535ULL; int ql2xmdcapmask = 31; int ql2xmdenable = 1; static int qla2xxx_slave_configure(struct scsi_device *sdev ) ; static int qla2xxx_slave_alloc(struct scsi_device *sdev ) ; static int qla2xxx_scan_finished(struct Scsi_Host *shost , unsigned long time ) ; static void qla2xxx_scan_start(struct Scsi_Host *shost ) ; static void qla2xxx_slave_destroy(struct scsi_device *sdev ) ; static int qla2xxx_queuecommand(struct Scsi_Host *host , struct scsi_cmnd *cmd ) ; static int qla2xxx_eh_abort(struct scsi_cmnd *cmd ) ; static int qla2xxx_eh_device_reset(struct scsi_cmnd *cmd ) ; static int qla2xxx_eh_target_reset(struct scsi_cmnd *cmd ) ; static int qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd ) ; static int qla2xxx_eh_host_reset(struct scsi_cmnd *cmd ) ; static void qla2x00_clear_drv_active(struct qla_hw_data *ha ) ; static void qla2x00_free_device(scsi_qla_host_t *vha ) ; static void qla83xx_disable_laser(scsi_qla_host_t *vha ) ; struct scsi_host_template qla2xxx_driver_template = {& __this_module, "qla2xxx", 0, 0, 0, 0, 0, & qla2xxx_queuecommand, & qla2xxx_eh_abort, & qla2xxx_eh_device_reset, & qla2xxx_eh_target_reset, & qla2xxx_eh_bus_reset, & qla2xxx_eh_host_reset, & qla2xxx_slave_alloc, & qla2xxx_slave_configure, & qla2xxx_slave_destroy, 0, 0, & qla2xxx_scan_finished, & qla2xxx_scan_start, & scsi_change_queue_depth, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 128U, (unsigned short)0, 65535U, 0UL, 3, (unsigned char)0, 0, 1U, 1U, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 0U, (struct device_attribute **)(& qla2x00_host_attrs), 0, {0, 0}, 0ULL, 0U, 0, (_Bool)0}; static struct scsi_transport_template *qla2xxx_transport_template = (struct scsi_transport_template *)0; struct scsi_transport_template *qla2xxx_transport_vport_template = (struct scsi_transport_template *)0; __inline void qla2x00_start_timer(scsi_qla_host_t *vha , void *func , unsigned long interval ) { { reg_timer_21(& vha->timer); vha->timer.expires = interval * 250UL + (unsigned long )jiffies; vha->timer.data = (unsigned long )vha; vha->timer.function = (void (*)(unsigned long ))func; add_timer(& vha->timer); vha->timer_active = 1U; return; } } __inline static void qla2x00_restart_timer(scsi_qla_host_t *vha , unsigned long interval ) { { if ((vha->device_flags & 32U) != 0U) { ql_dbg(16777216U, vha, 24589, "Device in a failed state, returning.\n"); return; } else { } ldv_mod_timer_11(& vha->timer, interval * 250UL + (unsigned long )jiffies); return; } } __inline static void qla2x00_stop_timer(scsi_qla_host_t *vha ) { { ldv_del_timer_sync_12(& vha->timer); vha->timer_active = 0U; return; } } static int qla2x00_do_dpc(void *data ) ; static void qla2x00_rst_aen(scsi_qla_host_t *vha ) ; static int qla2x00_mem_alloc(struct qla_hw_data *ha , uint16_t req_len , uint16_t rsp_len , struct req_que **req , struct rsp_que **rsp ) ; static void qla2x00_free_fw_dump(struct qla_hw_data *ha ) ; static void qla2x00_mem_free(struct qla_hw_data 
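/* Request/response queue bookkeeping: qla2x00_alloc_queues allocates the req_q_map/rsp_q_map pointer arrays; the free routines release the ring DMA memory (FX00 and standard layouts) and the outstanding-command arrays. */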
*ha ) ; static int qla2x00_alloc_queues(struct qla_hw_data *ha , struct req_que *req , struct rsp_que *rsp ) { scsi_qla_host_t *vha ; void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = kzalloc((unsigned long )ha->max_req_queues * 8UL, 208U); ha->req_q_map = (struct req_que **)tmp___0; if ((unsigned long )ha->req_q_map == (unsigned long )((struct req_que **)0)) { ql_log(0U, vha, 59, "Unable to allocate memory for request queue ptrs.\n"); goto fail_req_map; } else { } tmp___1 = kzalloc((unsigned long )ha->max_rsp_queues * 8UL, 208U); ha->rsp_q_map = (struct rsp_que **)tmp___1; if ((unsigned long )ha->rsp_q_map == (unsigned long )((struct rsp_que **)0)) { ql_log(0U, vha, 60, "Unable to allocate memory for response queue ptrs.\n"); goto fail_rsp_map; } else { } *(ha->rsp_q_map) = rsp; *(ha->req_q_map) = req; set_bit(0L, (unsigned long volatile *)(& ha->rsp_qid_map)); set_bit(0L, (unsigned long volatile *)(& ha->req_qid_map)); return (1); fail_rsp_map: kfree((void const *)ha->req_q_map); ha->req_q_map = (struct req_que **)0; fail_req_map: ; return (-12); } } static void qla2x00_free_req_que(struct qla_hw_data *ha , struct req_que *req ) { { if ((ha->device_type & 131072U) != 0U) { if ((unsigned long )req != (unsigned long )((struct req_que *)0) && (unsigned long )req->ring_fx00 != (unsigned long )((request_t *)0)) { dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )req->length_fx00 + 1) * 64UL, (void *)req->ring_fx00, req->dma_fx00, (struct dma_attrs *)0); } else { } } else if ((unsigned long )req != (unsigned long )((struct req_que *)0) && (unsigned long )req->ring != (unsigned long )((request_t *)0)) { dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )req->length + 1) * 64UL, (void *)req->ring, req->dma, (struct dma_attrs *)0); } else { } if ((unsigned long )req != (unsigned long )((struct req_que *)0)) { kfree((void const *)req->outstanding_cmds); } else { } kfree((void const *)req); req = (struct req_que *)0; return; } } static void qla2x00_free_rsp_que(struct qla_hw_data *ha , struct rsp_que *rsp ) { { if ((ha->device_type & 131072U) != 0U) { if ((unsigned long )rsp != (unsigned long )((struct rsp_que *)0) && (unsigned long )rsp->ring != (unsigned long )((response_t *)0)) { dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )rsp->length_fx00 + 1) * 64UL, (void *)rsp->ring_fx00, rsp->dma_fx00, (struct dma_attrs *)0); } else { } } else if ((unsigned long )rsp != (unsigned long )((struct rsp_que *)0) && (unsigned long )rsp->ring != (unsigned long )((response_t *)0)) { dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )rsp->length + 1) * 64UL, (void *)rsp->ring, rsp->dma, (struct dma_attrs *)0); } else { } kfree((void const *)rsp); rsp = (struct rsp_que *)0; return; } } static void qla2x00_free_queues(struct qla_hw_data *ha ) { struct req_que *req ; struct rsp_que *rsp ; int cnt ; { cnt = 0; goto ldv_66851; ldv_66850: req = *(ha->req_q_map + (unsigned long )cnt); qla2x00_free_req_que(ha, req); cnt = cnt + 1; ldv_66851: ; if ((int )ha->max_req_queues > cnt) { goto ldv_66850; } else { } kfree((void const *)ha->req_q_map); ha->req_q_map = (struct req_que **)0; cnt = 0; goto ldv_66854; ldv_66853: rsp = *(ha->rsp_q_map + (unsigned long )cnt); qla2x00_free_rsp_que(ha, rsp); cnt = cnt + 1; ldv_66854: ; if ((int )ha->max_rsp_queues > cnt) { goto ldv_66853; } else { } kfree((void const *)ha->rsp_q_map); ha->rsp_q_map = (struct rsp_que **)0; return; } } static int qla25xx_setup_mode(struct scsi_qla_host *vha ) { 
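/* ISP25xx multiqueue setup: when ql2xmultique_tag is set, extra request/response queues and the qla2xxx_wq workqueue are created and CPU-affinity mode is enabled. The PCI/PCIe bus-information string formatters follow. */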
uint16_t options ; int ques ; int req ; int ret ; struct qla_hw_data *ha ; struct lock_class_key __key ; char const *__lock_name ; struct workqueue_struct *tmp ; uint8_t tmp___0 ; { options = 0U; ha = vha->hw; if (((int )ha->fw_attributes & 64) == 0) { ql_log(1U, vha, 216, "Firmware is not multi-queue capable.\n"); goto fail; } else { } if (ql2xmultique_tag != 0) { options = (uint16_t )((unsigned int )options | 128U); req = qla25xx_create_req_que(ha, (int )options, 0, 0, -1, 5); if (req == 0) { ql_log(1U, vha, 224, "Failed to create request queue.\n"); goto fail; } else { } __lock_name = "\"qla2xxx_wq\""; tmp = __alloc_workqueue_key("qla2xxx_wq", 8U, 1, & __key, __lock_name); ha->wq = tmp; vha->req = *(ha->req_q_map + (unsigned long )req); options = (uint16_t )((unsigned int )options | 2U); ques = 1; goto ldv_66870; ldv_66869: ret = qla25xx_create_rsp_que(ha, (int )options, 0, 0, req); if (ret == 0) { ql_log(1U, vha, 232, "Failed to create response queue.\n"); goto fail2; } else { } ques = ques + 1; ldv_66870: ; if ((int )ha->max_rsp_queues > ques) { goto ldv_66869; } else { } ha->flags.cpu_affinity_enabled = 1U; ql_dbg(1048576U, vha, 49159, "CPU affinity mode enabled, no. of response queues:%d no. of request queues:%d.\n", (int )ha->max_rsp_queues, (int )ha->max_req_queues); ql_dbg(1073741824U, vha, 233, "CPU affinity mode enabled, no. of response queues:%d no. of request queues:%d.\n", (int )ha->max_rsp_queues, (int )ha->max_req_queues); } else { } return (0); fail2: qla25xx_delete_queues(vha); ldv_destroy_workqueue_13(ha->wq); ha->wq = (struct workqueue_struct *)0; vha->req = *(ha->req_q_map); fail: ha->mqenable = 0U; kfree((void const *)ha->req_q_map); kfree((void const *)ha->rsp_q_map); tmp___0 = 1U; ha->max_rsp_queues = tmp___0; ha->max_req_queues = tmp___0; return (1); } } static char *qla2x00_pci_info_str(struct scsi_qla_host *vha , char *str ) { struct qla_hw_data *ha ; char *pci_bus_modes[4U] ; uint16_t pci_bus ; { ha = vha->hw; pci_bus_modes[0] = (char *)"33"; pci_bus_modes[1] = (char *)"66"; pci_bus_modes[2] = (char *)"100"; pci_bus_modes[3] = (char *)"133"; strcpy(str, "PCI"); pci_bus = (uint16_t )((ha->pci_attr & 1536U) >> 9); if ((unsigned int )pci_bus != 0U) { strcat(str, "-X ("); strcat(str, (char const *)pci_bus_modes[(int )pci_bus]); } else { pci_bus = (uint16_t )((ha->pci_attr & 256U) >> 8); strcat(str, " ("); strcat(str, (char const *)pci_bus_modes[(int )pci_bus]); } strcat(str, " MHz)"); return (str); } } static char *qla24xx_pci_info_str(struct scsi_qla_host *vha , char *str ) { char *pci_bus_modes[4U] ; struct qla_hw_data *ha ; uint32_t pci_bus ; char lwstr[6U] ; uint32_t lstat ; uint32_t lspeed ; uint32_t lwidth ; bool tmp ; { pci_bus_modes[0] = (char *)"33"; pci_bus_modes[1] = (char *)"66"; pci_bus_modes[2] = (char *)"100"; pci_bus_modes[3] = (char *)"133"; ha = vha->hw; tmp = pci_is_pcie(ha->pdev); if ((int )tmp) { pcie_capability_read_dword(ha->pdev, 12, & lstat); lspeed = lstat & 15U; lwidth = (lstat & 1008U) >> 4; strcpy(str, "PCIe ("); switch (lspeed) { case 1U: strcat(str, "2.5GT/s "); goto ldv_66891; case 2U: strcat(str, "5.0GT/s "); goto ldv_66891; case 3U: strcat(str, "8.0GT/s "); goto ldv_66891; default: strcat(str, " "); goto ldv_66891; } ldv_66891: snprintf((char *)(& lwstr), 6UL, "x%d)", lwidth); strcat(str, (char const *)(& lwstr)); return (str); } else { } strcpy(str, "PCI"); pci_bus = (ha->pci_attr & 3840U) >> 8; if (pci_bus == 0U || pci_bus == 8U) { strcat(str, " ("); strcat(str, (char const *)pci_bus_modes[pci_bus >> 3]); } else { strcat(str, "-X 
"); if ((pci_bus & 4U) != 0U) { strcat(str, "Mode 2"); } else { strcat(str, "Mode 1"); } strcat(str, " ("); strcat(str, (char const *)pci_bus_modes[pci_bus & 4294967291U]); } strcat(str, " MHz)"); return (str); } } static char *qla2x00_fw_version_str(struct scsi_qla_host *vha , char *str , size_t size ) { char un_str[10U] ; struct qla_hw_data *ha ; { ha = vha->hw; snprintf(str, size, "%d.%02d.%02d ", (int )ha->fw_major_version, (int )ha->fw_minor_version, (int )ha->fw_subminor_version); if (((int )ha->fw_attributes & 512) != 0) { strcat(str, "FLX"); return (str); } else { } switch ((int )ha->fw_attributes & 255) { case 7: strcat(str, "EF"); goto ldv_66903; case 23: strcat(str, "TP"); goto ldv_66903; case 55: strcat(str, "IP"); goto ldv_66903; case 119: strcat(str, "VI"); goto ldv_66903; default: sprintf((char *)(& un_str), "(%x)", (int )ha->fw_attributes); strcat(str, (char const *)(& un_str)); goto ldv_66903; } ldv_66903: ; if (((int )ha->fw_attributes & 256) != 0) { strcat(str, "X"); } else { } return (str); } } static char *qla24xx_fw_version_str(struct scsi_qla_host *vha , char *str , size_t size ) { struct qla_hw_data *ha ; { ha = vha->hw; snprintf(str, size, "%d.%02d.%02d (%x)", (int )ha->fw_major_version, (int )ha->fw_minor_version, (int )ha->fw_subminor_version, (int )ha->fw_attributes); return (str); } } void qla2x00_sp_free_dma(void *vha , void *ptr ) { srb_t *sp ; struct scsi_cmnd *cmd ; struct qla_hw_data *ha ; void *ctx ; unsigned int tmp ; struct scatterlist *tmp___0 ; struct ct6_dsd *ctx1 ; { sp = (srb_t *)ptr; cmd = sp->u.scmd.cmd; ha = ((sp->fcport)->vha)->hw; ctx = sp->u.scmd.ctx; if ((int )sp->flags & 1) { scsi_dma_unmap(cmd); sp->flags = (unsigned int )sp->flags & 65534U; } else { } if (((int )sp->flags & 16) != 0) { tmp = scsi_prot_sg_count(cmd); tmp___0 = scsi_prot_sglist(cmd); dma_unmap_sg_attrs(& (ha->pdev)->dev, tmp___0, (int )tmp, cmd->sc_data_direction, (struct dma_attrs *)0); sp->flags = (unsigned int )sp->flags & 65519U; } else { } if (((int )sp->flags & 32) != 0) { qla2x00_clean_dsd_pool(ha, sp, (struct qla_tgt_cmd *)0); sp->flags = (unsigned int )sp->flags & 65503U; } else { } if (((int )sp->flags & 4) != 0) { dma_pool_free(ha->dl_dma_pool, ctx, ((struct crc_context *)ctx)->crc_ctx_dma); sp->flags = (unsigned int )sp->flags & 65531U; } else { } if (((int )sp->flags & 4096) != 0) { ctx1 = (struct ct6_dsd *)ctx; dma_pool_free(ha->fcp_cmnd_dma_pool, (void *)ctx1->fcp_cmnd, ctx1->fcp_cmnd_dma); list_splice((struct list_head const *)(& ctx1->dsd_list), & ha->gbl_dsd_list); ha->gbl_dsd_inuse = (int )ha->gbl_dsd_inuse - (int )((uint16_t )ctx1->dsd_use_cnt); ha->gbl_dsd_avail = (int )ha->gbl_dsd_avail + (int )((uint16_t )ctx1->dsd_use_cnt); mempool_free((void *)ctx1, ha->ctx_mempool); ctx1 = (struct ct6_dsd *)0; } else { } cmd->SCp.ptr = (char *)0; qla2x00_rel_sp((sp->fcport)->vha, sp); return; } } static void qla2x00_sp_compl(void *data , void *ptr , int res ) { struct qla_hw_data *ha ; srb_t *sp ; struct scsi_cmnd *cmd ; int tmp ; int tmp___0 ; { ha = (struct qla_hw_data *)data; sp = (srb_t *)ptr; cmd = sp->u.scmd.cmd; cmd->result = res; tmp = atomic_read((atomic_t const *)(& sp->ref_count)); if (tmp == 0) { ql_dbg(134217728U, (sp->fcport)->vha, 12309, "SP reference-count to ZERO -- sp=%p cmd=%p.\n", sp, sp->u.scmd.cmd); if ((ql2xextended_error_logging & 134217728) != 0) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_os.c"), "i" (659), "i" (12UL)); ldv_66931: ; goto ldv_66931; } else { } return; } else { } tmp___0 = atomic_dec_and_test(& sp->ref_count); if (tmp___0 == 0) { return; } else { } qla2x00_sp_free_dma((void *)ha, (void *)sp); (*(cmd->scsi_done))(cmd); return; } } static int qla2xxx_queuecommand(struct Scsi_Host *host , struct scsi_cmnd *cmd ) { scsi_qla_host_t *vha ; void *tmp ; fc_port_t *fcport ; struct fc_rport *rport ; struct device const *__mptr ; struct scsi_target *tmp___2 ; struct fc_rport *tmp___3 ; struct scsi_target *tmp___4 ; int tmp___5 ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp___6 ; srb_t *sp ; int rval ; unsigned char tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; { tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; fcport = (fc_port_t *)(cmd->device)->hostdata; tmp___4 = scsi_target(cmd->device); tmp___5 = scsi_is_fc_rport((struct device const *)tmp___4->dev.parent); if (tmp___5 != 0) { tmp___2 = scsi_target(cmd->device); __mptr = (struct device const *)tmp___2->dev.parent; tmp___3 = (struct fc_rport *)__mptr + 0xffffffffffffffa0UL; } else { tmp___3 = (struct fc_rport *)0; } rport = tmp___3; ha = vha->hw; tmp___6 = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp___6; if (*((unsigned long *)ha + 2UL) != 0UL) { if (*((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(2097152U, vha, 36880, "PCI Channel IO permanent failure, exiting cmd=%p.\n", cmd); cmd->result = 65536; } else { ql_dbg(2097152U, vha, 36881, "EEH_Busy, Requeuing the cmd=%p.\n", cmd); cmd->result = 851968; } goto qc24_fail_command; } else { } rval = fc_remote_port_chkready(rport); if (rval != 0) { cmd->result = rval; ql_dbg(134250496U, vha, 12291, "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", cmd, rval); goto qc24_fail_command; } else { } if (*((unsigned long *)vha + 19UL) == 0UL) { tmp___7 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___7 != 0U) { ql_dbg(134217728U, vha, 12292, "DIF Cap not reg, fail DIF capable cmd\'s:%p.\n", cmd); cmd->result = 65536; goto qc24_fail_command; } else { } } else { } if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { cmd->result = 65536; goto qc24_fail_command; } else { } tmp___12 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___12 != 4) { tmp___10 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___10 == 2) { tmp___8 = atomic_read((atomic_t const *)(& base_vha->loop_state)); tmp___9 = atomic_read((atomic_t const *)(& fcport->state)); ql_dbg(134217728U, vha, 12293, "Returning DNC, fcport_state=%d loop_state=%d.\n", tmp___9, tmp___8); cmd->result = 65536; goto qc24_fail_command; } else { tmp___11 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___11 == 6) { tmp___8 = atomic_read((atomic_t const *)(& base_vha->loop_state)); tmp___9 = atomic_read((atomic_t const *)(& fcport->state)); ql_dbg(134217728U, vha, 12293, "Returning DNC, fcport_state=%d loop_state=%d.\n", tmp___9, tmp___8); cmd->result = 65536; goto qc24_fail_command; } else { } } goto qc24_target_busy; } else { } if (fcport->retry_delay_timestamp == 0UL) { } else if ((long )(fcport->retry_delay_timestamp - (unsigned long )jiffies) < 0L) { fcport->retry_delay_timestamp = 0UL; } else { goto qc24_target_busy; } sp = qla2x00_get_sp(vha, fcport, 32U); if ((unsigned long )sp == 
(unsigned long )((srb_t *)0)) { goto qc24_host_busy; } else { } sp->u.scmd.cmd = cmd; sp->type = 8U; atomic_set(& sp->ref_count, 1); cmd->SCp.ptr = (char *)sp; sp->free = & qla2x00_sp_free_dma; sp->done = & qla2x00_sp_compl; rval = (*((ha->isp_ops)->start_scsi))(sp); if (rval != 0) { ql_dbg(134250496U, vha, 12307, "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); goto qc24_host_busy_free_sp; } else { } return (0); qc24_host_busy_free_sp: qla2x00_sp_free_dma((void *)ha, (void *)sp); qc24_host_busy: ; return (4181); qc24_target_busy: ; return (4184); qc24_fail_command: (*(cmd->scsi_done))(cmd); return (0); } } static int qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd ) { unsigned long wait_iter ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int ret ; int tmp___0 ; long tmp___1 ; unsigned long tmp___2 ; { wait_iter = 2UL; tmp = shost_priv((cmd->device)->host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; ret = 0; tmp___0 = pci_channel_offline(ha->pdev); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 0L); if (tmp___1 != 0L || *((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(4194304U, vha, 32773, "Return:eh_wait.\n"); return (ret); } else { } goto ldv_66963; ldv_66962: msleep(1000U); ldv_66963: ; if ((unsigned long )cmd->SCp.ptr != (unsigned long )((char *)0)) { tmp___2 = wait_iter; wait_iter = wait_iter - 1UL; if (tmp___2 != 0UL) { goto ldv_66962; } else { goto ldv_66964; } } else { } ldv_66964: ; if ((unsigned long )cmd->SCp.ptr != (unsigned long )((char *)0)) { ret = 258; } else { } return (ret); } } int qla2x00_wait_for_hba_online(scsi_qla_host_t *vha ) { int return_status ; unsigned long wait_online ; struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; wait_online = (unsigned long )jiffies + 75000UL; goto ldv_66979; ldv_66978: msleep(1000U); ldv_66979: tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___0 != 0) { goto _L; } else { tmp___1 = constant_test_bit(3L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___1 != 0) { goto _L; } else { tmp___2 = constant_test_bit(10L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___2 != 0) { goto _L; } else if ((unsigned int )ha->dpc_active != 0U) { _L: /* CIL Label */ if ((long )((unsigned long )jiffies - wait_online) < 0L) { goto ldv_66978; } else { goto ldv_66980; } } else { goto ldv_66980; } } } ldv_66980: ; if (*((unsigned long *)base_vha + 19UL) != 0UL) { return_status = 0; } else { return_status = 258; } return (return_status); } } static void qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; { ha = vha->hw; goto ldv_66986; ldv_66985: msleep(1000U); ldv_66986: tmp = qla2x00_reset_active(vha); if ((tmp != 0 || (unsigned int )ha->dpc_active != 0U) || *((unsigned long *)ha + 2UL) != 0UL) { goto ldv_66985; } else { tmp___0 = constant_test_bit(23L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { goto ldv_66985; } else { tmp___1 = constant_test_bit(24L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { goto ldv_66985; } else { goto ldv_66987; } } } ldv_66987: ; return; } } int qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha ) { int return_status ; unsigned long wait_reset ; struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { ha = 
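/* qla2xxx_eh_abort takes an extra SRB reference under hardware_lock, issues the ISP-specific abort_command mailbox call, completes the SRB and, on success, waits for the command to finish. */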
vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; wait_reset = (unsigned long )jiffies + 75000UL; goto ldv_67003; ldv_67002: msleep(1000U); tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___0 == 0 && *((unsigned long *)ha + 2UL) != 0UL) { goto ldv_67001; } else { } ldv_67003: tmp___1 = constant_test_bit(2L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___1 != 0) { goto _L; } else { tmp___2 = constant_test_bit(3L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___2 != 0) { goto _L; } else { tmp___3 = constant_test_bit(10L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___3 != 0) { goto _L; } else if ((unsigned int )ha->dpc_active != 0U) { _L: /* CIL Label */ if ((long )((unsigned long )jiffies - wait_reset) < 0L) { goto ldv_67002; } else { goto ldv_67001; } } else { goto ldv_67001; } } } ldv_67001: ; if (*((unsigned long *)ha + 2UL) != 0UL) { return_status = 0; } else { return_status = 258; } return (return_status); } } static void sp_get(struct srb *sp ) { { atomic_inc(& sp->ref_count); return; } } static int qla2xxx_eh_abort(struct scsi_cmnd *cmd ) { scsi_qla_host_t *vha ; void *tmp ; srb_t *sp ; int ret ; unsigned int id ; uint64_t lun ; unsigned long flags ; int rval ; int wait ; struct qla_hw_data *ha ; raw_spinlock_t *tmp___0 ; raw_spinlock_t *tmp___1 ; int tmp___2 ; { tmp = shost_priv((cmd->device)->host); vha = (scsi_qla_host_t *)tmp; wait = 0; ha = vha->hw; if ((unsigned long )cmd->SCp.ptr == (unsigned long )((char *)0)) { return (8194); } else { } ret = fc_block_scsi_eh(cmd); if (ret != 0) { return (ret); } else { } ret = 8194; id = (cmd->device)->id; lun = (cmd->device)->lun; tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); sp = (srb_t *)cmd->SCp.ptr; if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { spin_unlock_irqrestore(& ha->hardware_lock, flags); return (8194); } else { } ql_dbg(4194304U, vha, 32770, "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p\n", vha->host_no, id, lun, sp, cmd); sp_get(sp); spin_unlock_irqrestore(& ha->hardware_lock, flags); rval = (*((ha->isp_ops)->abort_command))(sp); if (rval != 0) { if (rval == 257) { atomic_dec(& sp->ref_count); ret = 8194; } else { ret = 8195; } ql_dbg(4194304U, vha, 32771, "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval); } else { ql_dbg(4194304U, vha, 32772, "Abort command mbx success cmd=%p.\n", cmd); wait = 1; } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); if (rval == 257) { *((vha->req)->outstanding_cmds + (unsigned long )sp->handle) = (srb_t *)0; } else { } (*(sp->done))((void *)ha, (void *)sp, 0); spin_unlock_irqrestore(& ha->hardware_lock, flags); if (ret == 8195 && (unsigned long )cmd->SCp.ptr == (unsigned long )((char *)0)) { ret = 8194; } else { } if (wait != 0) { tmp___2 = qla2x00_eh_wait_on_command(cmd); if (tmp___2 != 0) { ql_log(1U, vha, 32774, "Abort handler timed out cmd=%p.\n", cmd); ret = 8195; } else { } } else { } ql_log(2U, vha, 32796, "Abort command issued nexus=%ld:%d:%llu -- %d %x.\n", vha->host_no, id, lun, wait, ret); return (ret); } } int qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha , unsigned int t , uint64_t l , enum nexus_wait_type type ) { int cnt ; int match ; int status ; unsigned long flags ; struct qla_hw_data *ha ; struct req_que *req ; srb_t *sp ; struct scsi_cmnd *cmd ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; { ha = vha->hw; status = 0; tmp 
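/* qla2x00_eh_wait_for_pending_commands walks the outstanding-command array matching by host/target/LUN nexus; __qla2xxx_eh_generic_reset is the shared sequence behind the eh_device_reset and eh_target_reset callbacks. */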
= spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); req = vha->req; cnt = 1; goto ldv_67051; ldv_67050: sp = *(req->outstanding_cmds + (unsigned long )cnt); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto ldv_67042; } else { } if ((unsigned int )sp->type != 8U) { goto ldv_67042; } else { } if ((int )vha->vp_idx != (int )((sp->fcport)->vha)->vp_idx) { goto ldv_67042; } else { } match = 0; cmd = sp->u.scmd.cmd; switch ((unsigned int )type) { case 0U: match = 1; goto ldv_67044; case 1U: match = (cmd->device)->id == t; goto ldv_67044; case 2U: match = (cmd->device)->id == t && (cmd->device)->lun == l; goto ldv_67044; } ldv_67044: ; if (match == 0) { goto ldv_67042; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); status = qla2x00_eh_wait_on_command(cmd); tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); ldv_67042: cnt = cnt + 1; ldv_67051: ; if (status == 0 && (int )req->num_outstanding_cmds > cnt) { goto ldv_67050; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (status); } } static char *reset_errors[4U] = { (char *)"HBA not online", (char *)"HBA not ready", (char *)"Task management failed", (char *)"Waiting for command completions"}; static int __qla2xxx_eh_generic_reset(char *name , enum nexus_wait_type type , struct scsi_cmnd *cmd , int (*do_reset)(struct fc_port * , uint64_t , int ) ) { scsi_qla_host_t *vha ; void *tmp ; fc_port_t *fcport ; int err ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = shost_priv((cmd->device)->host); vha = (scsi_qla_host_t *)tmp; fcport = (fc_port_t *)(cmd->device)->hostdata; if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { return (8195); } else { } err = fc_block_scsi_eh(cmd); if (err != 0) { return (err); } else { } ql_log(2U, vha, 32777, "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no, (cmd->device)->id, (cmd->device)->lun, cmd); err = 0; tmp___0 = qla2x00_wait_for_hba_online(vha); if (tmp___0 != 0) { ql_log(1U, vha, 32778, "Wait for hba online failed for cmd=%p.\n", cmd); goto eh_reset_failed; } else { } err = 2; tmp___1 = (*do_reset)(fcport, (cmd->device)->lun, (cmd->request)->cpu + 1); if (tmp___1 != 0) { ql_log(1U, vha, 32780, "do_reset failed for cmd=%p.\n", cmd); goto eh_reset_failed; } else { } err = 3; tmp___2 = qla2x00_eh_wait_for_pending_commands(vha, (cmd->device)->id, (cmd->device)->lun, type); if (tmp___2 != 0) { ql_log(1U, vha, 32781, "wait for pending cmds failed for cmd=%p.\n", cmd); goto eh_reset_failed; } else { } ql_log(2U, vha, 32782, "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name, vha->host_no, (cmd->device)->id, (cmd->device)->lun, cmd); return (8194); eh_reset_failed: ql_log(2U, vha, 32783, "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name, reset_errors[err], vha->host_no, (cmd->device)->id, (cmd->device)->lun, cmd); return (8195); } } static int qla2xxx_eh_device_reset(struct scsi_cmnd *cmd ) { scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { tmp = shost_priv((cmd->device)->host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = __qla2xxx_eh_generic_reset((char *)"DEVICE", 2, cmd, (ha->isp_ops)->lun_reset); return (tmp___0); } } static int qla2xxx_eh_target_reset(struct scsi_cmnd *cmd ) { scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { tmp = shost_priv((cmd->device)->host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = __qla2xxx_eh_generic_reset((char *)"TARGET", 1, cmd, (ha->isp_ops)->target_reset); 
return (tmp___0); } } static int qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd ) { scsi_qla_host_t *vha ; void *tmp ; fc_port_t *fcport ; int ret ; unsigned int id ; uint64_t lun ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = shost_priv((cmd->device)->host); vha = (scsi_qla_host_t *)tmp; fcport = (fc_port_t *)(cmd->device)->hostdata; ret = 8195; id = (cmd->device)->id; lun = (cmd->device)->lun; if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { return (ret); } else { } ret = fc_block_scsi_eh(cmd); if (ret != 0) { return (ret); } else { } ret = 8195; ql_log(2U, vha, 32786, "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); tmp___0 = qla2x00_wait_for_hba_online(vha); if (tmp___0 != 0) { ql_log(0U, vha, 32787, "Wait for hba online failed board disabled.\n"); goto eh_bus_reset_done; } else { } tmp___1 = qla2x00_loop_reset(vha); if (tmp___1 == 0) { ret = 8194; } else { } if (ret == 8195) { goto eh_bus_reset_done; } else { } tmp___2 = qla2x00_eh_wait_for_pending_commands(vha, 0U, 0ULL, 0); if (tmp___2 != 0) { ql_log(1U, vha, 32788, "Wait for pending commands failed.\n"); ret = 8195; } else { } eh_bus_reset_done: ql_log(1U, vha, 32811, "BUS RESET %s nexus=%ld:%d:%llu.\n", ret == 8195 ? (char *)"FAILED" : (char *)"SUCCEEDED", vha->host_no, id, lun); return (ret); } } static int qla2xxx_eh_host_reset(struct scsi_cmnd *cmd ) { scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int ret ; unsigned int id ; uint64_t lun ; scsi_qla_host_t *base_vha ; void *tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; { tmp = shost_priv((cmd->device)->host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; ret = 8195; tmp___0 = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp___0; id = (cmd->device)->id; lun = (cmd->device)->lun; ql_log(2U, vha, 32792, "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); tmp___1 = qla2x00_reset_active(vha); if (tmp___1 != 0 || ha->optrom_state != 0) { goto eh_host_reset_lock; } else { } if ((unsigned long )vha != (unsigned long )base_vha) { tmp___2 = qla2x00_vp_abort_isp(vha); if (tmp___2 != 0) { goto eh_host_reset_lock; } else { } } else { if (((vha->hw)->device_type & 16384U) != 0U || ((vha->hw)->device_type & 262144U) != 0U) { tmp___3 = qla82xx_fcoe_ctx_reset(vha); if (tmp___3 == 0) { ret = 8194; goto eh_host_reset_lock; } else { } } else { } if ((unsigned long )ha->wq != (unsigned long )((struct workqueue_struct *)0)) { ldv_flush_workqueue_14(ha->wq); } else { } set_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); tmp___5 = (*((ha->isp_ops)->abort_isp))(base_vha); if (tmp___5 != 0) { clear_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); set_bit(2L, (unsigned long volatile *)(& base_vha->dpc_flags)); tmp___4 = qla2x00_wait_for_hba_online(vha); if (tmp___4 != 0) { ql_log(1U, vha, 32810, "wait for hba online failed.\n"); goto eh_host_reset_lock; } else { } } else { } clear_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); } tmp___6 = qla2x00_eh_wait_for_pending_commands(vha, 0U, 0ULL, 0); if (tmp___6 == 0) { ret = 8194; } else { } eh_host_reset_lock: ql_log(2U, vha, 32791, "ADAPTER RESET %s nexus=%ld:%d:%llu.\n", ret == 8195 ? 
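/* qla2x00_loop_reset issues per-target resets when ql2xtargetreset is set, then a full-login LIP or LIP reset as the hardware allows; qla2x00_abort_all_cmds completes every outstanding SRB with the given result under hardware_lock; slave_alloc binds the scsi_device to its FC rport. */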
(char *)"FAILED" : (char *)"SUCCEEDED", vha->host_no, id, lun); return (ret); } } int qla2x00_loop_reset(scsi_qla_host_t *vha ) { int ret ; struct fc_port *fcport ; struct qla_hw_data *ha ; int tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { ha = vha->hw; if ((ha->device_type & 131072U) != 0U) { tmp = qlafx00_loop_reset(vha); return (tmp); } else { } if (ql2xtargetreset == 1 && *((unsigned long *)ha + 2UL) != 0UL) { __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (struct fc_port *)__mptr; goto ldv_67108; ldv_67107: ; if ((unsigned int )fcport->port_type != 5U) { goto ldv_67106; } else { } ret = (*((ha->isp_ops)->target_reset))(fcport, 0ULL, 0); if (ret != 0) { ql_dbg(4194304U, vha, 32812, "Bus Reset failed: Reset=%d d_id=%x.\n", ret, (int )fcport->d_id.b24); } else { } ldv_67106: __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (struct fc_port *)__mptr___0; ldv_67108: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_67107; } else { } } else { } if (*((unsigned long *)ha + 2UL) != 0UL && ((((ha->device_type & 8192U) == 0U && (ha->device_type & 16384U) == 0U) && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U)) { atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); qla2x00_mark_all_devices_lost(vha, 0); ret = qla2x00_full_login_lip(vha); if (ret != 0) { ql_dbg(4194304U, vha, 32813, "full_login_lip=%d.\n", ret); } else { } } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { ret = qla2x00_lip_reset(vha); if (ret != 0) { ql_dbg(4194304U, vha, 32814, "lip_reset failed (%d).\n", ret); } else { } } else { } vha->marker_needed = 1U; return (0); } } void qla2x00_abort_all_cmds(scsi_qla_host_t *vha , int res ) { int que ; int cnt ; unsigned long flags ; srb_t *sp ; struct qla_hw_data *ha ; struct req_que *req ; raw_spinlock_t *tmp ; { ha = vha->hw; qlt_host_reset_handler(ha); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); que = 0; goto ldv_67128; ldv_67127: req = *(ha->req_q_map + (unsigned long )que); if ((unsigned long )req == (unsigned long )((struct req_que *)0)) { goto ldv_67123; } else { } if ((unsigned long )req->outstanding_cmds == (unsigned long )((srb_t **)0)) { goto ldv_67123; } else { } cnt = 1; goto ldv_67125; ldv_67124: sp = *(req->outstanding_cmds + (unsigned long )cnt); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { *(req->outstanding_cmds + (unsigned long )cnt) = (srb_t *)0; (*(sp->done))((void *)vha, (void *)sp, res); } else { } cnt = cnt + 1; ldv_67125: ; if ((int )req->num_outstanding_cmds > cnt) { goto ldv_67124; } else { } ldv_67123: que = que + 1; ldv_67128: ; if ((int )ha->max_req_queues > que) { goto ldv_67127; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static int qla2xxx_slave_alloc(struct scsi_device *sdev ) { struct fc_rport *rport ; struct device const *__mptr ; struct scsi_target *tmp___1 ; struct fc_rport *tmp___2 ; struct scsi_target *tmp___3 ; int tmp___4 ; int tmp___5 ; { tmp___3 = scsi_target(sdev); tmp___4 = scsi_is_fc_rport((struct device const *)tmp___3->dev.parent); if (tmp___4 != 0) { tmp___1 = scsi_target(sdev); __mptr = (struct device const *)tmp___1->dev.parent; tmp___2 = (struct fc_rport *)__mptr + 0xffffffffffffffa0UL; } else { tmp___2 = (struct fc_rport *)0; } rport = tmp___2; if ((unsigned long )rport == (unsigned long )((struct fc_rport *)0)) { return (-6); } else { tmp___5 = fc_remote_port_chkready(rport); if (tmp___5 != 0) { 
return (-6); } else { } } sdev->hostdata = (void *)*((fc_port_t **)rport->dd_data); return (0); } } static int qla2xxx_slave_configure(struct scsi_device *sdev ) { scsi_qla_host_t *vha ; void *tmp ; struct req_que *req ; { tmp = shost_priv(sdev->host); vha = (scsi_qla_host_t *)tmp; req = vha->req; if (((vha->hw)->device_type & 33554432U) != 0U) { blk_queue_update_dma_alignment(sdev->request_queue, 7); } else { } scsi_change_queue_depth(sdev, req->max_q_depth); return (0); } } static void qla2xxx_slave_destroy(struct scsi_device *sdev ) { { sdev->hostdata = (void *)0; return; } } static void qla2x00_config_dma_addressing(struct qla_hw_data *ha ) { u64 tmp ; int tmp___0 ; int tmp___1 ; { ha->flags.enable_64bit_addressing = 0U; tmp___1 = dma_set_mask(& (ha->pdev)->dev, 0xffffffffffffffffULL); if (tmp___1 == 0) { tmp = dma_get_required_mask(& (ha->pdev)->dev); if ((unsigned int )(tmp >> 32ULL) != 0U) { tmp___0 = pci_set_consistent_dma_mask(ha->pdev, 0xffffffffffffffffULL); if (tmp___0 == 0) { ha->flags.enable_64bit_addressing = 1U; (ha->isp_ops)->calc_req_entries = & qla2x00_calc_iocbs_64; (ha->isp_ops)->build_iocbs = & qla2x00_build_scsi_iocbs_64; return; } else { } } else { } } else { } dma_set_mask(& (ha->pdev)->dev, 4294967295ULL); pci_set_consistent_dma_mask(ha->pdev, 4294967295ULL); return; } } static void qla2x00_enable_intrs(struct qla_hw_data *ha ) { unsigned long flags ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; { flags = 0UL; reg = & (ha->iobase)->isp; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ha->interrupts_on = 1U; writew(32776, (void volatile *)(& reg->ictrl)); readw((void const volatile *)(& reg->ictrl)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static void qla2x00_disable_intrs(struct qla_hw_data *ha ) { unsigned long flags ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; { flags = 0UL; reg = & (ha->iobase)->isp; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ha->interrupts_on = 0U; writew(0, (void volatile *)(& reg->ictrl)); readw((void const volatile *)(& reg->ictrl)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static void qla24xx_enable_intrs(struct qla_hw_data *ha ) { unsigned long flags ; struct device_reg_24xx *reg ; raw_spinlock_t *tmp ; { flags = 0UL; reg = & (ha->iobase)->isp24; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ha->interrupts_on = 1U; writel(8U, (void volatile *)(& reg->ictrl)); readl((void const volatile *)(& reg->ictrl)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static void qla24xx_disable_intrs(struct qla_hw_data *ha ) { unsigned long flags ; struct device_reg_24xx *reg ; raw_spinlock_t *tmp ; { flags = 0UL; reg = & (ha->iobase)->isp24; if ((ha->device_type & 8192U) != 0U && *((unsigned long *)ha + 2UL) != 0UL) { return; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ha->interrupts_on = 0U; writel(0U, (void volatile *)(& reg->ictrl)); readl((void const volatile *)(& reg->ictrl)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static int qla2x00_iospace_config(struct qla_hw_data *ha ) { resource_size_t pio ; uint16_t msix ; int cpus ; char const *tmp ; int tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; char const *tmp___4 ; void *tmp___5 ; char const *tmp___6 ; uint8_t tmp___7 ; void *tmp___8 ; unsigned int tmp___9 ; { tmp___0 = pci_request_selected_regions(ha->pdev, ha->bars, 
"qla2xxx"); if (tmp___0 != 0) { tmp = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 17, "Failed to reserve PIO/MMIO regions (%s), aborting.\n", tmp); goto iospace_error_exit; } else { } if ((ha->bars & 1) == 0) { goto skip_pio; } else { } pio = (ha->pdev)->resource[0].start; if (((ha->pdev)->resource[0].flags & 256UL) != 0UL) { if (((ha->pdev)->resource[0].start == 0ULL && (ha->pdev)->resource[0].end == (ha->pdev)->resource[0].start) || ((ha->pdev)->resource[0].end - (ha->pdev)->resource[0].start) + 1ULL <= 255ULL) { tmp___1 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 18, "Invalid pci I/O region size (%s).\n", tmp___1); pio = 0ULL; } else { } } else { tmp___2 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 19, "Region #0 no a PIO resource (%s).\n", tmp___2); pio = 0ULL; } ha->pio_address = pio; ql_dbg_pci(1073741824U, ha->pdev, 20, "PIO address=%llu.\n", ha->pio_address); skip_pio: ; if (((ha->pdev)->resource[1].flags & 512UL) == 0UL) { tmp___3 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 21, "Region #1 not an MMIO resource (%s), aborting.\n", tmp___3); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[1].start == 0ULL && (ha->pdev)->resource[1].end == (ha->pdev)->resource[1].start) || ((ha->pdev)->resource[1].end - (ha->pdev)->resource[1].start) + 1ULL <= 255ULL) { tmp___4 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 22, "Invalid PCI mem region size (%s), aborting.\n", tmp___4); goto iospace_error_exit; } else { } tmp___5 = ioremap((ha->pdev)->resource[1].start, 256UL); ha->iobase = (device_reg_t *)tmp___5; if ((unsigned long )ha->iobase == (unsigned long )((device_reg_t *)0)) { tmp___6 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 23, "Cannot remap MMIO (%s), aborting.\n", tmp___6); goto iospace_error_exit; } else { } tmp___7 = 1U; ha->max_rsp_queues = tmp___7; ha->max_req_queues = tmp___7; if (((ql2xmaxqueues <= 1 && ql2xmultique_tag == 0) || (ql2xmaxqueues > 1 && ql2xmultique_tag != 0)) || ((ha->device_type & 2048U) == 0U && (ha->device_type & 8192U) == 0U)) { goto mqiobase_exit; } else { } tmp___8 = ioremap((ha->pdev)->resource[3].start, (ha->pdev)->resource[3].start != 0ULL || (ha->pdev)->resource[3].end != (ha->pdev)->resource[3].start ? (unsigned long )(((ha->pdev)->resource[3].end - (ha->pdev)->resource[3].start) + 1ULL) : 0UL); ha->mqiobase = (device_reg_t *)tmp___8; if ((unsigned long )ha->mqiobase != (unsigned long )((device_reg_t *)0)) { ql_dbg_pci(1073741824U, ha->pdev, 24, "MQIO Base=%p.\n", ha->mqiobase); pci_read_config_word((struct pci_dev const *)ha->pdev, 162, & msix); ha->msix_count = msix; if (ql2xmultique_tag != 0) { tmp___9 = cpumask_weight(cpu_online_mask); cpus = (int )tmp___9; ha->max_rsp_queues = (int )ha->msix_count + -1 > cpus ? (unsigned int )((uint8_t )cpus) + 1U : (unsigned int )((uint8_t )ha->msix_count) + 255U; ha->max_req_queues = 2U; } else if (ql2xmaxqueues > 1) { ha->max_req_queues = (uint8_t )(32 < ql2xmaxqueues ? 
32 : ql2xmaxqueues); ql_dbg_pci(1048576U, ha->pdev, 49160, "QoS mode set, max no of request queues:%d.\n", (int )ha->max_req_queues); ql_dbg_pci(1073741824U, ha->pdev, 25, "QoS mode set, max no of request queues:%d.\n", (int )ha->max_req_queues); } else { } ql_log_pci(2U, ha->pdev, 26, "MSI-X vector count: %d.\n", (int )msix); } else { ql_log_pci(2U, ha->pdev, 27, "BAR 3 not enabled.\n"); } mqiobase_exit: ha->msix_count = (unsigned int )((uint16_t )ha->max_rsp_queues) + 1U; ql_dbg_pci(1073741824U, ha->pdev, 28, "MSIX Count:%d.\n", (int )ha->msix_count); return (0); iospace_error_exit: ; return (-12); } } static int qla83xx_iospace_config(struct qla_hw_data *ha ) { uint16_t msix ; int cpus ; char const *tmp ; int tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; void *tmp___3 ; char const *tmp___4 ; uint8_t tmp___5 ; void *tmp___6 ; void *tmp___7 ; unsigned int tmp___8 ; { tmp___0 = pci_request_selected_regions(ha->pdev, ha->bars, "qla2xxx"); if (tmp___0 != 0) { tmp = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 279, "Failed to reserve PIO/MMIO regions (%s), aborting.\n", tmp); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[0].flags & 512UL) == 0UL) { tmp___1 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 280, "Invalid pci I/O region size (%s).\n", tmp___1); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[0].start == 0ULL && (ha->pdev)->resource[0].end == (ha->pdev)->resource[0].start) || ((ha->pdev)->resource[0].end - (ha->pdev)->resource[0].start) + 1ULL <= 255ULL) { tmp___2 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 281, "Invalid PCI mem region size (%s), aborting\n", tmp___2); goto iospace_error_exit; } else { } tmp___3 = ioremap((ha->pdev)->resource[0].start, 256UL); ha->iobase = (device_reg_t *)tmp___3; if ((unsigned long )ha->iobase == (unsigned long )((device_reg_t *)0)) { tmp___4 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 282, "Cannot remap MMIO (%s), aborting.\n", tmp___4); goto iospace_error_exit; } else { } tmp___5 = 1U; ha->max_rsp_queues = tmp___5; ha->max_req_queues = tmp___5; tmp___6 = ioremap((ha->pdev)->resource[4].start, (ha->pdev)->resource[4].start != 0ULL || (ha->pdev)->resource[4].end != (ha->pdev)->resource[4].start ? (unsigned long )(((ha->pdev)->resource[4].end - (ha->pdev)->resource[4].start) + 1ULL) : 0UL); ha->mqiobase = (device_reg_t *)tmp___6; if ((unsigned long )ha->mqiobase == (unsigned long )((device_reg_t *)0)) { ql_log_pci(0U, ha->pdev, 285, "BAR2/region4 not enabled\n"); goto mqiobase_exit; } else { } tmp___7 = ioremap((ha->pdev)->resource[2].start, (ha->pdev)->resource[2].start != 0ULL || (ha->pdev)->resource[2].end != (ha->pdev)->resource[2].start ? (unsigned long )(((ha->pdev)->resource[2].end - (ha->pdev)->resource[2].start) + 1ULL) : 0UL); ha->msixbase = (device_reg_t *)tmp___7; if ((unsigned long )ha->msixbase != (unsigned long )((device_reg_t *)0)) { pci_read_config_word((struct pci_dev const *)ha->pdev, 146, & msix); ha->msix_count = msix; if (ql2xmultique_tag != 0) { tmp___8 = cpumask_weight(cpu_online_mask); cpus = (int )tmp___8; ha->max_rsp_queues = (int )ha->msix_count + -1 > cpus ? (unsigned int )((uint8_t )cpus) + 1U : (unsigned int )((uint8_t )ha->msix_count) + 255U; ha->max_req_queues = 2U; } else if (ql2xmaxqueues > 1) { ha->max_req_queues = (uint8_t )(32 < ql2xmaxqueues ? 
32 : ql2xmaxqueues); ql_dbg_pci(1048576U, ha->pdev, 49164, "QoS mode set, max no of request queues:%d.\n", (int )ha->max_req_queues); ql_dbg_pci(1073741824U, ha->pdev, 283, "QoS mode set, max no of request queues:%d.\n", (int )ha->max_req_queues); } else { } ql_log_pci(2U, ha->pdev, 284, "MSI-X vector count: %d.\n", (int )msix); } else { ql_log_pci(2U, ha->pdev, 286, "BAR 1 not enabled.\n"); } mqiobase_exit: ha->msix_count = (unsigned int )((uint16_t )ha->max_rsp_queues) + 1U; qlt_83xx_iospace_config(ha); ql_dbg_pci(1073741824U, ha->pdev, 287, "MSIX Count:%d.\n", (int )ha->msix_count); return (0); iospace_error_exit: ; return (-12); } } static struct isp_operations qla2100_isp_ops = {& qla2100_pci_config, & qla2x00_reset_chip, & qla2x00_chip_diag, & qla2x00_config_rings, & qla2x00_reset_adapter, & qla2x00_nvram_config, & qla2x00_update_fw_options, & qla2x00_load_risc, & qla2x00_pci_info_str, & qla2x00_fw_version_str, & qla2100_intr_handler, & qla2x00_enable_intrs, & qla2x00_disable_intrs, & qla2x00_abort_command, & qla2x00_abort_target, & qla2x00_lun_reset, & qla2x00_login_fabric, & qla2x00_fabric_logout, & qla2x00_calc_iocbs_32, & qla2x00_build_scsi_iocbs_32, & qla2x00_prep_ms_iocb, & qla2x00_prep_ms_fdmi_iocb, & qla2x00_read_nvram_data, & qla2x00_write_nvram_data, & qla2100_fw_dump, (int (*)(struct scsi_qla_host * ))0, (int (*)(struct scsi_qla_host * ))0, (void (*)(struct scsi_qla_host * ))0, & qla2x00_read_optrom_data, & qla2x00_write_optrom_data, & qla2x00_get_flash_version, & qla2x00_start_scsi, & qla2x00_abort_isp, & qla2x00_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla2300_isp_ops = {& qla2300_pci_config, & qla2x00_reset_chip, & qla2x00_chip_diag, & qla2x00_config_rings, & qla2x00_reset_adapter, & qla2x00_nvram_config, & qla2x00_update_fw_options, & qla2x00_load_risc, & qla2x00_pci_info_str, & qla2x00_fw_version_str, & qla2300_intr_handler, & qla2x00_enable_intrs, & qla2x00_disable_intrs, & qla2x00_abort_command, & qla2x00_abort_target, & qla2x00_lun_reset, & qla2x00_login_fabric, & qla2x00_fabric_logout, & qla2x00_calc_iocbs_32, & qla2x00_build_scsi_iocbs_32, & qla2x00_prep_ms_iocb, & qla2x00_prep_ms_fdmi_iocb, & qla2x00_read_nvram_data, & qla2x00_write_nvram_data, & qla2300_fw_dump, & qla2x00_beacon_on, & qla2x00_beacon_off, & qla2x00_beacon_blink, & qla2x00_read_optrom_data, & qla2x00_write_optrom_data, & qla2x00_get_flash_version, & qla2x00_start_scsi, & qla2x00_abort_isp, & qla2x00_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla24xx_isp_ops = {& qla24xx_pci_config, & qla24xx_reset_chip, & qla24xx_chip_diag, & qla24xx_config_rings, & qla24xx_reset_adapter, & qla24xx_nvram_config, & qla24xx_update_fw_options, & qla24xx_load_risc, & qla24xx_pci_info_str, & qla24xx_fw_version_str, & qla24xx_intr_handler, & qla24xx_enable_intrs, & qla24xx_disable_intrs, & qla24xx_abort_command, & qla24xx_abort_target, & qla24xx_lun_reset, & qla24xx_login_fabric, & qla24xx_fabric_logout, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, & qla24xx_read_nvram_data, & qla24xx_write_nvram_data, & qla24xx_fw_dump, & qla24xx_beacon_on, & qla24xx_beacon_off, & qla24xx_beacon_blink, & qla24xx_read_optrom_data, & qla24xx_write_optrom_data, & qla24xx_get_flash_version, & qla24xx_start_scsi, & qla2x00_abort_isp, & qla2x00_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla25xx_isp_ops = {& qla25xx_pci_config, & qla24xx_reset_chip, & 
qla24xx_chip_diag, & qla24xx_config_rings, & qla24xx_reset_adapter, & qla24xx_nvram_config, & qla24xx_update_fw_options, & qla24xx_load_risc, & qla24xx_pci_info_str, & qla24xx_fw_version_str, & qla24xx_intr_handler, & qla24xx_enable_intrs, & qla24xx_disable_intrs, & qla24xx_abort_command, & qla24xx_abort_target, & qla24xx_lun_reset, & qla24xx_login_fabric, & qla24xx_fabric_logout, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, & qla25xx_read_nvram_data, & qla25xx_write_nvram_data, & qla25xx_fw_dump, & qla24xx_beacon_on, & qla24xx_beacon_off, & qla24xx_beacon_blink, & qla25xx_read_optrom_data, & qla24xx_write_optrom_data, & qla24xx_get_flash_version, & qla24xx_dif_start_scsi, & qla2x00_abort_isp, & qla2x00_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla81xx_isp_ops = {& qla25xx_pci_config, & qla24xx_reset_chip, & qla24xx_chip_diag, & qla24xx_config_rings, & qla24xx_reset_adapter, & qla81xx_nvram_config, & qla81xx_update_fw_options, & qla81xx_load_risc, & qla24xx_pci_info_str, & qla24xx_fw_version_str, & qla24xx_intr_handler, & qla24xx_enable_intrs, & qla24xx_disable_intrs, & qla24xx_abort_command, & qla24xx_abort_target, & qla24xx_lun_reset, & qla24xx_login_fabric, & qla24xx_fabric_logout, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, (uint8_t *(*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0, (int (*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0, & qla81xx_fw_dump, & qla24xx_beacon_on, & qla24xx_beacon_off, & qla83xx_beacon_blink, & qla25xx_read_optrom_data, & qla24xx_write_optrom_data, & qla24xx_get_flash_version, & qla24xx_dif_start_scsi, & qla2x00_abort_isp, & qla2x00_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla82xx_isp_ops = {& qla82xx_pci_config, & qla82xx_reset_chip, & qla24xx_chip_diag, & qla82xx_config_rings, & qla24xx_reset_adapter, & qla81xx_nvram_config, & qla24xx_update_fw_options, & qla82xx_load_risc, & qla24xx_pci_info_str, & qla24xx_fw_version_str, & qla82xx_intr_handler, & qla82xx_enable_intrs, & qla82xx_disable_intrs, & qla24xx_abort_command, & qla24xx_abort_target, & qla24xx_lun_reset, & qla24xx_login_fabric, & qla24xx_fabric_logout, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, & qla24xx_read_nvram_data, & qla24xx_write_nvram_data, & qla82xx_fw_dump, & qla82xx_beacon_on, & qla82xx_beacon_off, (void (*)(struct scsi_qla_host * ))0, & qla82xx_read_optrom_data, & qla82xx_write_optrom_data, & qla82xx_get_flash_version, & qla82xx_start_scsi, & qla82xx_abort_isp, & qla82xx_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla8044_isp_ops = {& qla82xx_pci_config, & qla82xx_reset_chip, & qla24xx_chip_diag, & qla82xx_config_rings, & qla24xx_reset_adapter, & qla81xx_nvram_config, & qla24xx_update_fw_options, & qla82xx_load_risc, & qla24xx_pci_info_str, & qla24xx_fw_version_str, & qla8044_intr_handler, & qla82xx_enable_intrs, & qla82xx_disable_intrs, & qla24xx_abort_command, & qla24xx_abort_target, & qla24xx_lun_reset, & qla24xx_login_fabric, & qla24xx_fabric_logout, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, (uint8_t *(*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0, (int (*)(struct scsi_qla_host * , uint8_t 
* , uint32_t , uint32_t ))0, & qla8044_fw_dump, & qla82xx_beacon_on, & qla82xx_beacon_off, (void (*)(struct scsi_qla_host * ))0, & qla8044_read_optrom_data, & qla8044_write_optrom_data, & qla82xx_get_flash_version, & qla82xx_start_scsi, & qla8044_abort_isp, & qla82xx_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla83xx_isp_ops = {& qla25xx_pci_config, & qla24xx_reset_chip, & qla24xx_chip_diag, & qla24xx_config_rings, & qla24xx_reset_adapter, & qla81xx_nvram_config, & qla81xx_update_fw_options, & qla81xx_load_risc, & qla24xx_pci_info_str, & qla24xx_fw_version_str, & qla24xx_intr_handler, & qla24xx_enable_intrs, & qla24xx_disable_intrs, & qla24xx_abort_command, & qla24xx_abort_target, & qla24xx_lun_reset, & qla24xx_login_fabric, & qla24xx_fabric_logout, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, (uint8_t *(*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0, (int (*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0, & qla83xx_fw_dump, & qla24xx_beacon_on, & qla24xx_beacon_off, & qla83xx_beacon_blink, & qla25xx_read_optrom_data, & qla24xx_write_optrom_data, & qla24xx_get_flash_version, & qla24xx_dif_start_scsi, & qla2x00_abort_isp, & qla83xx_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qlafx00_isp_ops = {& qlafx00_pci_config, & qlafx00_soft_reset, & qlafx00_chip_diag, & qlafx00_config_rings, & qlafx00_soft_reset, (int (*)(struct scsi_qla_host * ))0, (void (*)(struct scsi_qla_host * ))0, (int (*)(struct scsi_qla_host * , uint32_t * ))0, & qlafx00_pci_info_str, & qlafx00_fw_version_str, & qlafx00_intr_handler, & qlafx00_enable_intrs, & qlafx00_disable_intrs, & qla24xx_async_abort_command, & qlafx00_abort_target, & qlafx00_lun_reset, (int (*)(struct scsi_qla_host * , uint16_t , uint8_t , uint8_t , uint8_t , uint16_t * , uint8_t ))0, (int (*)(struct scsi_qla_host * , uint16_t , uint8_t , uint8_t , uint8_t ))0, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, & qla24xx_read_nvram_data, & qla24xx_write_nvram_data, (void (*)(struct scsi_qla_host * , int ))0, & qla24xx_beacon_on, & qla24xx_beacon_off, (void (*)(struct scsi_qla_host * ))0, & qla24xx_read_optrom_data, & qla24xx_write_optrom_data, & qla24xx_get_flash_version, & qlafx00_start_scsi, & qlafx00_abort_isp, & qlafx00_iospace_config, & qlafx00_initialize_adapter}; static struct isp_operations qla27xx_isp_ops = {& qla25xx_pci_config, & qla24xx_reset_chip, & qla24xx_chip_diag, & qla24xx_config_rings, & qla24xx_reset_adapter, & qla81xx_nvram_config, & qla81xx_update_fw_options, & qla81xx_load_risc, & qla24xx_pci_info_str, & qla24xx_fw_version_str, & qla24xx_intr_handler, & qla24xx_enable_intrs, & qla24xx_disable_intrs, & qla24xx_abort_command, & qla24xx_abort_target, & qla24xx_lun_reset, & qla24xx_login_fabric, & qla24xx_fabric_logout, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, (uint8_t *(*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0, (int (*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0, & qla27xx_fwdump, & qla24xx_beacon_on, & qla24xx_beacon_off, & qla83xx_beacon_blink, & qla25xx_read_optrom_data, & qla24xx_write_optrom_data, & qla24xx_get_flash_version, & qla24xx_dif_start_scsi, & qla2x00_abort_isp, & qla83xx_iospace_config, & qla2x00_initialize_adapter}; __inline static 
void qla2x00_set_isp_flags(struct qla_hw_data *ha ) { { ha->device_type = 2147483648U; switch ((int )(ha->pdev)->device) { case 8448: ha->device_type = ha->device_type | 1U; ha->device_type = ha->device_type & 2147483647U; ha->fw_srisc_address = 4096U; goto ldv_67209; case 8704: ha->device_type = ha->device_type | 2U; ha->device_type = ha->device_type & 2147483647U; ha->fw_srisc_address = 4096U; goto ldv_67209; case 8960: ha->device_type = ha->device_type | 4U; ha->device_type = ha->device_type | 268435456U; ha->fw_srisc_address = 2048U; goto ldv_67209; case 8978: ha->device_type = ha->device_type | 8U; ha->device_type = ha->device_type | 268435456U; ha->fw_srisc_address = 2048U; goto ldv_67209; case 8994: ha->device_type = ha->device_type | 16U; ha->device_type = ha->device_type | 268435456U; if ((unsigned int )(ha->pdev)->subsystem_vendor == 4136U && (unsigned int )(ha->pdev)->subsystem_device == 368U) { ha->device_type = ha->device_type | 536870912U; } else { } ha->fw_srisc_address = 2048U; goto ldv_67209; case 25362: ha->device_type = ha->device_type | 32U; ha->fw_srisc_address = 2048U; goto ldv_67209; case 25378: ha->device_type = ha->device_type | 64U; ha->fw_srisc_address = 2048U; goto ldv_67209; case 9250: ha->device_type = ha->device_type | 128U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->fw_srisc_address = 1048576U; goto ldv_67209; case 9266: ha->device_type = ha->device_type | 256U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->fw_srisc_address = 1048576U; goto ldv_67209; case 33842: ha->device_type = ha->device_type | 4096U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->fw_srisc_address = 1048576U; goto ldv_67209; case 21538: ha->device_type = ha->device_type | 512U; ha->device_type = ha->device_type | 134217728U; ha->fw_srisc_address = 1048576U; goto ldv_67209; case 21554: ha->device_type = ha->device_type | 1024U; ha->device_type = ha->device_type | 134217728U; ha->fw_srisc_address = 1048576U; goto ldv_67209; case 9522: ha->device_type = ha->device_type | 2048U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->fw_srisc_address = 1048576U; goto ldv_67209; case 32769: ha->device_type = ha->device_type | 8192U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->fw_srisc_address = 1048576U; goto ldv_67209; case 32801: ha->device_type = ha->device_type | 16384U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->fw_srisc_address = 1048576U; qla82xx_init_flags(ha); goto ldv_67209; case 32836: ha->device_type = ha->device_type | 262144U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->fw_srisc_address = 1048576U; qla82xx_init_flags(ha); goto ldv_67209; case 8241: ha->device_type = ha->device_type | 32768U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->device_type = ha->device_type | 33554432U; ha->fw_srisc_address = 1048576U; goto ldv_67209; case 32817: ha->device_type = ha->device_type | 
65536U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->device_type = ha->device_type | 33554432U; ha->fw_srisc_address = 1048576U; goto ldv_67209; case 61441: ha->device_type = ha->device_type | 131072U; goto ldv_67209; case 8305: ha->device_type = ha->device_type | 524288U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->fw_srisc_address = 1048576U; goto ldv_67209; case 8817: ha->device_type = ha->device_type | 1048576U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->fw_srisc_address = 1048576U; goto ldv_67209; } ldv_67209: ; if ((ha->device_type & 16384U) != 0U) { ha->port_no = (unsigned int )((uint8_t )ha->portnum) & 1U; } else { pci_read_config_byte((struct pci_dev const *)ha->pdev, 61, & ha->port_no); if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { ha->port_no = (uint8_t )((int )ha->port_no - 1); } else { ha->port_no = ((int )ha->port_no & 1) == 0; } } ql_dbg_pci(1073741824U, ha->pdev, 11, "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", ha->device_type, (int )ha->port_no, ha->fw_srisc_address); return; } } static void qla2xxx_scan_start(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; if (*((unsigned long *)vha->hw + 2UL) != 0UL) { return; } else { } set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(7L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(16L, (unsigned long volatile *)(& vha->dpc_flags)); return; } } static int qla2xxx_scan_finished(struct Scsi_Host *shost , unsigned long time ) { scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; if ((unsigned long )vha->host == (unsigned long )((struct Scsi_Host *)0)) { return (1); } else { } if ((unsigned long )((int )(vha->hw)->loop_reset_delay * 250) < time) { return (1); } else { } tmp___0 = atomic_read((atomic_t const *)(& vha->loop_state)); return (tmp___0 == 5); } } static int qla2x00_probe_one(struct pci_dev *pdev , struct pci_device_id const *id ) { int ret ; struct Scsi_Host *host ; scsi_qla_host_t *base_vha ; struct qla_hw_data *ha ; char pci_info[30U] ; char fw_str[30U] ; char wq_name[30U] ; struct scsi_host_template *sht ; int bars ; int mem_only ; uint16_t req_length ; uint16_t rsp_length ; struct req_que *req ; struct rsp_que *rsp ; int tmp ; int tmp___0 ; void *tmp___1 ; struct lock_class_key __key ; struct lock_class_key __key___0 ; struct lock_class_key __key___1 ; struct lock_class_key __key___2 ; struct lock_class_key __key___3 ; struct lock_class_key __key___4 ; struct scsi_qla_host *tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; long tmp___6 ; bool tmp___7 ; struct lock_class_key __key___5 ; atomic_long_t __constr_expr_0 ; struct lock_class_key __key___6 ; char const *__lock_name ; struct workqueue_struct *tmp___8 ; struct lock_class_key __key___7 ; atomic_long_t __constr_expr_1 ; struct lock_class_key __key___8 ; char const *__lock_name___0 ; struct workqueue_struct *tmp___9 ; struct lock_class_key __key___9 ; atomic_long_t __constr_expr_2 ; struct lock_class_key __key___10 ; atomic_long_t __constr_expr_3 ; struct lock_class_key __key___11 ; atomic_long_t __constr_expr_4 ; 
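/* The lock_class_key, atomic_long_t __constr_expr_N and __init_work/lockdep_init_map
 * sequences declared and used throughout qla2x00_probe_one appear to be CIL expansions of
 * the kernel spin_lock_init()/mutex_init()/INIT_WORK() macros; they set up lockdep metadata
 * and work_struct state rather than carrying driver logic of their own. */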
int prot ; int guard ; bool tmp___10 ; char *tmp___11 ; char const *tmp___12 ; char *tmp___13 ; uint8_t tmp___14 ; struct task_struct *t ; { ret = -19; base_vha = (scsi_qla_host_t *)0; mem_only = 0; req_length = 0U; rsp_length = 0U; req = (struct req_que *)0; rsp = (struct rsp_que *)0; bars = pci_select_bars(pdev, 768UL); sht = & qla2xxx_driver_template; if ((((((((((((((unsigned int )pdev->device == 9250U || (unsigned int )pdev->device == 9266U) || (unsigned int )pdev->device == 33842U) || (unsigned int )pdev->device == 21538U) || (unsigned int )pdev->device == 21554U) || (unsigned int )pdev->device == 9522U) || (unsigned int )pdev->device == 32769U) || (unsigned int )pdev->device == 32801U) || (unsigned int )pdev->device == 8241U) || (unsigned int )pdev->device == 32817U) || (unsigned int )pdev->device == 61441U) || (unsigned int )pdev->device == 32836U) || (unsigned int )pdev->device == 8305U) || (unsigned int )pdev->device == 8817U) { bars = pci_select_bars(pdev, 512UL); mem_only = 1; ql_dbg_pci(1073741824U, pdev, 7, "Mem only adapter.\n"); } else { } ql_dbg_pci(1073741824U, pdev, 8, "Bars=%d.\n", bars); if (mem_only != 0) { tmp = pci_enable_device_mem(pdev); if (tmp != 0) { goto probe_out; } else { } } else { tmp___0 = pci_enable_device(pdev); if (tmp___0 != 0) { goto probe_out; } else { } } pci_enable_pcie_error_reporting(pdev); tmp___1 = kzalloc(12288UL, 208U); ha = (struct qla_hw_data *)tmp___1; if ((unsigned long )ha == (unsigned long )((struct qla_hw_data *)0)) { ql_log_pci(0U, pdev, 9, "Unable to allocate memory for ha.\n"); goto probe_out; } else { } ql_dbg_pci(1073741824U, pdev, 10, "Memory allocated for ha=%p.\n", ha); ha->pdev = pdev; ha->tgt.enable_class_2 = (unsigned char )ql2xenableclass2; INIT_LIST_HEAD(& ha->tgt.q_full_list); spinlock_check(& ha->tgt.q_full_lock); __raw_spin_lock_init(& ha->tgt.q_full_lock.__annonCompField18.rlock, "&(&ha->tgt.q_full_lock)->rlock", & __key); ha->bars = bars; ha->mem_only = mem_only; spinlock_check(& ha->hardware_lock); __raw_spin_lock_init(& ha->hardware_lock.__annonCompField18.rlock, "&(&ha->hardware_lock)->rlock", & __key___0); spinlock_check(& ha->vport_slock); __raw_spin_lock_init(& ha->vport_slock.__annonCompField18.rlock, "&(&ha->vport_slock)->rlock", & __key___1); __mutex_init(& ha->selflogin_lock, "&ha->selflogin_lock", & __key___2); __mutex_init(& ha->optrom_mutex, "&ha->optrom_mutex", & __key___3); qla2x00_set_isp_flags(ha); if ((((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || (ha->device_type & 2048U) != 0U) || (ha->device_type & 8192U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { pdev->needs_freset = 1U; } else { } ha->prev_topology = 0U; ha->init_cb_size = 96; ha->link_data_rate = 65535U; ha->optrom_size = 131072U; if ((int )ha->device_type & 1) { ha->max_fibre_devices = 512U; ha->mbx_count = 8U; req_length = 128U; rsp_length = 64U; ha->max_loop_id = 254U; ha->gid_list_info_size = 4; ha->flash_conf_off = 4294967295U; ha->flash_data_off = 4294967295U; ha->nvram_conf_off = 4294967295U; ha->nvram_data_off = 4294967295U; ha->isp_ops = & qla2100_isp_ops; } else if ((ha->device_type & 2U) != 0U) { ha->max_fibre_devices = 512U; ha->mbx_count = 24U; req_length = 2048U; rsp_length = 64U; ha->max_loop_id = 254U; ha->gid_list_info_size = 4; ha->flash_conf_off = 4294967295U; ha->flash_data_off = 4294967295U; ha->nvram_conf_off = 4294967295U; ha->nvram_data_off = 4294967295U; ha->isp_ops = & 
qla2100_isp_ops; } else if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { ha->max_fibre_devices = 512U; ha->mbx_count = 32U; req_length = 2048U; rsp_length = 512U; ha->max_loop_id = 2047U; ha->gid_list_info_size = 6; if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { ha->optrom_size = 1048576U; } else { } ha->flash_conf_off = 4294967295U; ha->flash_data_off = 4294967295U; ha->nvram_conf_off = 4294967295U; ha->nvram_data_off = 4294967295U; ha->isp_ops = & qla2300_isp_ops; } else if ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) { ha->max_fibre_devices = 2048U; ha->mbx_count = 32U; req_length = 2048U; rsp_length = 512U; ha->tgt.atio_q_length = 4096U; ha->max_loop_id = 2047U; ha->init_cb_size = 5252; ha->gid_list_info_size = 8; ha->optrom_size = 1048576U; ha->nvram_npiv_size = 128U; ha->isp_ops = & qla24xx_isp_ops; ha->flash_conf_off = 2147287040U; ha->flash_data_off = 2146435072U; ha->nvram_conf_off = 2147418112U; ha->nvram_data_off = 2147352576U; } else if ((ha->device_type & 2048U) != 0U) { ha->max_fibre_devices = 2048U; ha->mbx_count = 32U; req_length = 2048U; rsp_length = 512U; ha->tgt.atio_q_length = 4096U; ha->max_loop_id = 2047U; ha->init_cb_size = 5252; ha->gid_list_info_size = 8; ha->optrom_size = 2097152U; ha->nvram_npiv_size = 256U; ha->isp_ops = & qla25xx_isp_ops; ha->flash_conf_off = 2147287040U; ha->flash_data_off = 2146435072U; ha->nvram_conf_off = 2147418112U; ha->nvram_data_off = 2147352576U; } else if ((ha->device_type & 8192U) != 0U) { ha->max_fibre_devices = 2048U; ha->mbx_count = 32U; req_length = 2048U; rsp_length = 512U; ha->tgt.atio_q_length = 4096U; ha->max_loop_id = 2047U; ha->init_cb_size = 5252; ha->gid_list_info_size = 8; ha->optrom_size = 4194304U; ha->nvram_npiv_size = 256U; ha->isp_ops = & qla81xx_isp_ops; ha->flash_conf_off = 2147287040U; ha->flash_data_off = 2139095040U; ha->nvram_conf_off = 4294967295U; ha->nvram_data_off = 4294967295U; } else if ((ha->device_type & 16384U) != 0U) { ha->max_fibre_devices = 2048U; ha->mbx_count = 32U; req_length = 128U; rsp_length = 128U; ha->max_loop_id = 2047U; ha->init_cb_size = 5252; ha->gid_list_info_size = 8; ha->optrom_size = 8388608U; ha->nvram_npiv_size = 256U; ha->isp_ops = & qla82xx_isp_ops; ha->flash_conf_off = 2147287040U; ha->flash_data_off = 2146435072U; ha->nvram_conf_off = 2147418112U; ha->nvram_data_off = 2147352576U; } else if ((ha->device_type & 262144U) != 0U) { ha->max_fibre_devices = 2048U; ha->mbx_count = 32U; req_length = 128U; rsp_length = 128U; ha->max_loop_id = 2047U; ha->init_cb_size = 5252; ha->gid_list_info_size = 8; ha->optrom_size = 16777216U; ha->nvram_npiv_size = 256U; ha->isp_ops = & qla8044_isp_ops; ha->flash_conf_off = 2147287040U; ha->flash_data_off = 2146435072U; ha->nvram_conf_off = 2147418112U; ha->nvram_data_off = 2147352576U; } else if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { ha->portnum = (unsigned int )((uint16_t )(ha->pdev)->devfn) & 7U; ha->max_fibre_devices = 2048U; ha->mbx_count = 32U; req_length = 8192U; rsp_length = 512U; ha->tgt.atio_q_length = 4096U; ha->max_loop_id = 2047U; ha->init_cb_size = 5252; ha->gid_list_info_size = 8; ha->optrom_size = 16777216U; ha->nvram_npiv_size = 256U; ha->isp_ops = & qla83xx_isp_ops; ha->flash_conf_off = 2147287040U; ha->flash_data_off = 
2139095040U; ha->nvram_conf_off = 4294967295U; ha->nvram_data_off = 4294967295U; } else if ((ha->device_type & 131072U) != 0U) { ha->max_fibre_devices = 512U; ha->mbx_count = 16U; ha->aen_mbx_count = 8U; req_length = 512U; rsp_length = 256U; ha->isp_ops = & qlafx00_isp_ops; ha->port_down_retry_count = 30; ha->mr.fw_hbt_cnt = 6U; ha->mr.fw_reset_timer_tick = 120U; ha->mr.fw_critemp_timer_tick = 60U; ha->mr.fw_hbt_en = 1U; ha->mr.host_info_resend = 0; ha->mr.hinfo_resend_timer_tick = 60U; } else if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { ha->portnum = (unsigned int )((uint16_t )(ha->pdev)->devfn) & 7U; ha->max_fibre_devices = 2048U; ha->mbx_count = 32U; req_length = 2048U; rsp_length = 512U; ha->max_loop_id = 2047U; ha->init_cb_size = 5252; ha->gid_list_info_size = 8; ha->optrom_size = 16777216U; ha->nvram_npiv_size = 256U; ha->isp_ops = & qla27xx_isp_ops; ha->flash_conf_off = 2147287040U; ha->flash_data_off = 2139095040U; ha->nvram_conf_off = 4294967295U; ha->nvram_data_off = 4294967295U; } else { } ql_dbg_pci(1073741824U, pdev, 30, "mbx_count=%d, req_length=%d, rsp_length=%d, max_loop_id=%d, init_cb_size=%d, gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, max_fibre_devices=%d.\n", (int )ha->mbx_count, (int )req_length, (int )rsp_length, (int )ha->max_loop_id, ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, (int )ha->nvram_npiv_size, (int )ha->max_fibre_devices); ql_dbg_pci(1073741824U, pdev, 31, "isp_ops=%p, flash_conf_off=%d, flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, ha->nvram_conf_off, ha->nvram_data_off); ret = (*((ha->isp_ops)->iospace_config))(ha); if (ret != 0) { goto iospace_config_failed; } else { } ql_log_pci(2U, pdev, 29, "Found an ISP%04X irq %d iobase 0x%p.\n", (int )pdev->device, pdev->irq, ha->iobase); __mutex_init(& ha->vport_lock, "&ha->vport_lock", & __key___4); init_completion(& ha->mbx_cmd_comp); complete(& ha->mbx_cmd_comp); init_completion(& ha->mbx_intr_comp); init_completion(& ha->dcbx_comp); init_completion(& ha->lb_portup_comp); set_bit(0L, (unsigned long volatile *)(& ha->vp_idx_map)); qla2x00_config_dma_addressing(ha); ql_dbg_pci(1073741824U, pdev, 32, "64 Bit addressing is %s.\n", *((unsigned long *)ha + 2UL) != 0UL ? 
(char *)"enable" : (char *)"disable"); ret = qla2x00_mem_alloc(ha, (int )req_length, (int )rsp_length, & req, & rsp); if (ret != 0) { ql_log_pci(0U, pdev, 49, "Failed to allocate memory for adapter, aborting.\n"); goto probe_hw_failed; } else { } req->max_q_depth = 32; if (ql2xmaxqdepth != 0 && (unsigned int )ql2xmaxqdepth <= 65535U) { req->max_q_depth = ql2xmaxqdepth; } else { } tmp___2 = qla2x00_create_host(sht, ha); base_vha = tmp___2; if ((unsigned long )base_vha == (unsigned long )((scsi_qla_host_t *)0)) { ret = -12; qla2x00_mem_free(ha); qla2x00_free_req_que(ha, req); qla2x00_free_rsp_que(ha, rsp); goto probe_hw_failed; } else { } pci_set_drvdata(pdev, (void *)base_vha); set_bit(2L, (unsigned long volatile *)(& base_vha->pci_flags)); host = base_vha->host; base_vha->req = req; if (((((((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) || (ha->device_type & 8192U) != 0U) || (ha->device_type & 16384U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || (ha->device_type & 262144U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { base_vha->mgmt_svr_loop_id = (unsigned int )base_vha->vp_idx + 10U; } else { base_vha->mgmt_svr_loop_id = (unsigned int )base_vha->vp_idx + 254U; } ha->mr.fcport.vha = base_vha; ha->mr.fcport.port_type = 0; ha->mr.fcport.loop_id = 4096U; qla2x00_set_fcport_state(& ha->mr.fcport, 1); ha->mr.fcport.supported_classes = 0U; ha->mr.fcport.scan_state = 1U; if ((ha->device_type & 134217728U) == 0U) { if ((int )ha->device_type & 1) { host->sg_tablesize = 32U; } else { } } else if ((ha->device_type & 16384U) == 0U) { host->sg_tablesize = 1024U; } else { } host->max_id = (unsigned int )ha->max_fibre_devices; host->cmd_per_lun = 3; host->unique_id = host->host_no; if ((ha->device_type & 33554432U) != 0U && ql2xenabledif != 0) { host->max_cmd_len = 32U; } else { host->max_cmd_len = 16U; } host->max_channel = 0U; if (((ha->device_type & 131072U) == 0U && (ha->device_type & 134217728U) == 0U) && ql2xmaxlun > 65535ULL) { host->max_lun = 65535ULL; } else { host->max_lun = ql2xmaxlun; } host->transportt = qla2xxx_transport_template; sht->vendor_id = 72057594037932151ULL; ql_dbg(1073741824U, base_vha, 51, "max_id=%d this_id=%d cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id, host->this_id, (int )host->cmd_per_lun, host->unique_id, (int )host->max_cmd_len, host->max_channel, host->max_lun, host->transportt, sht->vendor_id); que_init: tmp___3 = qla2x00_alloc_queues(ha, req, rsp); if (tmp___3 == 0) { ql_log(0U, base_vha, 61, "Failed to allocate memory for queue pointers...aborting.\n"); goto probe_init_failed; } else { } qlt_probe_one_stage1(base_vha, ha); ret = qla2x00_request_irqs(ha, rsp); if (ret != 0) { goto probe_init_failed; } else { } pci_save_state(pdev); rsp->req = req; req->rsp = rsp; if ((ha->device_type & 131072U) != 0U) { *(ha->rsp_q_map) = rsp; *(ha->req_q_map) = req; set_bit(0L, (unsigned long volatile *)(& ha->req_qid_map)); set_bit(0L, (unsigned long volatile *)(& ha->rsp_qid_map)); } else { } req->req_q_in = & (ha->iobase)->isp24.req_q_in; req->req_q_out = & (ha->iobase)->isp24.req_q_out; rsp->rsp_q_in = & (ha->iobase)->isp24.rsp_q_in; rsp->rsp_q_out = & (ha->iobase)->isp24.rsp_q_out; if (((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || 
(ha->device_type & 1048576U) != 0U)) { req->req_q_in = & (ha->mqiobase)->isp25mq.req_q_in; req->req_q_out = & (ha->mqiobase)->isp25mq.req_q_out; rsp->rsp_q_in = & (ha->mqiobase)->isp25mq.rsp_q_in; rsp->rsp_q_out = & (ha->mqiobase)->isp25mq.rsp_q_out; } else { } if ((ha->device_type & 131072U) != 0U) { req->req_q_in = & (ha->iobase)->ispfx00.req_q_in; req->req_q_out = & (ha->iobase)->ispfx00.req_q_out; rsp->rsp_q_in = & (ha->iobase)->ispfx00.rsp_q_in; rsp->rsp_q_out = & (ha->iobase)->ispfx00.rsp_q_out; } else { } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { req->req_q_out = (uint32_t *)(& (ha->iobase)->isp82.req_q_out); rsp->rsp_q_in = (uint32_t *)(& (ha->iobase)->isp82.rsp_q_in); rsp->rsp_q_out = (uint32_t *)(& (ha->iobase)->isp82.rsp_q_out); } else { } ql_dbg(1048576U, base_vha, 49161, "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); ql_dbg(1048576U, base_vha, 49162, "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); ql_dbg(1073741824U, base_vha, 62, "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); ql_dbg(1073741824U, base_vha, 63, "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); tmp___4 = (*((ha->isp_ops)->initialize_adapter))(base_vha); if (tmp___4 != 0) { ql_log(0U, base_vha, 214, "Failed to initialize adapter - Adapter flags %x.\n", base_vha->device_flags); if ((ha->device_type & 16384U) != 0U) { qla82xx_idc_lock(ha); qla82xx_wr_32(ha, 136323392UL, 6U); qla82xx_idc_unlock(ha); ql_log(0U, base_vha, 215, "HW State: FAILED.\n"); } else if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); qla8044_wr_direct(base_vha, 4U, 6U); qla8044_idc_unlock(ha); ql_log(0U, base_vha, 336, "HW State: FAILED.\n"); } else { } ret = -19; goto probe_failed; } else { } if ((ha->device_type & 131072U) != 0U) { host->can_queue = 1024; } else { host->can_queue = (int )req->num_outstanding_cmds + -10; } ql_dbg(1073741824U, base_vha, 50, "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", host->can_queue, base_vha->req, (int )base_vha->mgmt_svr_loop_id, (int )host->sg_tablesize); if ((unsigned int )ha->mqenable != 0U) { tmp___5 = qla25xx_setup_mode(base_vha); if (tmp___5 != 0) { ql_log(1U, base_vha, 236, "Failed to create queues, falling back to single queue mode.\n"); goto que_init; } else { } } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { goto skip_dpc; } else { } ha->dpc_thread = kthread_create_on_node(& qla2x00_do_dpc, (void *)ha, -1, "%s_dpc", (uint8_t *)(& base_vha->host_str)); tmp___7 = IS_ERR((void const *)ha->dpc_thread); if ((int )tmp___7) { ql_log(0U, base_vha, 237, "Failed to start DPC thread.\n"); tmp___6 = PTR_ERR((void const *)ha->dpc_thread); ret = (int )tmp___6; goto probe_failed; } else { } ql_dbg(1073741824U, base_vha, 238, "DPC thread started successfully.\n"); qla2xxx_wake_dpc(base_vha); __init_work(& ha->board_disable, 0); __constr_expr_0.counter = 137438953408L; ha->board_disable.data = __constr_expr_0; lockdep_init_map(& ha->board_disable.lockdep_map, "(&ha->board_disable)", & __key___5, 0); INIT_LIST_HEAD(& ha->board_disable.entry); ha->board_disable.func = & qla2x00_disable_board_on_pci_error; if ((ha->device_type & 65536U) != 0U || ((ha->device_type & 32768U) != 0U && (int )ha->fw_attributes_ext[0] & 1)) { sprintf((char *)(& wq_name), 
"qla2xxx_%lu_dpc_lp_wq", base_vha->host_no); __lock_name = "\"%s\"wq_name"; tmp___8 = __alloc_workqueue_key("%s", 131082U, 1, & __key___6, __lock_name, (char *)(& wq_name)); ha->dpc_lp_wq = tmp___8; __init_work(& ha->idc_aen, 0); __constr_expr_1.counter = 137438953408L; ha->idc_aen.data = __constr_expr_1; lockdep_init_map(& ha->idc_aen.lockdep_map, "(&ha->idc_aen)", & __key___7, 0); INIT_LIST_HEAD(& ha->idc_aen.entry); ha->idc_aen.func = & qla83xx_service_idc_aen; sprintf((char *)(& wq_name), "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no); __lock_name___0 = "\"%s\"wq_name"; tmp___9 = __alloc_workqueue_key("%s", 131082U, 1, & __key___8, __lock_name___0, (char *)(& wq_name)); ha->dpc_hp_wq = tmp___9; __init_work(& ha->nic_core_reset, 0); __constr_expr_2.counter = 137438953408L; ha->nic_core_reset.data = __constr_expr_2; lockdep_init_map(& ha->nic_core_reset.lockdep_map, "(&ha->nic_core_reset)", & __key___9, 0); INIT_LIST_HEAD(& ha->nic_core_reset.entry); ha->nic_core_reset.func = & qla83xx_nic_core_reset_work; __init_work(& ha->idc_state_handler, 0); __constr_expr_3.counter = 137438953408L; ha->idc_state_handler.data = __constr_expr_3; lockdep_init_map(& ha->idc_state_handler.lockdep_map, "(&ha->idc_state_handler)", & __key___10, 0); INIT_LIST_HEAD(& ha->idc_state_handler.entry); ha->idc_state_handler.func = & qla83xx_idc_state_handler_work; __init_work(& ha->nic_core_unrecoverable, 0); __constr_expr_4.counter = 137438953408L; ha->nic_core_unrecoverable.data = __constr_expr_4; lockdep_init_map(& ha->nic_core_unrecoverable.lockdep_map, "(&ha->nic_core_unrecoverable)", & __key___11, 0); INIT_LIST_HEAD(& ha->nic_core_unrecoverable.entry); ha->nic_core_unrecoverable.func = & qla83xx_nic_core_unrecoverable_work; } else { } skip_dpc: list_add_tail(& base_vha->list, & ha->vp_list); (base_vha->host)->irq = (ha->pdev)->irq; qla2x00_start_timer(base_vha, (void *)(& qla2x00_timer), 1UL); ql_dbg(1073741824U, base_vha, 239, "Started qla2x00_timer with interval=%d.\n", 1); ql_dbg(1073741824U, base_vha, 240, "Detected hba at address=%p.\n", ha); if ((ha->device_type & 33554432U) != 0U && ql2xenabledif != 0) { if (((int )ha->fw_attributes & 16) != 0) { prot = 0; base_vha->flags.difdix_supported = 1U; ql_dbg(1073741824U, base_vha, 241, "Registering for DIF/DIX type 1 and 3 protection.\n"); if (ql2xenabledif == 1) { prot = 8; } else { } scsi_host_set_prot(host, (unsigned int )(prot | 119)); guard = 1; if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) && ql2xenabledif > 1) { guard = guard | 2; } else { } scsi_host_set_guard(host, (int )((unsigned char )guard)); } else { base_vha->flags.difdix_supported = 0U; } } else { } (*((ha->isp_ops)->enable_intrs))(ha); if ((ha->device_type & 131072U) != 0U) { ret = qlafx00_fx_disc(base_vha, & (base_vha->hw)->mr.fcport, 1); host->sg_tablesize = (int )ha->mr.extended_io_enabled ? 
1024U : 128U; } else { } ret = scsi_add_host(host, & pdev->dev); if (ret != 0) { goto probe_failed; } else { } base_vha->flags.init_done = 1U; base_vha->flags.online = 1U; ha->prev_minidump_failed = 0; ql_dbg(1073741824U, base_vha, 242, "Init done and hba is online.\n"); tmp___10 = qla_ini_mode_enabled(base_vha); if ((int )tmp___10) { scsi_scan_host(host); } else { ql_dbg(1073741824U, base_vha, 290, "skipping scsi_scan_host() for non-initiator port\n"); } qla2x00_alloc_sysfs_attr(base_vha); if ((ha->device_type & 131072U) != 0U) { ret = qlafx00_fx_disc(base_vha, & (base_vha->hw)->mr.fcport, 2); ret = qlafx00_fx_disc(base_vha, & (base_vha->hw)->mr.fcport, 153); } else { } qla2x00_init_host_attr(base_vha); qla2x00_dfs_setup(base_vha); ql_log(2U, base_vha, 251, "QLogic %s - %s.\n", (uint8_t *)(& ha->model_number), (char *)(& ha->model_desc)); tmp___11 = (*((ha->isp_ops)->fw_version_str))(base_vha, (char *)(& fw_str), 30UL); tmp___12 = pci_name((struct pci_dev const *)pdev); tmp___13 = (*((ha->isp_ops)->pci_info_str))(base_vha, (char *)(& pci_info)); ql_log(2U, base_vha, 252, "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", (int )pdev->device, tmp___13, tmp___12, *((unsigned long *)ha + 2UL) != 0UL ? 43 : 45, base_vha->host_no, tmp___11); qlt_add_target(ha, base_vha); clear_bit(2L, (unsigned long volatile *)(& base_vha->pci_flags)); return (0); probe_init_failed: qla2x00_free_req_que(ha, req); *(ha->req_q_map) = (struct req_que *)0; clear_bit(0L, (unsigned long volatile *)(& ha->req_qid_map)); qla2x00_free_rsp_que(ha, rsp); *(ha->rsp_q_map) = (struct rsp_que *)0; clear_bit(0L, (unsigned long volatile *)(& ha->rsp_qid_map)); tmp___14 = 0U; ha->max_rsp_queues = tmp___14; ha->max_req_queues = tmp___14; probe_failed: ; if (base_vha->timer_active != 0U) { qla2x00_stop_timer(base_vha); } else { } base_vha->flags.online = 0U; if ((unsigned long )ha->dpc_thread != (unsigned long )((struct task_struct *)0)) { t = ha->dpc_thread; ha->dpc_thread = (struct task_struct *)0; kthread_stop(t); } else { } qla2x00_free_device(base_vha); scsi_host_put(base_vha->host); probe_hw_failed: qla2x00_clear_drv_active(ha); iospace_config_failed: ; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { if (ha->nx_pcibase == 0UL) { iounmap((void volatile *)ha->nx_pcibase); } else { } if (ql2xdbwr == 0) { iounmap((void volatile *)ha->nxdb_wr_ptr); } else { } } else { if ((unsigned long )ha->iobase != (unsigned long )((device_reg_t *)0)) { iounmap((void volatile *)ha->iobase); } else { } if ((unsigned long )ha->cregbase != (unsigned long )((void *)0)) { iounmap((void volatile *)ha->cregbase); } else { } } pci_release_selected_regions(ha->pdev, ha->bars); kfree((void const *)ha); ha = (struct qla_hw_data *)0; probe_out: pci_disable_device(pdev); return (ret); } } static void qla2x00_shutdown(struct pci_dev *pdev ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; int tmp ; void *tmp___0 ; { tmp = atomic_read((atomic_t const *)(& pdev->enable_cnt)); if (tmp == 0) { return; } else { } tmp___0 = pci_get_drvdata(pdev); vha = (scsi_qla_host_t *)tmp___0; ha = vha->hw; if ((ha->device_type & 131072U) != 0U) { qlafx00_driver_shutdown(vha, 20); } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { qla2x00_disable_fce_trace(vha, (uint64_t *)0ULL, (uint64_t *)0ULL); ha->flags.fce_enabled = 0U; } else { } if ((unsigned long )ha->eft != (unsigned long )((void *)0)) { qla2x00_disable_eft_trace(vha); } else { } qla2x00_try_to_stop_firmware(vha); vha->flags.online = 0U; if ((unsigned int )ha->interrupts_on != 0U) { 
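/* Shutdown teardown order: the chip-specific ha->isp_ops->disable_intrs hook runs (only
 * when interrupts are currently enabled) before qla2x00_free_irqs(), qla2x00_free_fw_dump()
 * and the PCI error-reporting/device disable calls that follow. */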
vha->flags.init_done = 0U; (*((ha->isp_ops)->disable_intrs))(ha); } else { } qla2x00_free_irqs(vha); qla2x00_free_fw_dump(ha); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); return; } } static void qla2x00_delete_all_vps(struct qla_hw_data *ha , scsi_qla_host_t *base_vha ) { struct Scsi_Host *scsi_host ; scsi_qla_host_t *vha ; unsigned long flags ; raw_spinlock_t *tmp ; long tmp___0 ; struct list_head const *__mptr ; { mutex_lock_nested(& ha->vport_lock, 0U); goto ldv_67308; ldv_67307: tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = ldv__builtin_expect((unsigned long )base_vha->list.next == (unsigned long )(& ha->vp_list), 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_os.c"), "i" (2984), "i" (12UL)); ldv_67304: ; goto ldv_67304; } else { } __mptr = (struct list_head const *)base_vha->list.next; vha = (scsi_qla_host_t *)__mptr; scsi_host = scsi_host_get(vha->host); spin_unlock_irqrestore(& ha->vport_slock, flags); mutex_unlock(& ha->vport_lock); fc_vport_terminate(vha->fc_vport); scsi_host_put(vha->host); mutex_lock_nested(& ha->vport_lock, 0U); ldv_67308: ; if (ha->cur_vport_count != 0) { goto ldv_67307; } else { } mutex_unlock(& ha->vport_lock); return; } } static void qla2x00_destroy_deferred_work(struct qla_hw_data *ha ) { struct task_struct *t ; { if ((unsigned long )ha->wq != (unsigned long )((struct workqueue_struct *)0)) { ldv_flush_workqueue_15(ha->wq); ldv_destroy_workqueue_16(ha->wq); ha->wq = (struct workqueue_struct *)0; } else { } if ((unsigned long )ha->dpc_lp_wq != (unsigned long )((struct workqueue_struct *)0)) { ldv_cancel_work_sync_17(& ha->idc_aen); ldv_destroy_workqueue_18(ha->dpc_lp_wq); ha->dpc_lp_wq = (struct workqueue_struct *)0; } else { } if ((unsigned long )ha->dpc_hp_wq != (unsigned long )((struct workqueue_struct *)0)) { ldv_cancel_work_sync_19(& ha->nic_core_reset); ldv_cancel_work_sync_20(& ha->idc_state_handler); ldv_cancel_work_sync_21(& ha->nic_core_unrecoverable); ldv_destroy_workqueue_22(ha->dpc_hp_wq); ha->dpc_hp_wq = (struct workqueue_struct *)0; } else { } if ((unsigned long )ha->dpc_thread != (unsigned long )((struct task_struct *)0)) { t = ha->dpc_thread; ha->dpc_thread = (struct task_struct *)0; kthread_stop(t); } else { } return; } } static void qla2x00_unmap_iobases(struct qla_hw_data *ha ) { { if ((ha->device_type & 16384U) != 0U) { iounmap((void volatile *)ha->nx_pcibase); if (ql2xdbwr == 0) { iounmap((void volatile *)ha->nxdb_wr_ptr); } else { } } else { if ((unsigned long )ha->iobase != (unsigned long )((device_reg_t *)0)) { iounmap((void volatile *)ha->iobase); } else { } if ((unsigned long )ha->cregbase != (unsigned long )((void *)0)) { iounmap((void volatile *)ha->cregbase); } else { } if ((unsigned long )ha->mqiobase != (unsigned long )((device_reg_t *)0)) { iounmap((void volatile *)ha->mqiobase); } else { } if ((((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) && (unsigned long )ha->msixbase != (unsigned long )((device_reg_t *)0)) { iounmap((void volatile *)ha->msixbase); } else { } } return; } } static void qla2x00_clear_drv_active(struct qla_hw_data *ha 
) { { if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); qla8044_clear_drv_active(ha); qla8044_idc_unlock(ha); } else if ((ha->device_type & 16384U) != 0U) { qla82xx_idc_lock(ha); qla82xx_clear_drv_active(ha); qla82xx_idc_unlock(ha); } else { } return; } } static void qla2x00_remove_one(struct pci_dev *pdev ) { scsi_qla_host_t *base_vha ; struct qla_hw_data *ha ; void *tmp ; int tmp___0 ; int tmp___1 ; { tmp = pci_get_drvdata(pdev); base_vha = (scsi_qla_host_t *)tmp; ha = base_vha->hw; set_bit(1L, (unsigned long volatile *)(& base_vha->pci_flags)); ldv_cancel_work_sync_23(& ha->board_disable); tmp___0 = atomic_read((atomic_t const *)(& pdev->enable_cnt)); if (tmp___0 == 0) { scsi_host_put(base_vha->host); kfree((void const *)ha); pci_set_drvdata(pdev, (void *)0); return; } else { } qla2x00_wait_for_hba_ready(base_vha); set_bit(15L, (unsigned long volatile *)(& base_vha->dpc_flags)); if ((ha->device_type & 131072U) != 0U) { qlafx00_driver_shutdown(base_vha, 20); } else { } qla2x00_delete_all_vps(ha, base_vha); if ((ha->device_type & 65536U) != 0U) { ql_dbg(524288U, base_vha, 45182, "Clearing fcoe driver presence.\n"); tmp___1 = qla83xx_clear_drv_presence(base_vha); if (tmp___1 != 0) { ql_dbg(524288U, base_vha, 45177, "Error while clearing DRV-Presence.\n"); } else { } } else { } qla2x00_abort_all_cmds(base_vha, 65536); qla2x00_dfs_remove(base_vha); qla84xx_put_chip(base_vha); if ((ha->device_type & 32768U) != 0U) { qla83xx_disable_laser(base_vha); } else { } if (base_vha->timer_active != 0U) { qla2x00_stop_timer(base_vha); } else { } base_vha->flags.online = 0U; qla2x00_destroy_deferred_work(ha); qlt_remove_target(ha, base_vha); qla2x00_free_sysfs_attr(base_vha, 1); fc_remove_host(base_vha->host); ldv_scsi_remove_host_24(base_vha->host); qla2x00_free_device(base_vha); qla2x00_clear_drv_active(ha); scsi_host_put(base_vha->host); qla2x00_unmap_iobases(ha); pci_release_selected_regions(ha->pdev, ha->bars); kfree((void const *)ha); ha = (struct qla_hw_data *)0; pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); return; } } static void qla2x00_free_device(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; qla2x00_abort_all_cmds(vha, 65536); if (vha->timer_active != 0U) { qla2x00_stop_timer(vha); } else { } qla25xx_delete_queues(vha); if (*((unsigned long *)ha + 2UL) != 0UL) { qla2x00_disable_fce_trace(vha, (uint64_t *)0ULL, (uint64_t *)0ULL); } else { } if ((unsigned long )ha->eft != (unsigned long )((void *)0)) { qla2x00_disable_eft_trace(vha); } else { } qla2x00_try_to_stop_firmware(vha); vha->flags.online = 0U; if ((unsigned int )ha->interrupts_on != 0U) { vha->flags.init_done = 0U; (*((ha->isp_ops)->disable_intrs))(ha); } else { } qla2x00_free_irqs(vha); qla2x00_free_fcports(vha); qla2x00_mem_free(ha); qla82xx_md_free(vha); qla2x00_free_queues(ha); return; } } void qla2x00_free_fcports(struct scsi_qla_host *vha ) { fc_port_t *fcport ; fc_port_t *tfcport ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; __mptr___0 = (struct list_head const *)fcport->list.next; tfcport = (fc_port_t *)__mptr___0; goto ldv_67341; ldv_67340: list_del(& fcport->list); qla2x00_clear_loop_id(fcport); kfree((void const *)fcport); fcport = (fc_port_t *)0; fcport = tfcport; __mptr___1 = (struct list_head const *)tfcport->list.next; tfcport = (fc_port_t *)__mptr___1; ldv_67341: ; if ((unsigned long )(& fcport->list) != (unsigned long 
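/* qla2x00_free_fcports(): each port is unlinked, its loop ID released and the
 * structure freed; the walk ends when the cursor wraps back to the vp_fcports
 * list head checked just below. */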
)(& vha->vp_fcports)) { goto ldv_67340; } else { } return; } } __inline static void qla2x00_schedule_rport_del(struct scsi_qla_host *vha , fc_port_t *fcport , int defer ) { struct fc_rport *rport ; scsi_qla_host_t *base_vha ; unsigned long flags ; void *tmp ; raw_spinlock_t *tmp___0 ; { if ((unsigned long )fcport->rport == (unsigned long )((struct fc_rport *)0)) { return; } else { } rport = fcport->rport; if (defer != 0) { tmp = pci_get_drvdata((vha->hw)->pdev); base_vha = (scsi_qla_host_t *)tmp; tmp___0 = spinlock_check((vha->host)->host_lock); flags = _raw_spin_lock_irqsave(tmp___0); fcport->drport = rport; spin_unlock_irqrestore((vha->host)->host_lock, flags); set_bit(13L, (unsigned long volatile *)(& base_vha->dpc_flags)); qla2xxx_wake_dpc(base_vha); } else { fc_remote_port_delete(rport); qlt_fc_port_deleted(vha, fcport); } return; } } void qla2x00_mark_device_lost(scsi_qla_host_t *vha , fc_port_t *fcport , int do_login , int defer ) { int tmp ; int tmp___0 ; { if (((vha->hw)->device_type & 131072U) != 0U) { qla2x00_set_fcport_state(fcport, 3); qla2x00_schedule_rport_del(vha, fcport, defer); return; } else { } tmp = atomic_read((atomic_t const *)(& fcport->state)); if (tmp == 4 && (int )vha->vp_idx == (int )(fcport->vha)->vp_idx) { qla2x00_set_fcport_state(fcport, 3); qla2x00_schedule_rport_del(vha, fcport, defer); } else { } tmp___0 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___0 != 2) { qla2x00_set_fcport_state(fcport, 3); } else { } if (do_login == 0) { return; } else { } if (fcport->login_retry == 0) { fcport->login_retry = (int )(vha->hw)->login_retry_count; set_bit(8L, (unsigned long volatile *)(& vha->dpc_flags)); ql_dbg(268435456U, vha, 8295, "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n", (uint8_t *)(& fcport->port_name), (int )fcport->loop_id, fcport->login_retry); } else { } return; } } void qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha , int defer ) { fc_port_t *fcport ; struct list_head const *__mptr ; int tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_67371; ldv_67370: ; if ((unsigned int )vha->vp_idx != 0U && (int )vha->vp_idx != (int )(fcport->vha)->vp_idx) { goto ldv_67369; } else { } tmp = atomic_read((atomic_t const *)(& fcport->state)); if (tmp == 2) { goto ldv_67369; } else { } tmp___0 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___0 == 4) { qla2x00_set_fcport_state(fcport, 3); if (defer != 0) { qla2x00_schedule_rport_del(vha, fcport, defer); } else if ((int )vha->vp_idx == (int )(fcport->vha)->vp_idx) { qla2x00_schedule_rport_del(vha, fcport, defer); } else { } } else { } ldv_67369: __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_67371: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_67370; } else { } return; } } static int qla2x00_mem_alloc(struct qla_hw_data *ha , uint16_t req_len , uint16_t rsp_len , struct req_que **req , struct rsp_que **rsp ) { char name[16U] ; void *tmp ; int tmp___0 ; int tmp___1 ; void *tmp___2 ; void *tmp___3 ; void *tmp___4 ; void *tmp___5 ; void *tmp___6 ; void *tmp___7 ; void *tmp___8 ; void *tmp___9 ; void *tmp___10 ; void *tmp___11 ; int tmp___12 ; { tmp = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )ha->init_cb_size, & ha->init_cb_dma, 208U, (struct dma_attrs *)0); ha->init_cb = (init_cb_t *)tmp; if ((unsigned long )ha->init_cb == (unsigned long )((init_cb_t *)0)) { goto fail; } else { } tmp___0 
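/* qla2x00_mem_alloc(): every allocation below has a matching fail_* label at the
 * bottom of the function, so a failure at any step branches to the label that
 * releases only what has already been set up. */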
= qlt_mem_alloc(ha); if (tmp___0 < 0) { goto fail_free_init_cb; } else { } tmp___1 = qla2x00_gid_list_size(ha); tmp___2 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )tmp___1, & ha->gid_list_dma, 208U, (struct dma_attrs *)0); ha->gid_list = (struct gid_list_info *)tmp___2; if ((unsigned long )ha->gid_list == (unsigned long )((struct gid_list_info *)0)) { goto fail_free_tgt_mem; } else { } ha->srb_mempool = mempool_create_slab_pool(128, srb_cachep); if ((unsigned long )ha->srb_mempool == (unsigned long )((mempool_t *)0)) { goto fail_free_gid_list; } else { } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { if ((unsigned long )ctx_cachep == (unsigned long )((struct kmem_cache *)0)) { ctx_cachep = kmem_cache_create("qla2xxx_ctx", 48UL, 0UL, 8192UL, (void (*)(void * ))0); if ((unsigned long )ctx_cachep == (unsigned long )((struct kmem_cache *)0)) { goto fail_free_gid_list; } else { } } else { } ha->ctx_mempool = mempool_create_slab_pool(128, ctx_cachep); if ((unsigned long )ha->ctx_mempool == (unsigned long )((mempool_t *)0)) { goto fail_free_srb_mempool; } else { } ql_dbg_pci(1073741824U, ha->pdev, 33, "ctx_cachep=%p ctx_mempool=%p.\n", ctx_cachep, ha->ctx_mempool); } else { } ha->nvram = kzalloc(4096UL, 208U); if ((unsigned long )ha->nvram == (unsigned long )((void *)0)) { goto fail_free_ctx_mempool; } else { } snprintf((char *)(& name), 16UL, "%s_%d", (char *)"qla2xxx", (int )(ha->pdev)->device); ha->s_dma_pool = dma_pool_create((char const *)(& name), & (ha->pdev)->dev, 256UL, 8UL, 0UL); if ((unsigned long )ha->s_dma_pool == (unsigned long )((struct dma_pool *)0)) { goto fail_free_nvram; } else { } ql_dbg_pci(1073741824U, ha->pdev, 34, "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); if (((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) || ql2xenabledif != 0) { ha->dl_dma_pool = dma_pool_create((char const *)(& name), & (ha->pdev)->dev, 512UL, 8UL, 0UL); if ((unsigned long )ha->dl_dma_pool == (unsigned long )((struct dma_pool *)0)) { ql_log_pci(0U, ha->pdev, 35, "Failed to allocate memory for dl_dma_pool.\n"); goto fail_s_dma_pool; } else { } ha->fcp_cmnd_dma_pool = dma_pool_create((char const *)(& name), & (ha->pdev)->dev, 512UL, 8UL, 0UL); if ((unsigned long )ha->fcp_cmnd_dma_pool == (unsigned long )((struct dma_pool *)0)) { ql_log_pci(0U, ha->pdev, 36, "Failed to allocate memory for fcp_cmnd_dma_pool.\n"); goto fail_dl_dma_pool; } else { } ql_dbg_pci(1073741824U, ha->pdev, 37, "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n", ha->dl_dma_pool, ha->fcp_cmnd_dma_pool); } else { } if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp___3 = dma_alloc_attrs(& (ha->pdev)->dev, 2064UL, & ha->sns_cmd_dma, 208U, (struct dma_attrs *)0); ha->sns_cmd = (struct sns_cmd_pkt *)tmp___3; if ((unsigned long )ha->sns_cmd == (unsigned long )((struct sns_cmd_pkt *)0)) { goto fail_dma_pool; } else { } ql_dbg_pci(1073741824U, ha->pdev, 38, "sns_cmd: %p.\n", ha->sns_cmd); } else { tmp___4 = dma_pool_alloc(ha->s_dma_pool, 208U, & ha->ms_iocb_dma); ha->ms_iocb = (ms_iocb_entry_t *)tmp___4; if ((unsigned long )ha->ms_iocb == (unsigned long )((ms_iocb_entry_t *)0)) { goto fail_dma_pool; } else { } tmp___5 = dma_alloc_attrs(& (ha->pdev)->dev, 8208UL, & ha->ct_sns_dma, 208U, (struct dma_attrs *)0); ha->ct_sns = (struct ct_sns_pkt *)tmp___5; if ((unsigned long )ha->ct_sns == (unsigned long )((struct ct_sns_pkt *)0)) { goto fail_free_ms_iocb; } else { } ql_dbg_pci(1073741824U, ha->pdev, 39, 
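/* adapters taking the first branch above (device_type bits 0/1) use a 2064-byte
 * SNS command packet; all others get an MS IOCB from s_dma_pool plus an 8208-byte
 * CT-SNS buffer for name-server requests. */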
"ms_iocb=%p ct_sns=%p.\n", ha->ms_iocb, ha->ct_sns); } tmp___6 = kzalloc(192UL, 208U); *req = (struct req_que *)tmp___6; if ((unsigned long )*req == (unsigned long )((struct req_que *)0)) { ql_log_pci(0U, ha->pdev, 40, "Failed to allocate memory for req.\n"); goto fail_req; } else { } (*req)->length = req_len; tmp___7 = dma_alloc_attrs(& (ha->pdev)->dev, (unsigned long )((int )(*req)->length + 1) * 64UL, & (*req)->dma, 208U, (struct dma_attrs *)0); (*req)->ring = (request_t *)tmp___7; if ((unsigned long )(*req)->ring == (unsigned long )((request_t *)0)) { ql_log_pci(0U, ha->pdev, 41, "Failed to allocate memory for req_ring.\n"); goto fail_req_ring; } else { } tmp___8 = kzalloc(272UL, 208U); *rsp = (struct rsp_que *)tmp___8; if ((unsigned long )*rsp == (unsigned long )((struct rsp_que *)0)) { ql_log_pci(0U, ha->pdev, 42, "Failed to allocate memory for rsp.\n"); goto fail_rsp; } else { } (*rsp)->hw = ha; (*rsp)->length = rsp_len; tmp___9 = dma_alloc_attrs(& (ha->pdev)->dev, (unsigned long )((int )(*rsp)->length + 1) * 64UL, & (*rsp)->dma, 208U, (struct dma_attrs *)0); (*rsp)->ring = (response_t *)tmp___9; if ((unsigned long )(*rsp)->ring == (unsigned long )((response_t *)0)) { ql_log_pci(0U, ha->pdev, 43, "Failed to allocate memory for rsp_ring.\n"); goto fail_rsp_ring; } else { } (*req)->rsp = *rsp; (*rsp)->req = *req; ql_dbg_pci(1073741824U, ha->pdev, 44, "req=%p req->length=%d req->ring=%p rsp=%p rsp->length=%d rsp->ring=%p.\n", *req, (int )(*req)->length, (*req)->ring, *rsp, (int )(*rsp)->length, (*rsp)->ring); if ((unsigned int )ha->nvram_npiv_size != 0U) { tmp___10 = kzalloc((unsigned long )ha->nvram_npiv_size * 24UL, 208U); ha->npiv_info = (struct qla_npiv_entry *)tmp___10; if ((unsigned long )ha->npiv_info == (unsigned long )((struct qla_npiv_entry *)0)) { ql_log_pci(0U, ha->pdev, 45, "Failed to allocate memory for npiv_info.\n"); goto fail_npiv_info; } else { } } else { ha->npiv_info = (struct qla_npiv_entry *)0; } if ((((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) || (ha->device_type & 32768U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { tmp___11 = dma_pool_alloc(ha->s_dma_pool, 208U, & ha->ex_init_cb_dma); ha->ex_init_cb = (struct ex_init_cb_81xx *)tmp___11; if ((unsigned long )ha->ex_init_cb == (unsigned long )((struct ex_init_cb_81xx *)0)) { goto fail_ex_init_cb; } else { } ql_dbg_pci(1073741824U, ha->pdev, 46, "ex_init_cb=%p.\n", ha->ex_init_cb); } else { } INIT_LIST_HEAD(& ha->gbl_dsd_list); if ((ha->device_type & 134217728U) == 0U) { ha->async_pd = dma_pool_alloc(ha->s_dma_pool, 208U, & ha->async_pd_dma); if ((unsigned long )ha->async_pd == (unsigned long )((void *)0)) { goto fail_async_pd; } else { } ql_dbg_pci(1073741824U, ha->pdev, 47, "async_pd=%p.\n", ha->async_pd); } else { } INIT_LIST_HEAD(& ha->vp_list); ha->loop_id_map = kzalloc((((unsigned long )ha->max_fibre_devices + 63UL) / 64UL) * 8UL, 208U); if ((unsigned long )ha->loop_id_map == (unsigned long )((void *)0)) { goto fail_async_pd; } else { qla2x00_set_reserved_loop_ids(ha); ql_dbg_pci(1073741824U, ha->pdev, 291, "loop_id_map=%p.\n", ha->loop_id_map); } return (0); fail_async_pd: dma_pool_free(ha->s_dma_pool, (void *)ha->ex_init_cb, ha->ex_init_cb_dma); fail_ex_init_cb: kfree((void const *)ha->npiv_info); fail_npiv_info: dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )(*rsp)->length + 1) * 64UL, (void *)(*rsp)->ring, (*rsp)->dma, (struct dma_attrs *)0); 
(*rsp)->ring = (response_t *)0; (*rsp)->dma = 0ULL; fail_rsp_ring: kfree((void const *)*rsp); fail_rsp: dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )(*req)->length + 1) * 64UL, (void *)(*req)->ring, (*req)->dma, (struct dma_attrs *)0); (*req)->ring = (request_t *)0; (*req)->dma = 0ULL; fail_req_ring: kfree((void const *)*req); fail_req: dma_free_attrs(& (ha->pdev)->dev, 8208UL, (void *)ha->ct_sns, ha->ct_sns_dma, (struct dma_attrs *)0); ha->ct_sns = (struct ct_sns_pkt *)0; ha->ct_sns_dma = 0ULL; fail_free_ms_iocb: dma_pool_free(ha->s_dma_pool, (void *)ha->ms_iocb, ha->ms_iocb_dma); ha->ms_iocb = (ms_iocb_entry_t *)0; ha->ms_iocb_dma = 0ULL; fail_dma_pool: ; if ((ha->device_type & 16384U) != 0U || ql2xenabledif != 0) { dma_pool_destroy(ha->fcp_cmnd_dma_pool); ha->fcp_cmnd_dma_pool = (struct dma_pool *)0; } else { } fail_dl_dma_pool: ; if ((ha->device_type & 16384U) != 0U || ql2xenabledif != 0) { dma_pool_destroy(ha->dl_dma_pool); ha->dl_dma_pool = (struct dma_pool *)0; } else { } fail_s_dma_pool: dma_pool_destroy(ha->s_dma_pool); ha->s_dma_pool = (struct dma_pool *)0; fail_free_nvram: kfree((void const *)ha->nvram); ha->nvram = (void *)0; fail_free_ctx_mempool: mempool_destroy(ha->ctx_mempool); ha->ctx_mempool = (mempool_t *)0; fail_free_srb_mempool: mempool_destroy(ha->srb_mempool); ha->srb_mempool = (mempool_t *)0; fail_free_gid_list: tmp___12 = qla2x00_gid_list_size(ha); dma_free_attrs(& (ha->pdev)->dev, (size_t )tmp___12, (void *)ha->gid_list, ha->gid_list_dma, (struct dma_attrs *)0); ha->gid_list = (struct gid_list_info *)0; ha->gid_list_dma = 0ULL; fail_free_tgt_mem: qlt_mem_free(ha); fail_free_init_cb: dma_free_attrs(& (ha->pdev)->dev, (size_t )ha->init_cb_size, (void *)ha->init_cb, ha->init_cb_dma, (struct dma_attrs *)0); ha->init_cb = (init_cb_t *)0; ha->init_cb_dma = 0ULL; fail: ql_log(0U, (scsi_qla_host_t *)0, 48, "Memory allocation failure.\n"); return (-12); } } static void qla2x00_free_fw_dump(struct qla_hw_data *ha ) { { if ((unsigned long )ha->fce != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 65536UL, ha->fce, ha->fce_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->eft != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 65536UL, ha->eft, ha->eft_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->fw_dump != (unsigned long )((struct qla2xxx_fw_dump *)0)) { vfree((void const *)ha->fw_dump); } else { } if ((unsigned long )ha->fw_dump_template != (unsigned long )((void *)0)) { vfree((void const *)ha->fw_dump_template); } else { } ha->fce = (void *)0; ha->fce_dma = 0ULL; ha->eft = (void *)0; ha->eft_dma = 0ULL; ha->fw_dumped = 0; ha->fw_dump_cap_flags = 0UL; ha->fw_dump_reading = 0; ha->fw_dump = (struct qla2xxx_fw_dump *)0; ha->fw_dump_len = 0U; ha->fw_dump_template = (void *)0; ha->fw_dump_template_len = 0U; return; } } static void qla2x00_mem_free(struct qla_hw_data *ha ) { int tmp ; struct dsd_dma *dsd_ptr ; struct dsd_dma *tdsd_ptr ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int tmp___0 ; { qla2x00_free_fw_dump(ha); if ((unsigned long )ha->mctp_dump != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 548964UL, ha->mctp_dump, ha->mctp_dump_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->srb_mempool != (unsigned long )((mempool_t *)0)) { mempool_destroy(ha->srb_mempool); } else { } if ((unsigned long )ha->dcbx_tlv != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 4096UL, ha->dcbx_tlv, 
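/* qla2x00_mem_free(): releases, roughly in reverse order of qla2x00_mem_alloc(),
 * the firmware-dump buffers, MCTP/DCBX/XGMAC DMA regions, SNS/CT-SNS packets,
 * the DMA pools and the lookup tables, then clears every pointer below. */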
ha->dcbx_tlv_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->xgmac_data != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 4096UL, ha->xgmac_data, ha->xgmac_data_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->sns_cmd != (unsigned long )((struct sns_cmd_pkt *)0)) { dma_free_attrs(& (ha->pdev)->dev, 2064UL, (void *)ha->sns_cmd, ha->sns_cmd_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->ct_sns != (unsigned long )((struct ct_sns_pkt *)0)) { dma_free_attrs(& (ha->pdev)->dev, 8208UL, (void *)ha->ct_sns, ha->ct_sns_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->sfp_data != (unsigned long )((void *)0)) { dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); } else { } if ((unsigned long )ha->ms_iocb != (unsigned long )((ms_iocb_entry_t *)0)) { dma_pool_free(ha->s_dma_pool, (void *)ha->ms_iocb, ha->ms_iocb_dma); } else { } if ((unsigned long )ha->ex_init_cb != (unsigned long )((struct ex_init_cb_81xx *)0)) { dma_pool_free(ha->s_dma_pool, (void *)ha->ex_init_cb, ha->ex_init_cb_dma); } else { } if ((unsigned long )ha->async_pd != (unsigned long )((void *)0)) { dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); } else { } if ((unsigned long )ha->s_dma_pool != (unsigned long )((struct dma_pool *)0)) { dma_pool_destroy(ha->s_dma_pool); } else { } if ((unsigned long )ha->gid_list != (unsigned long )((struct gid_list_info *)0)) { tmp = qla2x00_gid_list_size(ha); dma_free_attrs(& (ha->pdev)->dev, (size_t )tmp, (void *)ha->gid_list, ha->gid_list_dma, (struct dma_attrs *)0); } else { } if ((ha->device_type & 16384U) != 0U) { tmp___0 = list_empty((struct list_head const *)(& ha->gbl_dsd_list)); if (tmp___0 == 0) { __mptr = (struct list_head const *)ha->gbl_dsd_list.next; dsd_ptr = (struct dsd_dma *)__mptr; __mptr___0 = (struct list_head const *)dsd_ptr->list.next; tdsd_ptr = (struct dsd_dma *)__mptr___0; goto ldv_67414; ldv_67413: dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); list_del(& dsd_ptr->list); kfree((void const *)dsd_ptr); dsd_ptr = tdsd_ptr; __mptr___1 = (struct list_head const *)tdsd_ptr->list.next; tdsd_ptr = (struct dsd_dma *)__mptr___1; ldv_67414: ; if ((unsigned long )(& dsd_ptr->list) != (unsigned long )(& ha->gbl_dsd_list)) { goto ldv_67413; } else { } } else { } } else { } if ((unsigned long )ha->dl_dma_pool != (unsigned long )((struct dma_pool *)0)) { dma_pool_destroy(ha->dl_dma_pool); } else { } if ((unsigned long )ha->fcp_cmnd_dma_pool != (unsigned long )((struct dma_pool *)0)) { dma_pool_destroy(ha->fcp_cmnd_dma_pool); } else { } if ((unsigned long )ha->ctx_mempool != (unsigned long )((mempool_t *)0)) { mempool_destroy(ha->ctx_mempool); } else { } qlt_mem_free(ha); if ((unsigned long )ha->init_cb != (unsigned long )((init_cb_t *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )ha->init_cb_size, (void *)ha->init_cb, ha->init_cb_dma, (struct dma_attrs *)0); } else { } vfree((void const *)ha->optrom_buffer); kfree((void const *)ha->nvram); kfree((void const *)ha->npiv_info); kfree((void const *)ha->swl); kfree((void const *)ha->loop_id_map); ha->srb_mempool = (mempool_t *)0; ha->ctx_mempool = (mempool_t *)0; ha->sns_cmd = (struct sns_cmd_pkt *)0; ha->sns_cmd_dma = 0ULL; ha->ct_sns = (struct ct_sns_pkt *)0; ha->ct_sns_dma = 0ULL; ha->ms_iocb = (ms_iocb_entry_t *)0; ha->ms_iocb_dma = 0ULL; ha->init_cb = (init_cb_t *)0; ha->init_cb_dma = 0ULL; ha->ex_init_cb = (struct ex_init_cb_81xx *)0; ha->ex_init_cb_dma = 0ULL; ha->async_pd = (void *)0; ha->async_pd_dma = 
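/* DMA handles are zeroed together with their virtual pointers, matching the
 * non-NULL checks that guard every free above. */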
0ULL; ha->s_dma_pool = (struct dma_pool *)0; ha->dl_dma_pool = (struct dma_pool *)0; ha->fcp_cmnd_dma_pool = (struct dma_pool *)0; ha->gid_list = (struct gid_list_info *)0; ha->gid_list_dma = 0ULL; ha->tgt.atio_ring = (struct atio *)0; ha->tgt.atio_dma = 0ULL; ha->tgt.tgt_vp_map = (struct qla_tgt_vp_map *)0; return; } } struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht , struct qla_hw_data *ha ) { struct Scsi_Host *host ; struct scsi_qla_host *vha ; void *tmp ; struct lock_class_key __key ; char const *tmp___0 ; { vha = (struct scsi_qla_host *)0; host = ldv_scsi_host_alloc_25(sht, 1360); if ((unsigned long )host == (unsigned long )((struct Scsi_Host *)0)) { ql_log_pci(0U, ha->pdev, 263, "Failed to allocate host from the scsi layer, aborting.\n"); goto fail; } else { } tmp = shost_priv(host); vha = (struct scsi_qla_host *)tmp; memset((void *)vha, 0, 1360UL); vha->host = host; vha->host_no = (unsigned long )host->host_no; vha->hw = ha; INIT_LIST_HEAD(& vha->vp_fcports); INIT_LIST_HEAD(& vha->work_list); INIT_LIST_HEAD(& vha->list); spinlock_check(& vha->work_lock); __raw_spin_lock_init(& vha->work_lock.__annonCompField18.rlock, "&(&vha->work_lock)->rlock", & __key); sprintf((char *)(& vha->host_str), "%s_%ld", (char *)"qla2xxx", vha->host_no); tmp___0 = dev_name((struct device const *)(& (ha->pdev)->dev)); ql_dbg(1073741824U, vha, 65, "Allocated the host=%p hw=%p vha=%p dev_name=%s", vha->host, vha->hw, vha, tmp___0); return (vha); fail: ; return (vha); } } static struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *vha , enum qla_work_type type ) { struct qla_work_evt *e ; uint8_t bail ; void *tmp ; { atomic_inc(& vha->vref_count); __asm__ volatile ("mfence": : : "memory"); if (*((unsigned long *)vha + 19UL) != 0UL) { atomic_dec(& vha->vref_count); bail = 1U; } else { bail = 0U; } if ((unsigned int )bail != 0U) { return ((struct qla_work_evt *)0); } else { } tmp = kzalloc(64UL, 32U); e = (struct qla_work_evt *)tmp; if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { atomic_dec(& vha->vref_count); return ((struct qla_work_evt *)0); } else { } INIT_LIST_HEAD(& e->list); e->type = type; e->flags = 1U; return (e); } } static int qla2x00_post_work(struct scsi_qla_host *vha , struct qla_work_evt *e ) { unsigned long flags ; raw_spinlock_t *tmp ; { tmp = spinlock_check(& vha->work_lock); flags = _raw_spin_lock_irqsave(tmp); list_add_tail(& e->list, & vha->work_list); spin_unlock_irqrestore(& vha->work_lock, flags); qla2xxx_wake_dpc(vha); return (0); } } int qla2x00_post_aen_work(struct scsi_qla_host *vha , enum fc_host_event_code code , u32 data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 0); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.aen.code = code; e->u.aen.data = data; tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_idc_ack_work(struct scsi_qla_host *vha , uint16_t *mb ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 1); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } memcpy((void *)(& e->u.idc_ack.mb), (void const *)mb, 14UL); tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_async_login_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 2); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.logio.fcport = fcport; if 
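/* when mailbox data is supplied, the two 16-bit words are copied into the event
 * before qla2x00_post_work() queues it and wakes the DPC thread. */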
((unsigned long )data != (unsigned long )((uint16_t *)0U)) { e->u.logio.data[0] = *data; e->u.logio.data[1] = *(data + 1UL); } else { } tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_async_login_done_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 3); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.logio.fcport = fcport; if ((unsigned long )data != (unsigned long )((uint16_t *)0U)) { e->u.logio.data[0] = *data; e->u.logio.data[1] = *(data + 1UL); } else { } tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_async_logout_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 4); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.logio.fcport = fcport; if ((unsigned long )data != (unsigned long )((uint16_t *)0U)) { e->u.logio.data[0] = *data; e->u.logio.data[1] = *(data + 1UL); } else { } tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_async_logout_done_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 5); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.logio.fcport = fcport; if ((unsigned long )data != (unsigned long )((uint16_t *)0U)) { e->u.logio.data[0] = *data; e->u.logio.data[1] = *(data + 1UL); } else { } tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_async_adisc_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 6); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.logio.fcport = fcport; if ((unsigned long )data != (unsigned long )((uint16_t *)0U)) { e->u.logio.data[0] = *data; e->u.logio.data[1] = *(data + 1UL); } else { } tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 7); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.logio.fcport = fcport; if ((unsigned long )data != (unsigned long )((uint16_t *)0U)) { e->u.logio.data[0] = *data; e->u.logio.data[1] = *(data + 1UL); } else { } tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_uevent_work(struct scsi_qla_host *vha , u32 code ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 8); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.uevent.code = code; tmp = qla2x00_post_work(vha, e); return (tmp); } } static void qla2x00_uevent_emit(struct scsi_qla_host *vha , u32 code ) { char event_string[40U] ; char *envp[2U] ; { envp[0] = (char *)(& event_string); envp[1] = (char *)0; switch (code) { case 0U: snprintf((char *)(& event_string), 40UL, "FW_DUMP=%ld", vha->host_no); goto ldv_67497; default: ; goto ldv_67497; } ldv_67497: kobject_uevent_env(& ((vha->hw)->pdev)->dev.kobj, 2, (char **)(& envp)); return; } } int qlafx00_post_aenfx_work(struct scsi_qla_host *vha , uint32_t evtcode , uint32_t *data , int cnt ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 9); if ((unsigned long )e == (unsigned 
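/* the qla2x00_post_async_*_work() helpers above differ only in the work type
 * passed to qla2x00_alloc_work() (2..7); this uevent variant uses type 8. */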
long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.aenfx.evtcode = evtcode; e->u.aenfx.count = (uint32_t )cnt; memcpy((void *)(& e->u.aenfx.mbx), (void const *)data, (unsigned long )cnt * 4UL); tmp = qla2x00_post_work(vha, e); return (tmp); } } void qla2x00_do_work(struct scsi_qla_host *vha ) { struct qla_work_evt *e ; struct qla_work_evt *tmp ; unsigned long flags ; struct list_head work ; raw_spinlock_t *tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; u32 tmp___1 ; struct list_head const *__mptr___1 ; { work.next = & work; work.prev = & work; tmp___0 = spinlock_check(& vha->work_lock); flags = _raw_spin_lock_irqsave(tmp___0); list_splice_init(& vha->work_list, & work); spin_unlock_irqrestore(& vha->work_lock, flags); __mptr = (struct list_head const *)work.next; e = (struct qla_work_evt *)__mptr; __mptr___0 = (struct list_head const *)e->list.next; tmp = (struct qla_work_evt *)__mptr___0; goto ldv_67534; ldv_67533: list_del_init(& e->list); switch ((unsigned int )e->type) { case 0U: tmp___1 = fc_get_event_number(); fc_host_post_event(vha->host, tmp___1, e->u.aen.code, e->u.aen.data); goto ldv_67523; case 1U: qla81xx_idc_ack(vha, (uint16_t *)(& e->u.idc_ack.mb)); goto ldv_67523; case 2U: qla2x00_async_login(vha, e->u.logio.fcport, (uint16_t *)(& e->u.logio.data)); goto ldv_67523; case 3U: qla2x00_async_login_done(vha, e->u.logio.fcport, (uint16_t *)(& e->u.logio.data)); goto ldv_67523; case 4U: qla2x00_async_logout(vha, e->u.logio.fcport); goto ldv_67523; case 5U: qla2x00_async_logout_done(vha, e->u.logio.fcport, (uint16_t *)(& e->u.logio.data)); goto ldv_67523; case 6U: qla2x00_async_adisc(vha, e->u.logio.fcport, (uint16_t *)(& e->u.logio.data)); goto ldv_67523; case 7U: qla2x00_async_adisc_done(vha, e->u.logio.fcport, (uint16_t *)(& e->u.logio.data)); goto ldv_67523; case 8U: qla2x00_uevent_emit(vha, e->u.uevent.code); goto ldv_67523; case 9U: qlafx00_process_aen(vha, e); goto ldv_67523; } ldv_67523: ; if ((int )e->flags & 1) { kfree((void const *)e); } else { } atomic_dec(& vha->vref_count); e = tmp; __mptr___1 = (struct list_head const *)tmp->list.next; tmp = (struct qla_work_evt *)__mptr___1; ldv_67534: ; if ((unsigned long )(& e->list) != (unsigned long )(& work)) { goto ldv_67533; } else { } return; } } void qla2x00_relogin(struct scsi_qla_host *vha ) { fc_port_t *fcport ; int status ; uint16_t next_loopid ; struct qla_hw_data *ha ; uint16_t data[2U] ; struct list_head const *__mptr ; int status2 ; uint8_t opts ; int tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; { next_loopid = 0U; ha = vha->hw; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_67553; ldv_67552: tmp = atomic_read((atomic_t const *)(& fcport->state)); if ((tmp != 4 && fcport->login_retry != 0) && (fcport->flags & 8U) == 0U) { fcport->login_retry = fcport->login_retry - 1; if ((int )fcport->flags & 1) { if ((fcport->flags & 4U) != 0U) { (*((ha->isp_ops)->fabric_logout))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { } if ((unsigned int )fcport->loop_id == 4096U) { next_loopid = ha->min_external_loopid; fcport->loop_id = next_loopid; status = qla2x00_find_new_loop_id(vha, fcport); if (status != 0) { goto ldv_67548; } else { } } else { } if ((((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) || (ha->device_type & 134217728U) != 
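/* adapters in the device-type set tested here post the fabric login as deferred
 * asynchronous work; everything else falls back to the synchronous
 * qla2x00_fabric_login() path below. */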
0U) { fcport->flags = fcport->flags | 8U; data[0] = 0U; data[1] = 1U; status = qla2x00_post_async_login_work(vha, fcport, (uint16_t *)(& data)); if (status == 0) { goto ldv_67549; } else { } status = 1; } else { status = qla2x00_fabric_login(vha, fcport, & next_loopid); if (status == 0) { opts = 0U; if ((fcport->flags & 4U) != 0U) { opts = (uint8_t )((unsigned int )opts | 2U); } else { } status2 = qla2x00_get_port_database(vha, fcport, (int )opts); if (status2 != 0) { status = 1; } else { } } else { } } } else { status = qla2x00_local_device_login(vha, fcport); } if (status == 0) { fcport->old_loop_id = fcport->loop_id; ql_dbg(268435456U, vha, 8195, "Port login OK: logged in ID 0x%x.\n", (int )fcport->loop_id); qla2x00_update_fcport(vha, fcport); } else if (status == 1) { set_bit(8L, (unsigned long volatile *)(& vha->dpc_flags)); ql_dbg(268435456U, vha, 8199, "Retrying %d login again loop_id 0x%x.\n", fcport->login_retry, (int )fcport->loop_id); } else { fcport->login_retry = 0; } if (fcport->login_retry == 0 && status != 0) { qla2x00_clear_loop_id(fcport); } else { } } else { } tmp___0 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { goto ldv_67548; } else { } ldv_67549: __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_67553: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_67552; } else { } ldv_67548: ; return; } } void qla83xx_schedule_work(scsi_qla_host_t *base_vha , int work_code ) { struct qla_hw_data *ha ; { ha = base_vha->hw; switch (work_code) { case 33280: ; if ((unsigned long )ha->dpc_lp_wq != (unsigned long )((struct workqueue_struct *)0)) { queue_work(ha->dpc_lp_wq, & ha->idc_aen); } else { } goto ldv_67560; case 1: ; if (*((unsigned long *)ha + 2UL) == 0UL) { if ((unsigned long )ha->dpc_hp_wq != (unsigned long )((struct workqueue_struct *)0)) { queue_work(ha->dpc_hp_wq, & ha->nic_core_reset); } else { } } else { ql_dbg(524288U, base_vha, 45150, "NIC Core reset is already active. 
Skip scheduling it again.\n"); } goto ldv_67560; case 2: ; if ((unsigned long )ha->dpc_hp_wq != (unsigned long )((struct workqueue_struct *)0)) { queue_work(ha->dpc_hp_wq, & ha->idc_state_handler); } else { } goto ldv_67560; case 3: ; if ((unsigned long )ha->dpc_hp_wq != (unsigned long )((struct workqueue_struct *)0)) { queue_work(ha->dpc_hp_wq, & ha->nic_core_unrecoverable); } else { } goto ldv_67560; default: ql_log(1U, base_vha, 45151, "Unknown work-code=0x%x.\n", work_code); } ldv_67560: ; return; } } void qla83xx_nic_core_unrecoverable_work(struct work_struct *work ) { struct qla_hw_data *ha ; struct work_struct const *__mptr ; scsi_qla_host_t *base_vha ; void *tmp ; uint32_t dev_state ; { __mptr = (struct work_struct const *)work; ha = (struct qla_hw_data *)__mptr + 0xfffffffffffff320UL; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; dev_state = 0U; qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, 571483012U, & dev_state); qla83xx_reset_ownership(base_vha); if (*((unsigned long *)ha + 2UL) != 0UL) { ha->flags.nic_core_reset_owner = 0U; qla83xx_wr_reg(base_vha, 571483012U, 6U); ql_log(2U, base_vha, 45152, "HW State: FAILED.\n"); qla83xx_schedule_work(base_vha, 2); } else { } qla83xx_idc_unlock(base_vha, 0); return; } } void qla83xx_idc_state_handler_work(struct work_struct *work ) { struct qla_hw_data *ha ; struct work_struct const *__mptr ; scsi_qla_host_t *base_vha ; void *tmp ; uint32_t dev_state ; { __mptr = (struct work_struct const *)work; ha = (struct qla_hw_data *)__mptr + 0xfffffffffffff370UL; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; dev_state = 0U; qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, 571483012U, & dev_state); if (dev_state == 6U || dev_state == 5U) { qla83xx_idc_state_handler(base_vha); } else { } qla83xx_idc_unlock(base_vha, 0); return; } } static int qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha ) { int rval ; unsigned long heart_beat_wait ; uint32_t heart_beat_counter1 ; uint32_t heart_beat_counter2 ; { rval = 0; heart_beat_wait = (unsigned long )jiffies + 250UL; ldv_67595: ; if ((long )(heart_beat_wait - (unsigned long )jiffies) < 0L) { ql_dbg(524288U, base_vha, 45180, "Nic Core f/w is not alive.\n"); rval = 258; goto ldv_67594; } else { } qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, 571482288U, & heart_beat_counter1); qla83xx_idc_unlock(base_vha, 0); msleep(100U); qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, 571482288U, & heart_beat_counter2); qla83xx_idc_unlock(base_vha, 0); if (heart_beat_counter1 == heart_beat_counter2) { goto ldv_67595; } else { } ldv_67594: ; return (rval); } } void qla83xx_nic_core_reset_work(struct work_struct *work ) { struct qla_hw_data *ha ; struct work_struct const *__mptr ; scsi_qla_host_t *base_vha ; void *tmp ; uint32_t dev_state ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { __mptr = (struct work_struct const *)work; ha = (struct qla_hw_data *)__mptr + 0xfffffffffffff3c0UL; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; dev_state = 0U; if ((ha->device_type & 32768U) != 0U) { tmp___0 = qla2xxx_mctp_dump(base_vha); if (tmp___0 != 0) { ql_log(1U, base_vha, 45185, "Failed to dump mctp\n"); } else { } return; } else { } if (*((unsigned long *)ha + 2UL) == 0UL) { tmp___1 = qla83xx_check_nic_core_fw_alive(base_vha); if (tmp___1 == 0) { qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, 571483012U, & dev_state); qla83xx_idc_unlock(base_vha, 0); if (dev_state != 4U) { ql_dbg(524288U, base_vha, 45178, "Nic Core f/w 
is alive.\n"); return; } else { } } else { } ha->flags.nic_core_reset_hdlr_active = 1U; tmp___2 = qla83xx_nic_core_reset(base_vha); if (tmp___2 != 0) { ql_dbg(524288U, base_vha, 45153, "NIC Core reset failed.\n"); } else { } ha->flags.nic_core_reset_hdlr_active = 0U; } else { } return; } } void qla83xx_service_idc_aen(struct work_struct *work ) { struct qla_hw_data *ha ; struct work_struct const *__mptr ; scsi_qla_host_t *base_vha ; void *tmp ; uint32_t dev_state ; uint32_t idc_control ; int tmp___0 ; { __mptr = (struct work_struct const *)work; ha = (struct qla_hw_data *)__mptr + 0xfffffffffffff418UL; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, 571483012U, & dev_state); qla83xx_rd_reg(base_vha, 571483024U, & idc_control); qla83xx_idc_unlock(base_vha, 0); if (dev_state == 4U) { if ((idc_control & 2U) != 0U) { ql_dbg(524288U, base_vha, 45154, "Application requested NIC Core Reset.\n"); qla83xx_schedule_work(base_vha, 1); } else { tmp___0 = qla83xx_check_nic_core_fw_alive(base_vha); if (tmp___0 == 0) { ql_dbg(524288U, base_vha, 45179, "Other protocol driver requested NIC Core Reset.\n"); qla83xx_schedule_work(base_vha, 1); } else { } } } else if (dev_state == 6U || dev_state == 5U) { qla83xx_schedule_work(base_vha, 2); } else { } return; } } static void qla83xx_wait_logic(void) { int i ; int tmp ; { tmp = preempt_count(); if (((unsigned long )tmp & 2096896UL) == 0UL) { msleep(100U); schedule(); } else { i = 0; goto ldv_67618; ldv_67617: cpu_relax(); i = i + 1; ldv_67618: ; if (i <= 19) { goto ldv_67617; } else { } } return; } } static int qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha ) { int rval ; uint32_t data ; uint32_t idc_lck_rcvry_stage_mask ; uint32_t idc_lck_rcvry_owner_mask ; struct qla_hw_data *ha ; { idc_lck_rcvry_stage_mask = 3U; idc_lck_rcvry_owner_mask = 60U; ha = base_vha->hw; ql_dbg(524288U, base_vha, 45190, "Trying force recovery of the IDC lock.\n"); rval = qla83xx_rd_reg(base_vha, 571483036U, & data); if (rval != 0) { return (rval); } else { } if ((data & idc_lck_rcvry_stage_mask) != 0U) { return (0); } else { data = (uint32_t )(((int )ha->portnum << 2) | 1); rval = qla83xx_wr_reg(base_vha, 571483036U, data); if (rval != 0) { return (rval); } else { } msleep(200U); rval = qla83xx_rd_reg(base_vha, 571483036U, & data); if (rval != 0) { return (rval); } else { } if ((data & idc_lck_rcvry_owner_mask) >> 2 == (uint32_t )ha->portnum) { data = (~ idc_lck_rcvry_stage_mask | 2U) & data; rval = qla83xx_wr_reg(base_vha, 571483036U, data); if (rval != 0) { return (rval); } else { } rval = qla83xx_rd_reg(base_vha, 2165424172U, & data); if (rval != 0) { return (rval); } else { } rval = qla83xx_wr_reg(base_vha, 571482372U, 255U); if (rval != 0) { return (rval); } else { } rval = qla83xx_wr_reg(base_vha, 571483036U, 0U); if (rval != 0) { return (rval); } else { } } else { return (0); } } return (rval); } } static int qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha ) { int rval ; uint32_t o_drv_lockid ; uint32_t n_drv_lockid ; unsigned long lock_recovery_timeout ; int tmp ; { rval = 0; lock_recovery_timeout = (unsigned long )jiffies + 500UL; retry_lockid: rval = qla83xx_rd_reg(base_vha, 571482372U, & o_drv_lockid); if (rval != 0) { goto exit; } else { } if ((long )((unsigned long )jiffies - lock_recovery_timeout) >= 0L) { tmp = qla83xx_force_lock_recovery(base_vha); if (tmp == 0) { return (0); } else { return (258); } } else { } rval = qla83xx_rd_reg(base_vha, 571482372U, & n_drv_lockid); if (rval 
!= 0) { goto exit; } else { } if (o_drv_lockid == n_drv_lockid) { qla83xx_wait_logic(); goto retry_lockid; } else { return (0); } exit: ; return (rval); } } void qla83xx_idc_lock(scsi_qla_host_t *base_vha , uint16_t requester_id ) { uint16_t options ; uint32_t data ; uint32_t lock_owner ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { options = (uint16_t )((int )((short )((int )requester_id << 15)) | 64); ha = base_vha->hw; retry_lock: tmp___0 = qla83xx_rd_reg(base_vha, 2165424168U, & data); if (tmp___0 == 0) { if (data != 0U) { qla83xx_wr_reg(base_vha, 571482372U, (uint32_t )ha->portnum); } else { qla83xx_rd_reg(base_vha, 571482372U, & lock_owner); ql_dbg(524288U, base_vha, 45155, "Failed to acquire IDC lock, acquired by %d, retrying...\n", lock_owner); tmp = qla83xx_idc_lock_recovery(base_vha); if (tmp == 0) { qla83xx_wait_logic(); goto retry_lock; } else { ql_log(1U, base_vha, 45173, "IDC Lock recovery FAILED.\n"); } } } else { } return; retry_lock2: tmp___2 = qla83xx_access_control(base_vha, (int )options, 0U, 0U, (uint16_t *)0U); if (tmp___2 != 0) { ql_dbg(524288U, base_vha, 45170, "Failed to acquire IDC lock. retrying...\n"); tmp___1 = qla83xx_idc_lock_recovery(base_vha); if (tmp___1 == 0) { qla83xx_wait_logic(); goto retry_lock2; } else { ql_log(1U, base_vha, 45174, "IDC Lock recovery FAILED.\n"); } } else { } return; } } void qla83xx_idc_unlock(scsi_qla_host_t *base_vha , uint16_t requester_id ) { uint16_t retry ; uint32_t data ; struct qla_hw_data *ha ; int tmp ; { ha = base_vha->hw; retry = 0U; retry_unlock: tmp = qla83xx_rd_reg(base_vha, 571482372U, & data); if (tmp == 0) { if ((uint32_t )ha->portnum == data) { qla83xx_rd_reg(base_vha, 2165424172U, & data); qla83xx_wr_reg(base_vha, 571482372U, 255U); } else if ((unsigned int )retry <= 9U) { qla83xx_wait_logic(); retry = (uint16_t )((int )retry + 1); ql_dbg(524288U, base_vha, 45156, "Failed to release IDC lock, retyring=%d\n", (int )retry); goto retry_unlock; } else { } } else if ((unsigned int )retry <= 9U) { qla83xx_wait_logic(); retry = (uint16_t )((int )retry + 1); ql_dbg(524288U, base_vha, 45157, "Failed to read drv-lockid, retyring=%d\n", (int )retry); goto retry_unlock; } else { } return; } } int __qla83xx_set_drv_presence(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; uint32_t drv_presence ; { rval = 0; ha = vha->hw; rval = qla83xx_rd_reg(vha, 571483016U, & drv_presence); if (rval == 0) { drv_presence = (uint32_t )(1 << (int )ha->portnum) | drv_presence; rval = qla83xx_wr_reg(vha, 571483016U, drv_presence); } else { } return (rval); } } int qla83xx_set_drv_presence(scsi_qla_host_t *vha ) { int rval ; { rval = 0; qla83xx_idc_lock(vha, 0); rval = __qla83xx_set_drv_presence(vha); qla83xx_idc_unlock(vha, 0); return (rval); } } int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; uint32_t drv_presence ; { rval = 0; ha = vha->hw; rval = qla83xx_rd_reg(vha, 571483016U, & drv_presence); if (rval == 0) { drv_presence = (uint32_t )(~ (1 << (int )ha->portnum)) & drv_presence; rval = qla83xx_wr_reg(vha, 571483016U, drv_presence); } else { } return (rval); } } int qla83xx_clear_drv_presence(scsi_qla_host_t *vha ) { int rval ; { rval = 0; qla83xx_idc_lock(vha, 0); rval = __qla83xx_clear_drv_presence(vha); qla83xx_idc_unlock(vha, 0); return (rval); } } static void qla83xx_need_reset_handler(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t drv_ack ; uint32_t drv_presence ; unsigned long ack_timeout ; { ha = vha->hw; ack_timeout = (unsigned 
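/* qla83xx_need_reset_handler(): polls drv_ack against drv_presence until every
 * present driver has acknowledged the reset or the fcoe_reset_timeout window
 * expires, then moves the IDC device state to COLD/RE-INIT. */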
long )(ha->fcoe_reset_timeout * 250U) + (unsigned long )jiffies; ldv_67695: qla83xx_rd_reg(vha, 571483020U, & drv_ack); qla83xx_rd_reg(vha, 571483016U, & drv_presence); if ((drv_ack & drv_presence) == drv_presence) { goto ldv_67688; } else { } if ((long )((unsigned long )jiffies - ack_timeout) >= 0L) { ql_log(1U, vha, 45159, "RESET ACK TIMEOUT! drv_presence=0x%x drv_ack=0x%x\n", drv_presence, drv_ack); if (drv_ack != drv_presence) { qla83xx_wr_reg(vha, 571483016U, drv_ack); } else { } goto ldv_67688; } else { } qla83xx_idc_unlock(vha, 0); msleep(1000U); qla83xx_idc_lock(vha, 0); goto ldv_67695; ldv_67688: qla83xx_wr_reg(vha, 571483012U, 1U); ql_log(2U, vha, 45160, "HW State: COLD/RE-INIT.\n"); return; } } static int qla83xx_device_bootstrap(scsi_qla_host_t *vha ) { int rval ; uint32_t idc_control ; { rval = 0; qla83xx_wr_reg(vha, 571483012U, 2U); ql_log(2U, vha, 45161, "HW State: INITIALIZING.\n"); __qla83xx_get_idc_control(vha, & idc_control); idc_control = idc_control & 4294967293U; __qla83xx_set_idc_control(vha, 0U); qla83xx_idc_unlock(vha, 0); rval = qla83xx_restart_nic_firmware(vha); qla83xx_idc_lock(vha, 0); if (rval != 0) { ql_log(0U, vha, 45162, "Failed to restart NIC f/w.\n"); qla83xx_wr_reg(vha, 571483012U, 6U); ql_log(2U, vha, 45163, "HW State: FAILED.\n"); } else { ql_dbg(524288U, vha, 45164, "Success in restarting nic f/w.\n"); qla83xx_wr_reg(vha, 571483012U, 3U); ql_log(2U, vha, 45165, "HW State: READY.\n"); } return (rval); } } int qla83xx_idc_state_handler(scsi_qla_host_t *base_vha ) { struct qla_hw_data *ha ; int rval ; unsigned long dev_init_timeout ; uint32_t dev_state ; { ha = base_vha->hw; rval = 0; dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; ldv_67725: ; if ((long )((unsigned long )jiffies - dev_init_timeout) >= 0L) { ql_log(1U, base_vha, 45166, "Initialization TIMEOUT!\n"); qla83xx_wr_reg(base_vha, 571483012U, 6U); ql_log(2U, base_vha, 45167, "HW State: FAILED.\n"); } else { } qla83xx_rd_reg(base_vha, 571483012U, & dev_state); switch (dev_state) { case 3U: ; if (*((unsigned long *)ha + 2UL) != 0UL) { qla83xx_idc_audit(base_vha, 1); } else { } ha->flags.nic_core_reset_owner = 0U; ql_dbg(524288U, base_vha, 45168, "Reset_owner reset by 0x%x.\n", (int )ha->portnum); goto exit; case 1U: ; if (*((unsigned long *)ha + 2UL) != 0UL) { rval = qla83xx_device_bootstrap(base_vha); } else { qla83xx_idc_unlock(base_vha, 0); msleep(1000U); qla83xx_idc_lock(base_vha, 0); } goto ldv_67717; case 2U: qla83xx_idc_unlock(base_vha, 0); msleep(1000U); qla83xx_idc_lock(base_vha, 0); goto ldv_67717; case 4U: ; if (ql2xdontresethba == 0 && *((unsigned long *)ha + 2UL) != 0UL) { qla83xx_need_reset_handler(base_vha); } else { qla83xx_idc_unlock(base_vha, 0); msleep(1000U); qla83xx_idc_lock(base_vha, 0); } dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; goto ldv_67717; case 5U: qla83xx_idc_unlock(base_vha, 0); msleep(1000U); qla83xx_idc_lock(base_vha, 0); goto ldv_67717; case 7U: ; if (*((unsigned long *)ha + 2UL) != 0UL) { goto exit; } else { } qla83xx_idc_unlock(base_vha, 0); msleep(1000U); qla83xx_idc_lock(base_vha, 0); dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; goto ldv_67717; case 6U: ; if (*((unsigned long *)ha + 2UL) != 0UL) { qla83xx_idc_audit(base_vha, 1); } else { } ha->flags.nic_core_reset_owner = 0U; __qla83xx_clear_drv_presence(base_vha); qla83xx_idc_unlock(base_vha, 0); qla8xxx_dev_failed_handler(base_vha); rval = 258; 
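/* the state handler is entered and left with the IDC lock held, so the lock is
 * re-acquired here after qla8xxx_dev_failed_handler() ran unlocked. */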
qla83xx_idc_lock(base_vha, 0); goto exit; case 3134241488U: qla83xx_idc_unlock(base_vha, 0); msleep(1000U); qla83xx_idc_lock(base_vha, 0); goto ldv_67717; default: ql_log(1U, base_vha, 45169, "Unknown Device State: %x.\n", dev_state); qla83xx_idc_unlock(base_vha, 0); qla8xxx_dev_failed_handler(base_vha); rval = 258; qla83xx_idc_lock(base_vha, 0); goto exit; } ldv_67717: ; goto ldv_67725; exit: ; return (rval); } } void qla2x00_disable_board_on_pci_error(struct work_struct *work ) { struct qla_hw_data *ha ; struct work_struct const *__mptr ; struct pci_dev *pdev ; scsi_qla_host_t *base_vha ; void *tmp ; { __mptr = (struct work_struct const *)work; ha = (struct qla_hw_data *)__mptr + 0xfffffffffffff2d0UL; pdev = ha->pdev; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; ql_log(1U, base_vha, 347, "Disabling adapter.\n"); set_bit(15L, (unsigned long volatile *)(& base_vha->dpc_flags)); qla2x00_delete_all_vps(ha, base_vha); qla2x00_abort_all_cmds(base_vha, 65536); qla2x00_dfs_remove(base_vha); qla84xx_put_chip(base_vha); if (base_vha->timer_active != 0U) { qla2x00_stop_timer(base_vha); } else { } base_vha->flags.online = 0U; qla2x00_destroy_deferred_work(ha); qla2x00_free_sysfs_attr(base_vha, 0); fc_remove_host(base_vha->host); ldv_scsi_remove_host_26(base_vha->host); base_vha->flags.init_done = 0U; qla25xx_delete_queues(base_vha); qla2x00_free_irqs(base_vha); qla2x00_free_fcports(base_vha); qla2x00_mem_free(ha); qla82xx_md_free(base_vha); qla2x00_free_queues(ha); qla2x00_unmap_iobases(ha); pci_release_selected_regions(ha->pdev, ha->bars); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); return; } } static int qla2x00_do_dpc(void *data ) { int rval ; scsi_qla_host_t *base_vha ; struct qla_hw_data *ha ; void *tmp ; struct task_struct *tmp___0 ; struct task_struct *tmp___1 ; long volatile __ret ; struct task_struct *tmp___2 ; struct task_struct *tmp___3 ; struct task_struct *tmp___4 ; struct task_struct *tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; int tmp___19 ; int tmp___20 ; int tmp___21 ; int ret ; int tmp___22 ; int tmp___23 ; int tmp___24 ; int tmp___25 ; int tmp___26 ; int tmp___27 ; int tmp___28 ; int tmp___29 ; int tmp___30 ; int tmp___31 ; int tmp___32 ; int tmp___33 ; struct task_struct *tmp___34 ; long volatile __ret___0 ; struct task_struct *tmp___35 ; struct task_struct *tmp___36 ; struct task_struct *tmp___37 ; struct task_struct *tmp___38 ; bool tmp___39 ; int tmp___40 ; struct task_struct *tmp___41 ; struct task_struct *tmp___42 ; { ha = (struct qla_hw_data *)data; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; tmp___0 = get_current(); set_user_nice(tmp___0, -20L); tmp___1 = get_current(); tmp___1->task_state_change = 0UL; __ret = 1L; switch (8UL) { case 1UL: tmp___2 = get_current(); __asm__ volatile ("xchgb %b0, %1\n": "+q" (__ret), "+m" (tmp___2->state): : "memory", "cc"); goto ldv_67742; case 2UL: tmp___3 = get_current(); __asm__ volatile ("xchgw %w0, %1\n": "+r" (__ret), "+m" (tmp___3->state): : "memory", "cc"); goto ldv_67742; case 4UL: tmp___4 = get_current(); __asm__ volatile ("xchgl %0, %1\n": "+r" (__ret), "+m" (tmp___4->state): : "memory", "cc"); goto ldv_67742; case 8UL: tmp___5 = get_current(); __asm__ volatile ("xchgq %q0, %1\n": "+r" (__ret), "+m" (tmp___5->state): : "memory", "cc"); goto ldv_67742; default: __xchg_wrong_size(); } ldv_67742: ; goto 
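/* qla2x00_do_dpc(): the xchg-on-current->state sequences above and at end_loop
 * look like the inlined expansion of set_current_state(); the goto below jumps to
 * the kthread_should_stop() check, and each pass drains the host work list and
 * services the dpc_flags bits. */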
ldv_67749; ldv_67761: ql_dbg(67108864U, base_vha, 16384, "DPC handler sleeping.\n"); schedule(); if (*((unsigned long *)base_vha + 19UL) == 0UL || *((unsigned long *)ha + 2UL) != 0UL) { goto end_loop; } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(67108864U, base_vha, 16387, "eeh_busy=%d.\n", (int )ha->flags.eeh_busy); goto end_loop; } else { } ha->dpc_active = 1U; ql_dbg(67141632U, base_vha, 16385, "DPC handler waking up, dpc_flags=0x%lx.\n", base_vha->dpc_flags); qla2x00_do_work(base_vha); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { if ((ha->device_type & 262144U) != 0U) { tmp___6 = test_and_clear_bit(17L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___6 != 0) { qla8044_idc_lock(ha); qla8044_wr_direct(base_vha, 4U, 6U); qla8044_idc_unlock(ha); ql_log(2U, base_vha, 16388, "HW State: FAILED.\n"); qla8044_device_state_handler(base_vha); goto ldv_67749; } else { } } else { tmp___7 = test_and_clear_bit(17L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___7 != 0) { qla82xx_idc_lock(ha); qla82xx_wr_32(ha, 136323392UL, 6U); qla82xx_idc_unlock(ha); ql_log(2U, base_vha, 337, "HW State: FAILED.\n"); qla82xx_device_state_handler(base_vha); goto ldv_67749; } else { } } tmp___10 = test_and_clear_bit(18L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___10 != 0) { ql_dbg(67108864U, base_vha, 16389, "FCoE context reset scheduled.\n"); tmp___9 = test_and_set_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___9 == 0) { tmp___8 = qla82xx_fcoe_ctx_reset(base_vha); if (tmp___8 != 0) { set_bit(2L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } clear_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } ql_dbg(67108864U, base_vha, 16390, "FCoE context reset end.\n"); } else { } } else if ((ha->device_type & 131072U) != 0U) { tmp___13 = test_and_clear_bit(17L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___13 != 0) { ql_dbg(67108864U, base_vha, 16416, "Firmware Reset Recovery\n"); tmp___12 = qlafx00_reset_initialize(base_vha); if (tmp___12 != 0) { tmp___11 = constant_test_bit(15L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___11 == 0) { set_bit(17L, (unsigned long volatile *)(& base_vha->dpc_flags)); ql_dbg(67108864U, base_vha, 16417, "Reset Recovery Failed\n"); } else { } } else { } } else { } tmp___16 = test_and_clear_bit(24L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___16 != 0) { ql_dbg(67108864U, base_vha, 16418, "ISPFx00 Target Scan scheduled\n"); tmp___15 = qlafx00_rescan_isp(base_vha); if (tmp___15 != 0) { tmp___14 = constant_test_bit(15L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___14 == 0) { set_bit(17L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } ql_dbg(67108864U, base_vha, 16414, "ISPFx00 Target Scan Failed\n"); } else { } ql_dbg(67108864U, base_vha, 16415, "ISPFx00 Target Scan End\n"); } else { } tmp___17 = test_and_clear_bit(26L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___17 != 0) { ql_dbg(67108864U, base_vha, 16419, "ISPFx00 Host Info resend scheduled\n"); qlafx00_fx_disc(base_vha, & (base_vha->hw)->mr.fcport, 153); } else { } } else { } tmp___20 = test_and_clear_bit(2L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___20 != 0) { ql_dbg(67108864U, base_vha, 16391, "ISP abort scheduled.\n"); tmp___19 = test_and_set_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___19 == 0) { tmp___18 = 
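/* ISP abort: dpc_flags bit 3 serialises the abort so only one context runs
 * isp_ops->abort_isp() at a time; if the abort fails, bit 2 is set again so the
 * DPC thread retries on a later pass. */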
(*((ha->isp_ops)->abort_isp))(base_vha); if (tmp___18 != 0) { set_bit(2L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } clear_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } ql_dbg(67108864U, base_vha, 16392, "ISP abort end.\n"); } else { } tmp___21 = test_and_clear_bit(13L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___21 != 0) { qla2x00_update_fcports(base_vha); } else { } tmp___22 = constant_test_bit(21L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___22 != 0) { ret = qla2x00_send_change_request(base_vha, 3, 0); if (ret != 0) { ql_log(1U, base_vha, 289, "Failed to enable receiving of RSCN requests: 0x%x.\n", ret); } else { } clear_bit(21L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } if ((ha->device_type & 131072U) != 0U) { goto loop_resync_check; } else { } tmp___23 = constant_test_bit(20L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___23 != 0) { ql_dbg(67108864U, base_vha, 16393, "Quiescence mode scheduled.\n"); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { if ((ha->device_type & 16384U) != 0U) { qla82xx_device_state_handler(base_vha); } else { } if ((ha->device_type & 262144U) != 0U) { qla8044_device_state_handler(base_vha); } else { } clear_bit(20L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (*((unsigned long *)ha + 2UL) == 0UL) { qla2x00_perform_loop_resync(base_vha); if ((ha->device_type & 16384U) != 0U) { qla82xx_idc_lock(ha); qla82xx_clear_qsnt_ready(base_vha); qla82xx_idc_unlock(ha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); qla8044_clear_qsnt_ready(base_vha); qla8044_idc_unlock(ha); } else { } } else { } } else { clear_bit(20L, (unsigned long volatile *)(& base_vha->dpc_flags)); qla2x00_quiesce_io(base_vha); } ql_dbg(67108864U, base_vha, 16394, "Quiescence mode end.\n"); } else { } tmp___24 = test_and_clear_bit(0L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___24 != 0) { tmp___25 = test_and_set_bit(1L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___25 == 0) { ql_dbg(67108864U, base_vha, 16395, "Reset marker scheduled.\n"); qla2x00_rst_aen(base_vha); clear_bit(1L, (unsigned long volatile *)(& base_vha->dpc_flags)); ql_dbg(67108864U, base_vha, 16396, "Reset marker end.\n"); } else { } } else { } tmp___26 = test_and_clear_bit(8L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___26 != 0) { tmp___27 = constant_test_bit(4L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___27 == 0) { tmp___28 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___28 != 2) { ql_dbg(67108864U, base_vha, 16397, "Relogin scheduled.\n"); qla2x00_relogin(base_vha); ql_dbg(67108864U, base_vha, 16398, "Relogin end.\n"); } else { } } else { } } else { } loop_resync_check: tmp___30 = test_and_clear_bit(4L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___30 != 0) { ql_dbg(67108864U, base_vha, 16399, "Loop resync scheduled.\n"); tmp___29 = test_and_set_bit(5L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___29 == 0) { rval = qla2x00_loop_resync(base_vha); clear_bit(5L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } ql_dbg(67108864U, base_vha, 16400, "Loop resync end.\n"); } else { } if ((ha->device_type & 131072U) != 0U) { goto intr_on_check; } else { } tmp___31 = constant_test_bit(16L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___31 != 0) { tmp___32 = atomic_read((atomic_t const *)(& 
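/* once the loop state reaches 5 (loop ready), the pending NPIV configuration
 * flagged by dpc_flags bit 16 is written out via qla2xxx_flash_npiv_conf(). */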
base_vha->loop_state)); if (tmp___32 == 5) { clear_bit(16L, (unsigned long volatile *)(& base_vha->dpc_flags)); qla2xxx_flash_npiv_conf(base_vha); } else { } } else { } intr_on_check: ; if ((unsigned int )ha->interrupts_on == 0U) { (*((ha->isp_ops)->enable_intrs))(ha); } else { } tmp___33 = test_and_clear_bit(11L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___33 != 0) { if ((unsigned int )ha->beacon_blink_led == 1U) { (*((ha->isp_ops)->beacon_blink))(base_vha); } else { } } else { } if ((ha->device_type & 131072U) == 0U) { qla2x00_do_dpc_all_vps(base_vha); } else { } ha->dpc_active = 0U; end_loop: tmp___34 = get_current(); tmp___34->task_state_change = 0UL; __ret___0 = 1L; switch (8UL) { case 1UL: tmp___35 = get_current(); __asm__ volatile ("xchgb %b0, %1\n": "+q" (__ret___0), "+m" (tmp___35->state): : "memory", "cc"); goto ldv_67755; case 2UL: tmp___36 = get_current(); __asm__ volatile ("xchgw %w0, %1\n": "+r" (__ret___0), "+m" (tmp___36->state): : "memory", "cc"); goto ldv_67755; case 4UL: tmp___37 = get_current(); __asm__ volatile ("xchgl %0, %1\n": "+r" (__ret___0), "+m" (tmp___37->state): : "memory", "cc"); goto ldv_67755; case 8UL: tmp___38 = get_current(); __asm__ volatile ("xchgq %q0, %1\n": "+r" (__ret___0), "+m" (tmp___38->state): : "memory", "cc"); goto ldv_67755; default: __xchg_wrong_size(); } ldv_67755: ; ldv_67749: tmp___39 = kthread_should_stop(); if (tmp___39) { tmp___40 = 0; } else { tmp___40 = 1; } if (tmp___40) { goto ldv_67761; } else { } tmp___41 = get_current(); tmp___41->task_state_change = 0UL; tmp___42 = get_current(); tmp___42->state = 0L; ql_dbg(67108864U, base_vha, 16401, "DPC handler exiting.\n"); ha->dpc_active = 0U; qla2x00_abort_all_cmds(base_vha, 65536); return (0); } } void qla2xxx_wake_dpc(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct task_struct *t ; int tmp ; { ha = vha->hw; t = ha->dpc_thread; tmp = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp == 0 && (unsigned long )t != (unsigned long )((struct task_struct *)0)) { wake_up_process(t); } else { } return; } } static void qla2x00_rst_aen(scsi_qla_host_t *vha ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { if (*((unsigned long *)vha + 19UL) != 0UL && *((unsigned long *)vha + 19UL) == 0UL) { tmp___1 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___1 == 0) { tmp___2 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___2 == 0) { ldv_67771: clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); vha->marker_needed = 1U; tmp = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp == 0) { tmp___0 = constant_test_bit(0L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { goto ldv_67771; } else { goto ldv_67772; } } else { } ldv_67772: ; } else { } } else { } } else { } return; } } void qla2x00_timer(scsi_qla_host_t *vha ) { unsigned long cpu_flags ; int start_dpc ; int index ; srb_t *sp ; uint16_t w ; struct qla_hw_data *ha ; struct req_que *req ; int tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; fc_port_t *sfcp ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; int tmp___19 ; int tmp___20 ; int tmp___21 ; int tmp___22 ; int tmp___23 ; int tmp___24 ; int tmp___25 ; int tmp___26 ; { cpu_flags = 0UL; start_dpc = 0; ha = vha->hw; if (*((unsigned long 
*)ha + 2UL) != 0UL) { ql_dbg(16777216U, vha, 24576, "EEH = %d, restarting timer.\n", (int )ha->flags.eeh_busy); qla2x00_restart_timer(vha, 1UL); return; } else { } tmp = pci_channel_offline(ha->pdev); if (tmp == 0) { pci_read_config_word((struct pci_dev const *)ha->pdev, 0, & w); qla2x00_check_reg16_for_disconnect(vha, (int )w); } else { } if ((unsigned int )vha->vp_idx == 0U && ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U)) { tmp___0 = constant_test_bit(20L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { start_dpc = start_dpc + 1; } else { } if ((ha->device_type & 16384U) != 0U) { qla82xx_watchdog(vha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_watchdog(vha); } else { } } else { } if ((unsigned int )vha->vp_idx == 0U && (ha->device_type & 131072U) != 0U) { qlafx00_timer_routine(vha); } else { } tmp___5 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___5 > 0) { tmp___6 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___6 == 0) { tmp___7 = constant_test_bit(18L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___7 == 0) { if (*((unsigned long *)vha + 19UL) != 0UL) { tmp___2 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___2 == (int )vha->loop_down_abort_time) { ql_log(2U, vha, 24584, "Loop down - aborting the queues before time expires.\n"); if ((ha->device_type & 1U) == 0U && (unsigned int )vha->link_down_timeout != 0U) { atomic_set(& vha->loop_state, 6); } else { } if ((unsigned int )vha->vp_idx == 0U) { tmp___1 = spinlock_check(& ha->hardware_lock); cpu_flags = _raw_spin_lock_irqsave(tmp___1); req = *(ha->req_q_map); index = 1; goto ldv_67790; ldv_67789: sp = *(req->outstanding_cmds + (unsigned long )index); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto ldv_67787; } else { } if ((unsigned int )sp->type != 8U) { goto ldv_67787; } else { } sfcp = sp->fcport; if ((sfcp->flags & 4U) == 0U) { goto ldv_67787; } else { } if ((ha->device_type & 16384U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } goto ldv_67788; ldv_67787: index = index + 1; ldv_67790: ; if ((int )req->num_outstanding_cmds > index) { goto ldv_67789; } else { } ldv_67788: spin_unlock_irqrestore(& ha->hardware_lock, cpu_flags); } else { } start_dpc = start_dpc + 1; } else { } tmp___3 = atomic_dec_and_test(& vha->loop_down_timer); if (tmp___3 != 0) { if ((vha->device_flags & 2U) == 0U) { ql_log(1U, vha, 24585, "Loop down - aborting ISP.\n"); if ((ha->device_type & 16384U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } } else { } } else { } tmp___4 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); ql_dbg(16777216U, vha, 24586, "Loop down - seconds remaining %d.\n", tmp___4); } else { } } else { } } else { } } else { } if ((unsigned int )vha->vp_idx == 0U && (unsigned int )ha->beacon_blink_led == 1U) { if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { set_bit(11L, (unsigned long volatile *)(& vha->dpc_flags)); start_dpc = start_dpc + 1; } else { } } else { } tmp___8 = list_empty((struct list_head const *)(& vha->work_list)); if (tmp___8 == 0) { start_dpc = start_dpc + 1; } else { } tmp___18 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___18 != 0) { goto _L; } else { tmp___19 = constant_test_bit(4L, (unsigned 
long const volatile *)(& vha->dpc_flags)); if (tmp___19 != 0) { goto _L; } else { tmp___20 = constant_test_bit(13L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___20 != 0) { goto _L; } else if (start_dpc != 0) { goto _L; } else { tmp___21 = constant_test_bit(0L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___21 != 0) { goto _L; } else { tmp___22 = constant_test_bit(11L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___22 != 0) { goto _L; } else { tmp___23 = constant_test_bit(17L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___23 != 0) { goto _L; } else { tmp___24 = constant_test_bit(18L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___24 != 0) { goto _L; } else { tmp___25 = constant_test_bit(14L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___25 != 0) { goto _L; } else { tmp___26 = constant_test_bit(8L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___26 != 0) { _L: /* CIL Label */ tmp___9 = constant_test_bit(0L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___10 = constant_test_bit(13L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___11 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___12 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); ql_dbg(16777216U, vha, 24587, "isp_abort_needed=%d loop_resync_needed=%d fcport_update_needed=%d start_dpc=%d reset_marker_needed=%d", tmp___12, tmp___11, tmp___10, start_dpc, tmp___9); tmp___13 = constant_test_bit(8L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___14 = constant_test_bit(14L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___15 = constant_test_bit(18L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___16 = constant_test_bit(17L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___17 = constant_test_bit(11L, (unsigned long const volatile *)(& vha->dpc_flags)); ql_dbg(16777216U, vha, 24588, "beacon_blink_needed=%d isp_unrecoverable=%d fcoe_ctx_reset_needed=%d vp_dpc_needed=%d relogin_needed=%d.\n", tmp___17, tmp___16, tmp___15, tmp___14, tmp___13); qla2xxx_wake_dpc(vha); } else { } } } } } } } } } qla2x00_restart_timer(vha, 1UL); return; } } static struct mutex qla_fw_lock = {{1}, {{{{{0}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "qla_fw_lock.wait_lock", 0, 0UL}}}}, {& qla_fw_lock.wait_list, & qla_fw_lock.wait_list}, 0, (void *)(& qla_fw_lock), {0, {0, 0}, "qla_fw_lock", 0, 0UL}}; static struct fw_blob qla_fw_blobs[11U] = { {(char *)"ql2100_fw.bin", {4096U, 0U}, 0}, {(char *)"ql2200_fw.bin", {4096U, 0U}, 0}, {(char *)"ql2300_fw.bin", {2048U, 0U}, 0}, {(char *)"ql2322_fw.bin", {2048U, 114688U, 122880U, 0U}, 0}, {(char *)"ql2400_fw.bin", {0U, 0U, 0U, 0U}, 0}, {(char *)"ql2500_fw.bin", {0U, 0U, 0U, 0U}, 0}, {(char *)"ql8100_fw.bin", {0U, 0U, 0U, 0U}, 0}, {(char *)"ql8200_fw.bin", {0U, 0U, 0U, 0U}, 0}, {(char *)"ql2600_fw.bin", {0U, 0U, 0U, 0U}, 0}, {(char *)"ql8300_fw.bin", {0U, 0U, 0U, 0U}, 0}, {(char *)"ql2700_fw.bin", {0U, 0U, 0U, 0U}, 0}}; struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct fw_blob *blob ; int tmp ; { ha = vha->hw; if ((int )ha->device_type & 1) { blob = (struct fw_blob *)(& qla_fw_blobs); } else if ((ha->device_type & 2U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 1UL; } else if (((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 32U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 2UL; } else if 
((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 3UL; } else if ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 4UL; } else if ((ha->device_type & 2048U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 5UL; } else if ((ha->device_type & 8192U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 6UL; } else if ((ha->device_type & 16384U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 7UL; } else if ((ha->device_type & 32768U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 8UL; } else if ((ha->device_type & 65536U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 9UL; } else if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 10UL; } else { return ((struct fw_blob *)0); } mutex_lock_nested(& qla_fw_lock, 0U); if ((unsigned long )blob->fw != (unsigned long )((struct firmware const *)0)) { goto out; } else { } tmp = request_firmware(& blob->fw, (char const *)blob->name, & (ha->pdev)->dev); if (tmp != 0) { ql_log(1U, vha, 99, "Failed to load firmware image (%s).\n", blob->name); blob->fw = (struct firmware const *)0; blob = (struct fw_blob *)0; goto out; } else { } out: mutex_unlock(& qla_fw_lock); return (blob); } } static void qla2x00_release_firmware(void) { int idx ; { mutex_lock_nested(& qla_fw_lock, 0U); idx = 0; goto ldv_67805; ldv_67804: release_firmware(qla_fw_blobs[idx].fw); idx = idx + 1; ldv_67805: ; if (idx <= 10) { goto ldv_67804; } else { } mutex_unlock(& qla_fw_lock); return; } } static pci_ers_result_t qla2xxx_pci_error_detected(struct pci_dev *pdev , pci_channel_state_t state ) { scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; { tmp = pci_get_drvdata(pdev); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; ql_dbg(2097152U, vha, 36864, "PCI error detected, state %x.\n", state); switch (state) { case 1U: ha->flags.eeh_busy = 0U; return (2U); case 2U: ha->flags.eeh_busy = 1U; if ((ha->device_type & 16384U) != 0U) { ha->flags.isp82xx_fw_hung = 1U; ql_dbg(2097152U, vha, 36865, "Pci channel io frozen\n"); qla82xx_clear_pending_mbx(vha); } else { } qla2x00_free_irqs(vha); pci_disable_device(pdev); qla2x00_abort_all_cmds(vha, 524288); return (3U); case 3U: ha->flags.pci_channel_io_perm_failure = 1U; qla2x00_abort_all_cmds(vha, 65536); return (4U); } return (3U); } } static pci_ers_result_t qla2xxx_pci_mmio_enabled(struct pci_dev *pdev ) { int risc_paused ; uint32_t stat ; unsigned long flags ; scsi_qla_host_t *base_vha ; void *tmp ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; struct device_reg_24xx *reg24 ; raw_spinlock_t *tmp___0 ; { risc_paused = 0; tmp = pci_get_drvdata(pdev); base_vha = (scsi_qla_host_t *)tmp; ha = base_vha->hw; reg = & (ha->iobase)->isp; reg24 = & (ha->iobase)->isp24; if ((ha->device_type & 16384U) != 0U) { return (5U); } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { stat = readl((void const volatile *)(& reg->hccr)); if ((stat & 32U) != 0U) { risc_paused = 1; } else { } } else if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { stat = readl((void const volatile *)(& 
reg->u.isp2300.host_status)); if ((stat & 256U) != 0U) { risc_paused = 1; } else { } } else if ((ha->device_type & 134217728U) != 0U) { stat = readl((void const volatile *)(& reg24->host_status)); if ((stat & 256U) != 0U) { risc_paused = 1; } else { } } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); if (risc_paused != 0) { ql_log(2U, base_vha, 36867, "RISC paused -- mmio_enabled, Dumping firmware.\n"); (*((ha->isp_ops)->fw_dump))(base_vha, 0); return (3U); } else { return (5U); } } } static uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha ) { uint32_t rval ; uint32_t drv_active ; struct qla_hw_data *ha ; int fn ; struct pci_dev *other_pdev ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; { rval = 258U; drv_active = 0U; ha = base_vha->hw; other_pdev = (struct pci_dev *)0; ql_dbg(2097152U, base_vha, 36870, "Entered %s.\n", "qla82xx_error_recovery"); set_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (*((unsigned long *)base_vha + 19UL) != 0UL) { qla2x00_abort_isp_cleanup(base_vha); } else { } fn = (int )(ha->pdev)->devfn & 7; goto ldv_67838; ldv_67840: fn = fn - 1; ql_dbg(2097152U, base_vha, 36871, "Finding pci device at function = 0x%x.\n", fn); tmp = pci_domain_nr((ha->pdev)->bus); other_pdev = pci_get_domain_bus_and_slot(tmp, (unsigned int )((ha->pdev)->bus)->number, ((ha->pdev)->devfn & 248U) | ((unsigned int )fn & 7U)); if ((unsigned long )other_pdev == (unsigned long )((struct pci_dev *)0)) { goto ldv_67838; } else { } tmp___0 = atomic_read((atomic_t const *)(& other_pdev->enable_cnt)); if (tmp___0 != 0) { ql_dbg(2097152U, base_vha, 36872, "Found PCI func available and enable at 0x%x.\n", fn); pci_dev_put(other_pdev); goto ldv_67839; } else { } pci_dev_put(other_pdev); ldv_67838: ; if (fn > 0) { goto ldv_67840; } else { } ldv_67839: ; if (fn == 0) { ql_dbg(2097152U, base_vha, 36873, "This devfn is reset owner = 0x%x.\n", (ha->pdev)->devfn); qla82xx_idc_lock(ha); qla82xx_wr_32(ha, 136323392UL, 2U); qla82xx_wr_32(ha, 136323444UL, 1U); tmp___1 = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp___1; ql_dbg(2097152U, base_vha, 36874, "drv_active = 0x%x.\n", drv_active); qla82xx_idc_unlock(ha); if (drv_active != 0U) { tmp___2 = qla82xx_start_firmware(base_vha); rval = (uint32_t )tmp___2; } else { rval = 0U; } qla82xx_idc_lock(ha); if (rval != 0U) { ql_log(2U, base_vha, 36875, "HW State: FAILED.\n"); qla82xx_clear_drv_active(ha); qla82xx_wr_32(ha, 136323392UL, 6U); } else { ql_log(2U, base_vha, 36876, "HW State: READY.\n"); qla82xx_wr_32(ha, 136323392UL, 3U); qla82xx_idc_unlock(ha); ha->flags.isp82xx_fw_hung = 0U; tmp___3 = qla82xx_restart_isp(base_vha); rval = (uint32_t )tmp___3; qla82xx_idc_lock(ha); qla82xx_wr_32(ha, 136323396UL, 0U); qla82xx_set_drv_active(base_vha); } qla82xx_idc_unlock(ha); } else { ql_dbg(2097152U, base_vha, 36877, "This devfn is not reset owner = 0x%x.\n", (ha->pdev)->devfn); tmp___5 = qla82xx_rd_32(ha, 136323392UL); if (tmp___5 == 3) { ha->flags.isp82xx_fw_hung = 0U; tmp___4 = qla82xx_restart_isp(base_vha); rval = (uint32_t )tmp___4; qla82xx_idc_lock(ha); qla82xx_set_drv_active(base_vha); qla82xx_idc_unlock(ha); } else { } } clear_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); return (rval); } } static pci_ers_result_t qla2xxx_pci_slot_reset(struct pci_dev *pdev ) { pci_ers_result_t ret ; scsi_qla_host_t *base_vha ; void *tmp ; struct qla_hw_data *ha ; struct rsp_que *rsp ; int rc ; int retries ; int tmp___0 ; int tmp___1 ; uint32_t tmp___2 ; int tmp___3 ; 
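/* qla2xxx_pci_slot_reset: after a PCI slot/link reset it restores and re-saves PCI state, re-enables the device (memory-only or full), re-requests IRQs, reruns the ISP-specific pci_config hook, calls qla82xx_error_recovery() when the 0x4000 device-type bit is set, waits up to ~10 s (10 x msleep(1000)) for the eeh_busy flag to clear, then aborts and re-initializes the ISP; ret starts at 4U and is set to 5U on success -- these values appear to correspond to PCI_ERS_RESULT_DISCONNECT and PCI_ERS_RESULT_RECOVERED in the kernel's pci_ers_result_t enum. */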
int tmp___4 ; { ret = 4U; tmp = pci_get_drvdata(pdev); base_vha = (scsi_qla_host_t *)tmp; ha = base_vha->hw; retries = 10; ql_dbg(2097152U, base_vha, 36868, "Slot Reset.\n"); pdev->error_state = 1U; pci_restore_state(pdev); pci_save_state(pdev); if (ha->mem_only != 0) { rc = pci_enable_device_mem(pdev); } else { rc = pci_enable_device(pdev); } if (rc != 0) { ql_log(1U, base_vha, 36869, "Can\'t re-enable PCI device after reset.\n"); goto exit_slot_reset; } else { } rsp = *(ha->rsp_q_map); tmp___0 = qla2x00_request_irqs(ha, rsp); if (tmp___0 != 0) { goto exit_slot_reset; } else { } tmp___1 = (*((ha->isp_ops)->pci_config))(base_vha); if (tmp___1 != 0) { goto exit_slot_reset; } else { } if ((ha->device_type & 16384U) != 0U) { tmp___2 = qla82xx_error_recovery(base_vha); if (tmp___2 == 0U) { ret = 5U; goto exit_slot_reset; } else { goto exit_slot_reset; } } else { } goto ldv_67852; ldv_67851: msleep(1000U); ldv_67852: ; if (*((unsigned long *)ha + 2UL) != 0UL) { tmp___3 = retries; retries = retries - 1; if (tmp___3 != 0) { goto ldv_67851; } else { goto ldv_67853; } } else { } ldv_67853: set_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); tmp___4 = (*((ha->isp_ops)->abort_isp))(base_vha); if (tmp___4 == 0) { ret = 5U; } else { } clear_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); exit_slot_reset: ql_dbg(2097152U, base_vha, 36878, "slot_reset return %x.\n", ret); return (ret); } } static void qla2xxx_pci_resume(struct pci_dev *pdev ) { scsi_qla_host_t *base_vha ; void *tmp ; struct qla_hw_data *ha ; int ret ; { tmp = pci_get_drvdata(pdev); base_vha = (scsi_qla_host_t *)tmp; ha = base_vha->hw; ql_dbg(2097152U, base_vha, 36879, "pci_resume.\n"); ret = qla2x00_wait_for_hba_online(base_vha); if (ret != 0) { ql_log(0U, base_vha, 36866, "The device failed to resume I/O from slot/link_reset.\n"); } else { } pci_cleanup_aer_uncorrect_error_status(pdev); ha->flags.eeh_busy = 0U; return; } } static void qla83xx_disable_laser(scsi_qla_host_t *vha ) { uint32_t reg ; uint32_t data ; uint32_t fn ; struct qla_hw_data *ha ; struct device_reg_24xx *isp_reg ; unsigned int tmp ; { ha = vha->hw; isp_reg = & (ha->iobase)->isp24; ql_dbg(1073741824U, vha, 75, "Disabling Laser for hba: %p\n", vha); tmp = readl((void const volatile *)(& isp_reg->ctrl_status)); fn = tmp & 61440U; fn = fn >> 12; if ((int )fn & 1) { reg = 2102096U; } else { reg = 2102080U; } data = 25166208U; qla83xx_wr_reg(vha, reg, data); return; } } static struct pci_error_handlers const qla2xxx_err_handler = {(pci_ers_result_t (*)(struct pci_dev * , enum pci_channel_state ))(& qla2xxx_pci_error_detected), & qla2xxx_pci_mmio_enabled, 0, & qla2xxx_pci_slot_reset, 0, & qla2xxx_pci_resume}; static struct pci_device_id qla2xxx_pci_tbl[22U] = { {4215U, 8448U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 8704U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 8960U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 8978U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 8994U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 25362U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 25378U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 9250U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 9266U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 33842U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 21538U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 21554U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 9522U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 8241U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 32769U, 4294967295U, 
4294967295U, 0U, 0U, 0UL}, {4215U, 32801U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 32817U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 61441U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 32836U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 8305U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 8817U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {0U, 0U, 0U, 0U, 0U, 0U, 0UL}}; struct pci_device_id const __mod_pci__qla2xxx_pci_tbl_device_table[22U] ; static struct pci_driver qla2xxx_pci_driver = {{0, 0}, "qla2xxx", (struct pci_device_id const *)(& qla2xxx_pci_tbl), & qla2x00_probe_one, & qla2x00_remove_one, 0, 0, 0, 0, & qla2x00_shutdown, 0, & qla2xxx_err_handler, {0, 0, & __this_module, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{{{{{0}}, 0U, 0U, 0, {0, {0, 0}, 0, 0, 0UL}}}}, {0, 0}}}; static struct file_operations const apidev_fops = {& __this_module, & noop_llseek, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static int qla2x00_module_init(void) { int ret ; { ret = 0; srb_cachep = kmem_cache_create("qla2xxx_srbs", 376UL, 0UL, 8192UL, (void (*)(void * ))0); if ((unsigned long )srb_cachep == (unsigned long )((struct kmem_cache *)0)) { ql_log(0U, (scsi_qla_host_t *)0, 1, "Unable to allocate SRB cache...Failing load!.\n"); return (-12); } else { } ret = qlt_init(); if (ret < 0) { kmem_cache_destroy(srb_cachep); return (ret); } else if (ret > 0) { qla2xxx_transport_functions.disable_target_scan = 1U; qla2xxx_transport_vport_functions.disable_target_scan = 1U; } else { } strcpy((char *)(& qla2x00_version_str), "8.07.00.18-k"); if (ql2xextended_error_logging != 0) { strcat((char *)(& qla2x00_version_str), "-debug"); } else { } qla2xxx_transport_template = fc_attach_transport(& qla2xxx_transport_functions); if ((unsigned long )qla2xxx_transport_template == (unsigned long )((struct scsi_transport_template *)0)) { kmem_cache_destroy(srb_cachep); ql_log(0U, (scsi_qla_host_t *)0, 2, "fc_attach_transport failed...Failing load!.\n"); qlt_exit(); return (-19); } else { } apidev_major = ldv_register_chrdev_27(0U, "ql2xapidev", & apidev_fops); if (apidev_major < 0) { ql_log(0U, (scsi_qla_host_t *)0, 3, "Unable to register char device %s.\n", (char *)"ql2xapidev"); } else { } qla2xxx_transport_vport_template = fc_attach_transport(& qla2xxx_transport_vport_functions); if ((unsigned long )qla2xxx_transport_vport_template == (unsigned long )((struct scsi_transport_template *)0)) { kmem_cache_destroy(srb_cachep); qlt_exit(); fc_release_transport(qla2xxx_transport_template); ql_log(0U, (scsi_qla_host_t *)0, 4, "fc_attach_transport vport failed...Failing load!.\n"); return (-19); } else { } ql_log(2U, (scsi_qla_host_t *)0, 5, "QLogic Fibre Channel HBA Driver: %s.\n", (char *)(& qla2x00_version_str)); ret = ldv___pci_register_driver_28(& qla2xxx_pci_driver, & __this_module, "qla2xxx"); if (ret != 0) { kmem_cache_destroy(srb_cachep); qlt_exit(); fc_release_transport(qla2xxx_transport_template); fc_release_transport(qla2xxx_transport_vport_template); ql_log(0U, (scsi_qla_host_t *)0, 6, "pci_register_driver failed...ret=%d Failing load!.\n", ret); } else { } return (ret); } } static void qla2x00_module_exit(void) { { ldv_unregister_chrdev_29((unsigned int )apidev_major, "ql2xapidev"); ldv_pci_unregister_driver_30(& qla2xxx_pci_driver); qla2x00_release_firmware(); kmem_cache_destroy(srb_cachep); qlt_exit(); if ((unsigned long )ctx_cachep != (unsigned long )((struct kmem_cache *)0)) { kmem_cache_destroy(ctx_cachep); } else { } 
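/* Remaining teardown for qla2x00_module_exit: release the two FC transport templates that qla2x00_module_init() attached. */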
fc_release_transport(qla2xxx_transport_template); fc_release_transport(qla2xxx_transport_vport_template); return; } } extern int ldv_release_85(void) ; extern int ldv_probe_78(void) ; extern int ldv_release_86(void) ; extern int ldv_release_77(void) ; extern int ldv_release_80(void) ; extern int ldv_suspend_77(void) ; extern int ldv_probe_82(void) ; extern int ldv_probe_81(void) ; int ldv_retval_2 ; extern int ldv_release_87(void) ; extern int ldv_probe_87(void) ; int ldv_retval_1 ; extern void ldv_initialize(void) ; extern int ldv_release_79(void) ; extern int ldv_probe_77(void) ; extern int ldv_probe_84(void) ; extern int ldv_probe_79(void) ; extern int ldv_probe_85(void) ; extern int ldv_release_84(void) ; extern int ldv_release_81(void) ; extern int ldv_release_78(void) ; extern int ldv_probe_86(void) ; extern int ldv_release_83(void) ; extern int ldv_probe_83(void) ; extern int ldv_open_75(void) ; extern int ldv_probe_80(void) ; extern int ldv_release_75(void) ; void ldv_check_final_state(void) ; int ldv_retval_3 ; extern int ldv_release_82(void) ; void activate_work_5(struct work_struct *work , int state ) { { if (ldv_work_5_0 == 0) { ldv_work_struct_5_0 = work; ldv_work_5_0 = state; return; } else { } if (ldv_work_5_1 == 0) { ldv_work_struct_5_1 = work; ldv_work_5_1 = state; return; } else { } if (ldv_work_5_2 == 0) { ldv_work_struct_5_2 = work; ldv_work_5_2 = state; return; } else { } if (ldv_work_5_3 == 0) { ldv_work_struct_5_3 = work; ldv_work_5_3 = state; return; } else { } return; } } void choose_timer_13(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_13 = 2; return; } } void ldv_initialize_isp_operations_85(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(1360UL); qla24xx_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_init_zalloc(12288UL); qla24xx_isp_ops_group1 = (struct qla_hw_data *)tmp___0; tmp___1 = ldv_init_zalloc(136UL); qla24xx_isp_ops_group2 = (struct fc_port *)tmp___1; return; } } void ldv_initialize_isp_operations_78(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(1360UL); qla27xx_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_init_zalloc(12288UL); qla27xx_isp_ops_group1 = (struct qla_hw_data *)tmp___0; tmp___1 = ldv_init_zalloc(136UL); qla27xx_isp_ops_group2 = (struct fc_port *)tmp___1; return; } } void work_init_5(void) { { ldv_work_5_0 = 0; ldv_work_5_1 = 0; ldv_work_5_2 = 0; ldv_work_5_3 = 0; return; } } void call_and_disable_all_4(int state ) { { if (ldv_work_4_0 == state) { call_and_disable_work_4(ldv_work_struct_4_0); } else { } if (ldv_work_4_1 == state) { call_and_disable_work_4(ldv_work_struct_4_1); } else { } if (ldv_work_4_2 == state) { call_and_disable_work_4(ldv_work_struct_4_2); } else { } if (ldv_work_4_3 == state) { call_and_disable_work_4(ldv_work_struct_4_3); } else { } return; } } void activate_work_1(struct work_struct *work , int state ) { { if (ldv_work_1_0 == 0) { ldv_work_struct_1_0 = work; ldv_work_1_0 = state; return; } else { } if (ldv_work_1_1 == 0) { ldv_work_struct_1_1 = work; ldv_work_1_1 = state; return; } else { } if (ldv_work_1_2 == 0) { ldv_work_struct_1_2 = work; ldv_work_1_2 = state; return; } else { } if (ldv_work_1_3 == 0) { ldv_work_struct_1_3 = work; ldv_work_1_3 = state; return; } else { } return; } } void ldv_initialize_isp_operations_81(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(1360UL); qla8044_isp_ops_group0 = (struct 
scsi_qla_host *)tmp; tmp___0 = ldv_init_zalloc(12288UL); qla8044_isp_ops_group1 = (struct qla_hw_data *)tmp___0; tmp___1 = ldv_init_zalloc(136UL); qla8044_isp_ops_group2 = (struct fc_port *)tmp___1; return; } } void call_and_disable_work_3(struct work_struct *work ) { { if ((ldv_work_3_0 == 2 || ldv_work_3_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_0) { qla83xx_nic_core_reset_work(work); ldv_work_3_0 = 1; return; } else { } if ((ldv_work_3_1 == 2 || ldv_work_3_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_1) { qla83xx_nic_core_reset_work(work); ldv_work_3_1 = 1; return; } else { } if ((ldv_work_3_2 == 2 || ldv_work_3_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_2) { qla83xx_nic_core_reset_work(work); ldv_work_3_2 = 1; return; } else { } if ((ldv_work_3_3 == 2 || ldv_work_3_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_3) { qla83xx_nic_core_reset_work(work); ldv_work_3_3 = 1; return; } else { } return; } } void disable_work_3(struct work_struct *work ) { { if ((ldv_work_3_0 == 3 || ldv_work_3_0 == 2) && (unsigned long )ldv_work_struct_3_0 == (unsigned long )work) { ldv_work_3_0 = 1; } else { } if ((ldv_work_3_1 == 3 || ldv_work_3_1 == 2) && (unsigned long )ldv_work_struct_3_1 == (unsigned long )work) { ldv_work_3_1 = 1; } else { } if ((ldv_work_3_2 == 3 || ldv_work_3_2 == 2) && (unsigned long )ldv_work_struct_3_2 == (unsigned long )work) { ldv_work_3_2 = 1; } else { } if ((ldv_work_3_3 == 3 || ldv_work_3_3 == 2) && (unsigned long )ldv_work_struct_3_3 == (unsigned long )work) { ldv_work_3_3 = 1; } else { } return; } } void ldv_initialize_isp_operations_82(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(1360UL); qla82xx_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_init_zalloc(12288UL); qla82xx_isp_ops_group1 = (struct qla_hw_data *)tmp___0; tmp___1 = ldv_init_zalloc(136UL); qla82xx_isp_ops_group2 = (struct fc_port *)tmp___1; return; } } void work_init_1(void) { { ldv_work_1_0 = 0; ldv_work_1_1 = 0; ldv_work_1_2 = 0; ldv_work_1_3 = 0; return; } } void invoke_work_4(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_4_0 == 2 || ldv_work_4_0 == 3) { ldv_work_4_0 = 4; qla83xx_idc_state_handler_work(ldv_work_struct_4_0); ldv_work_4_0 = 1; } else { } goto ldv_68006; case 1: ; if (ldv_work_4_1 == 2 || ldv_work_4_1 == 3) { ldv_work_4_1 = 4; qla83xx_idc_state_handler_work(ldv_work_struct_4_0); ldv_work_4_1 = 1; } else { } goto ldv_68006; case 2: ; if (ldv_work_4_2 == 2 || ldv_work_4_2 == 3) { ldv_work_4_2 = 4; qla83xx_idc_state_handler_work(ldv_work_struct_4_0); ldv_work_4_2 = 1; } else { } goto ldv_68006; case 3: ; if (ldv_work_4_3 == 2 || ldv_work_4_3 == 3) { ldv_work_4_3 = 4; qla83xx_idc_state_handler_work(ldv_work_struct_4_0); ldv_work_4_3 = 1; } else { } goto ldv_68006; default: ldv_stop(); } ldv_68006: ; return; } } void activate_pending_timer_13(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_13 == (unsigned long )timer) { if (ldv_timer_state_13 == 2 || pending_flag != 0) { ldv_timer_list_13 = timer; ldv_timer_list_13->data = data; ldv_timer_state_13 = 1; } else { } return; } else { } reg_timer_13(timer); ldv_timer_list_13->data = data; return; } } void ldv_pci_driver_76(void) { void *tmp ; { tmp = ldv_init_zalloc(2976UL); qla2xxx_pci_driver_group1 = (struct pci_dev *)tmp; return; } } void activate_work_2(struct work_struct *work , int state ) { { if (ldv_work_2_0 
== 0) { ldv_work_struct_2_0 = work; ldv_work_2_0 = state; return; } else { } if (ldv_work_2_1 == 0) { ldv_work_struct_2_1 = work; ldv_work_2_1 = state; return; } else { } if (ldv_work_2_2 == 0) { ldv_work_struct_2_2 = work; ldv_work_2_2 = state; return; } else { } if (ldv_work_2_3 == 0) { ldv_work_struct_2_3 = work; ldv_work_2_3 = state; return; } else { } return; } } void invoke_work_5(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_5_0 == 2 || ldv_work_5_0 == 3) { ldv_work_5_0 = 4; qla83xx_nic_core_unrecoverable_work(ldv_work_struct_5_0); ldv_work_5_0 = 1; } else { } goto ldv_68029; case 1: ; if (ldv_work_5_1 == 2 || ldv_work_5_1 == 3) { ldv_work_5_1 = 4; qla83xx_nic_core_unrecoverable_work(ldv_work_struct_5_0); ldv_work_5_1 = 1; } else { } goto ldv_68029; case 2: ; if (ldv_work_5_2 == 2 || ldv_work_5_2 == 3) { ldv_work_5_2 = 4; qla83xx_nic_core_unrecoverable_work(ldv_work_struct_5_0); ldv_work_5_2 = 1; } else { } goto ldv_68029; case 3: ; if (ldv_work_5_3 == 2 || ldv_work_5_3 == 3) { ldv_work_5_3 = 4; qla83xx_nic_core_unrecoverable_work(ldv_work_struct_5_0); ldv_work_5_3 = 1; } else { } goto ldv_68029; default: ldv_stop(); } ldv_68029: ; return; } } void ldv_initialize_scsi_host_template_88(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = __VERIFIER_nondet_pointer(); qla2xxx_driver_template_group0 = (struct scsi_cmnd *)tmp; tmp___0 = ldv_init_zalloc(3816UL); qla2xxx_driver_template_group1 = (struct Scsi_Host *)tmp___0; tmp___1 = __VERIFIER_nondet_pointer(); qla2xxx_driver_template_group2 = (struct scsi_device *)tmp___1; return; } } void ldv_initialize_isp_operations_79(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(1360UL); qlafx00_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_init_zalloc(12288UL); qlafx00_isp_ops_group1 = (struct qla_hw_data *)tmp___0; tmp___1 = ldv_init_zalloc(136UL); qlafx00_isp_ops_group2 = (struct fc_port *)tmp___1; return; } } void ldv_initialize_isp_operations_83(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(1360UL); qla81xx_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_init_zalloc(12288UL); qla81xx_isp_ops_group1 = (struct qla_hw_data *)tmp___0; tmp___1 = ldv_init_zalloc(136UL); qla81xx_isp_ops_group2 = (struct fc_port *)tmp___1; return; } } void disable_work_4(struct work_struct *work ) { { if ((ldv_work_4_0 == 3 || ldv_work_4_0 == 2) && (unsigned long )ldv_work_struct_4_0 == (unsigned long )work) { ldv_work_4_0 = 1; } else { } if ((ldv_work_4_1 == 3 || ldv_work_4_1 == 2) && (unsigned long )ldv_work_struct_4_1 == (unsigned long )work) { ldv_work_4_1 = 1; } else { } if ((ldv_work_4_2 == 3 || ldv_work_4_2 == 2) && (unsigned long )ldv_work_struct_4_2 == (unsigned long )work) { ldv_work_4_2 = 1; } else { } if ((ldv_work_4_3 == 3 || ldv_work_4_3 == 2) && (unsigned long )ldv_work_struct_4_3 == (unsigned long )work) { ldv_work_4_3 = 1; } else { } return; } } void work_init_4(void) { { ldv_work_4_0 = 0; ldv_work_4_1 = 0; ldv_work_4_2 = 0; ldv_work_4_3 = 0; return; } } void invoke_work_1(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_1_0 == 2 || ldv_work_1_0 == 3) { ldv_work_1_0 = 4; qla2x00_disable_board_on_pci_error(ldv_work_struct_1_0); ldv_work_1_0 = 1; } else { } goto ldv_68055; case 1: ; if (ldv_work_1_1 == 2 || ldv_work_1_1 == 3) { ldv_work_1_1 = 4; qla2x00_disable_board_on_pci_error(ldv_work_struct_1_0); ldv_work_1_1 = 1; } else { } goto ldv_68055; case 2: ; if (ldv_work_1_2 == 2 || 
ldv_work_1_2 == 3) { ldv_work_1_2 = 4; qla2x00_disable_board_on_pci_error(ldv_work_struct_1_0); ldv_work_1_2 = 1; } else { } goto ldv_68055; case 3: ; if (ldv_work_1_3 == 2 || ldv_work_1_3 == 3) { ldv_work_1_3 = 4; qla2x00_disable_board_on_pci_error(ldv_work_struct_1_0); ldv_work_1_3 = 1; } else { } goto ldv_68055; default: ldv_stop(); } ldv_68055: ; return; } } void call_and_disable_all_3(int state ) { { if (ldv_work_3_0 == state) { call_and_disable_work_3(ldv_work_struct_3_0); } else { } if (ldv_work_3_1 == state) { call_and_disable_work_3(ldv_work_struct_3_1); } else { } if (ldv_work_3_2 == state) { call_and_disable_work_3(ldv_work_struct_3_2); } else { } if (ldv_work_3_3 == state) { call_and_disable_work_3(ldv_work_struct_3_3); } else { } return; } } void activate_pending_timer_14(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_14 == (unsigned long )timer) { if (ldv_timer_state_14 == 2 || pending_flag != 0) { ldv_timer_list_14 = timer; ldv_timer_list_14->data = data; ldv_timer_state_14 = 1; } else { } return; } else { } reg_timer_14(timer); ldv_timer_list_14->data = data; return; } } void call_and_disable_work_4(struct work_struct *work ) { { if ((ldv_work_4_0 == 2 || ldv_work_4_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_4_0) { qla83xx_idc_state_handler_work(work); ldv_work_4_0 = 1; return; } else { } if ((ldv_work_4_1 == 2 || ldv_work_4_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_4_1) { qla83xx_idc_state_handler_work(work); ldv_work_4_1 = 1; return; } else { } if ((ldv_work_4_2 == 2 || ldv_work_4_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_4_2) { qla83xx_idc_state_handler_work(work); ldv_work_4_2 = 1; return; } else { } if ((ldv_work_4_3 == 2 || ldv_work_4_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_4_3) { qla83xx_idc_state_handler_work(work); ldv_work_4_3 = 1; return; } else { } return; } } void work_init_3(void) { { ldv_work_3_0 = 0; ldv_work_3_1 = 0; ldv_work_3_2 = 0; ldv_work_3_3 = 0; return; } } void call_and_disable_work_1(struct work_struct *work ) { { if ((ldv_work_1_0 == 2 || ldv_work_1_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_1_0) { qla2x00_disable_board_on_pci_error(work); ldv_work_1_0 = 1; return; } else { } if ((ldv_work_1_1 == 2 || ldv_work_1_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_1_1) { qla2x00_disable_board_on_pci_error(work); ldv_work_1_1 = 1; return; } else { } if ((ldv_work_1_2 == 2 || ldv_work_1_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_1_2) { qla2x00_disable_board_on_pci_error(work); ldv_work_1_2 = 1; return; } else { } if ((ldv_work_1_3 == 2 || ldv_work_1_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_1_3) { qla2x00_disable_board_on_pci_error(work); ldv_work_1_3 = 1; return; } else { } return; } } void disable_suitable_timer_13(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_13) { ldv_timer_state_13 = 0; return; } else { } return; } } void ldv_file_operations_75(void) { void *tmp ; { apidev_fops_group1 = ldv_init_zalloc(1000UL); tmp = ldv_init_zalloc(504UL); apidev_fops_group2 = (struct file *)tmp; return; } } void call_and_disable_all_2(int state ) { { if (ldv_work_2_0 == state) { call_and_disable_work_2(ldv_work_struct_2_0); } else { } if (ldv_work_2_1 == state) { call_and_disable_work_2(ldv_work_struct_2_1); } else { } if (ldv_work_2_2 == state) { 
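/* slot 2 of the four work slots tracked by this environment model: run the queued work via call_and_disable_work_2() (which invokes qla83xx_service_idc_aen and resets the slot state to 1), mirroring the handling of slots 0, 1 and 3 */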
call_and_disable_work_2(ldv_work_struct_2_2); } else { } if (ldv_work_2_3 == state) { call_and_disable_work_2(ldv_work_struct_2_3); } else { } return; } } void activate_work_3(struct work_struct *work , int state ) { { if (ldv_work_3_0 == 0) { ldv_work_struct_3_0 = work; ldv_work_3_0 = state; return; } else { } if (ldv_work_3_1 == 0) { ldv_work_struct_3_1 = work; ldv_work_3_1 = state; return; } else { } if (ldv_work_3_2 == 0) { ldv_work_struct_3_2 = work; ldv_work_3_2 = state; return; } else { } if (ldv_work_3_3 == 0) { ldv_work_struct_3_3 = work; ldv_work_3_3 = state; return; } else { } return; } } void ldv_initialize_isp_operations_87(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(1360UL); qla2100_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_init_zalloc(12288UL); qla2100_isp_ops_group1 = (struct qla_hw_data *)tmp___0; tmp___1 = ldv_init_zalloc(136UL); qla2100_isp_ops_group2 = (struct fc_port *)tmp___1; return; } } void disable_work_5(struct work_struct *work ) { { if ((ldv_work_5_0 == 3 || ldv_work_5_0 == 2) && (unsigned long )ldv_work_struct_5_0 == (unsigned long )work) { ldv_work_5_0 = 1; } else { } if ((ldv_work_5_1 == 3 || ldv_work_5_1 == 2) && (unsigned long )ldv_work_struct_5_1 == (unsigned long )work) { ldv_work_5_1 = 1; } else { } if ((ldv_work_5_2 == 3 || ldv_work_5_2 == 2) && (unsigned long )ldv_work_struct_5_2 == (unsigned long )work) { ldv_work_5_2 = 1; } else { } if ((ldv_work_5_3 == 3 || ldv_work_5_3 == 2) && (unsigned long )ldv_work_struct_5_3 == (unsigned long )work) { ldv_work_5_3 = 1; } else { } return; } } void disable_work_1(struct work_struct *work ) { { if ((ldv_work_1_0 == 3 || ldv_work_1_0 == 2) && (unsigned long )ldv_work_struct_1_0 == (unsigned long )work) { ldv_work_1_0 = 1; } else { } if ((ldv_work_1_1 == 3 || ldv_work_1_1 == 2) && (unsigned long )ldv_work_struct_1_1 == (unsigned long )work) { ldv_work_1_1 = 1; } else { } if ((ldv_work_1_2 == 3 || ldv_work_1_2 == 2) && (unsigned long )ldv_work_struct_1_2 == (unsigned long )work) { ldv_work_1_2 = 1; } else { } if ((ldv_work_1_3 == 3 || ldv_work_1_3 == 2) && (unsigned long )ldv_work_struct_1_3 == (unsigned long )work) { ldv_work_1_3 = 1; } else { } return; } } void ldv_initialize_isp_operations_80(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(1360UL); qla83xx_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_init_zalloc(12288UL); qla83xx_isp_ops_group1 = (struct qla_hw_data *)tmp___0; tmp___1 = ldv_init_zalloc(136UL); qla83xx_isp_ops_group2 = (struct fc_port *)tmp___1; return; } } void call_and_disable_work_5(struct work_struct *work ) { { if ((ldv_work_5_0 == 2 || ldv_work_5_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_5_0) { qla83xx_nic_core_unrecoverable_work(work); ldv_work_5_0 = 1; return; } else { } if ((ldv_work_5_1 == 2 || ldv_work_5_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_5_1) { qla83xx_nic_core_unrecoverable_work(work); ldv_work_5_1 = 1; return; } else { } if ((ldv_work_5_2 == 2 || ldv_work_5_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_5_2) { qla83xx_nic_core_unrecoverable_work(work); ldv_work_5_2 = 1; return; } else { } if ((ldv_work_5_3 == 2 || ldv_work_5_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_5_3) { qla83xx_nic_core_unrecoverable_work(work); ldv_work_5_3 = 1; return; } else { } return; } } int reg_timer_14(struct timer_list *timer ) { { ldv_timer_list_14 = timer; ldv_timer_state_14 = 1; return (0); } } void 
invoke_work_2(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_2_0 == 2 || ldv_work_2_0 == 3) { ldv_work_2_0 = 4; qla83xx_service_idc_aen(ldv_work_struct_2_0); ldv_work_2_0 = 1; } else { } goto ldv_68120; case 1: ; if (ldv_work_2_1 == 2 || ldv_work_2_1 == 3) { ldv_work_2_1 = 4; qla83xx_service_idc_aen(ldv_work_struct_2_0); ldv_work_2_1 = 1; } else { } goto ldv_68120; case 2: ; if (ldv_work_2_2 == 2 || ldv_work_2_2 == 3) { ldv_work_2_2 = 4; qla83xx_service_idc_aen(ldv_work_struct_2_0); ldv_work_2_2 = 1; } else { } goto ldv_68120; case 3: ; if (ldv_work_2_3 == 2 || ldv_work_2_3 == 3) { ldv_work_2_3 = 4; qla83xx_service_idc_aen(ldv_work_struct_2_0); ldv_work_2_3 = 1; } else { } goto ldv_68120; default: ldv_stop(); } ldv_68120: ; return; } } void ldv_initialize_pci_error_handlers_77(void) { void *tmp ; { tmp = __VERIFIER_nondet_pointer(); qla2xxx_err_handler_group0 = (struct pci_dev *)tmp; return; } } void activate_work_4(struct work_struct *work , int state ) { { if (ldv_work_4_0 == 0) { ldv_work_struct_4_0 = work; ldv_work_4_0 = state; return; } else { } if (ldv_work_4_1 == 0) { ldv_work_struct_4_1 = work; ldv_work_4_1 = state; return; } else { } if (ldv_work_4_2 == 0) { ldv_work_struct_4_2 = work; ldv_work_4_2 = state; return; } else { } if (ldv_work_4_3 == 0) { ldv_work_struct_4_3 = work; ldv_work_4_3 = state; return; } else { } return; } } void ldv_initialize_isp_operations_84(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(1360UL); qla25xx_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_init_zalloc(12288UL); qla25xx_isp_ops_group1 = (struct qla_hw_data *)tmp___0; tmp___1 = ldv_init_zalloc(136UL); qla25xx_isp_ops_group2 = (struct fc_port *)tmp___1; return; } } void call_and_disable_all_5(int state ) { { if (ldv_work_5_0 == state) { call_and_disable_work_5(ldv_work_struct_5_0); } else { } if (ldv_work_5_1 == state) { call_and_disable_work_5(ldv_work_struct_5_1); } else { } if (ldv_work_5_2 == state) { call_and_disable_work_5(ldv_work_struct_5_2); } else { } if (ldv_work_5_3 == state) { call_and_disable_work_5(ldv_work_struct_5_3); } else { } return; } } void work_init_2(void) { { ldv_work_2_0 = 0; ldv_work_2_1 = 0; ldv_work_2_2 = 0; ldv_work_2_3 = 0; return; } } void disable_suitable_timer_14(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_14) { ldv_timer_state_14 = 0; return; } else { } return; } } void call_and_disable_all_1(int state ) { { if (ldv_work_1_0 == state) { call_and_disable_work_1(ldv_work_struct_1_0); } else { } if (ldv_work_1_1 == state) { call_and_disable_work_1(ldv_work_struct_1_1); } else { } if (ldv_work_1_2 == state) { call_and_disable_work_1(ldv_work_struct_1_2); } else { } if (ldv_work_1_3 == state) { call_and_disable_work_1(ldv_work_struct_1_3); } else { } return; } } void ldv_initialize_isp_operations_86(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(1360UL); qla2300_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_init_zalloc(12288UL); qla2300_isp_ops_group1 = (struct qla_hw_data *)tmp___0; tmp___1 = ldv_init_zalloc(136UL); qla2300_isp_ops_group2 = (struct fc_port *)tmp___1; return; } } void disable_work_2(struct work_struct *work ) { { if ((ldv_work_2_0 == 3 || ldv_work_2_0 == 2) && (unsigned long )ldv_work_struct_2_0 == (unsigned long )work) { ldv_work_2_0 = 1; } else { } if ((ldv_work_2_1 == 3 || ldv_work_2_1 == 2) && (unsigned long )ldv_work_struct_2_1 == (unsigned long )work) { ldv_work_2_1 = 1; } 
else { } if ((ldv_work_2_2 == 3 || ldv_work_2_2 == 2) && (unsigned long )ldv_work_struct_2_2 == (unsigned long )work) { ldv_work_2_2 = 1; } else { } if ((ldv_work_2_3 == 3 || ldv_work_2_3 == 2) && (unsigned long )ldv_work_struct_2_3 == (unsigned long )work) { ldv_work_2_3 = 1; } else { } return; } } void invoke_work_3(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_3_0 == 2 || ldv_work_3_0 == 3) { ldv_work_3_0 = 4; qla83xx_nic_core_reset_work(ldv_work_struct_3_0); ldv_work_3_0 = 1; } else { } goto ldv_68159; case 1: ; if (ldv_work_3_1 == 2 || ldv_work_3_1 == 3) { ldv_work_3_1 = 4; qla83xx_nic_core_reset_work(ldv_work_struct_3_0); ldv_work_3_1 = 1; } else { } goto ldv_68159; case 2: ; if (ldv_work_3_2 == 2 || ldv_work_3_2 == 3) { ldv_work_3_2 = 4; qla83xx_nic_core_reset_work(ldv_work_struct_3_0); ldv_work_3_2 = 1; } else { } goto ldv_68159; case 3: ; if (ldv_work_3_3 == 2 || ldv_work_3_3 == 3) { ldv_work_3_3 = 4; qla83xx_nic_core_reset_work(ldv_work_struct_3_0); ldv_work_3_3 = 1; } else { } goto ldv_68159; default: ldv_stop(); } ldv_68159: ; return; } } int reg_timer_13(struct timer_list *timer ) { { ldv_timer_list_13 = timer; ldv_timer_state_13 = 1; return (0); } } void choose_timer_14(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_14 = 2; return; } } void call_and_disable_work_2(struct work_struct *work ) { { if ((ldv_work_2_0 == 2 || ldv_work_2_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_0) { qla83xx_service_idc_aen(work); ldv_work_2_0 = 1; return; } else { } if ((ldv_work_2_1 == 2 || ldv_work_2_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_1) { qla83xx_service_idc_aen(work); ldv_work_2_1 = 1; return; } else { } if ((ldv_work_2_2 == 2 || ldv_work_2_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_2) { qla83xx_service_idc_aen(work); ldv_work_2_2 = 1; return; } else { } if ((ldv_work_2_3 == 2 || ldv_work_2_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_3) { qla83xx_service_idc_aen(work); ldv_work_2_3 = 1; return; } else { } return; } } void ldv_main_exported_31(void) ; void ldv_main_exported_67(void) ; void ldv_main_exported_33(void) ; void ldv_main_exported_32(void) ; void ldv_main_exported_63(void) ; void ldv_main_exported_71(void) ; void ldv_main_exported_70(void) ; void ldv_main_exported_68(void) ; void ldv_main_exported_72(void) ; void ldv_main_exported_44(void) ; void ldv_main_exported_55(void) ; void ldv_main_exported_74(void) ; void ldv_main_exported_57(void) ; void ldv_main_exported_40(void) ; void ldv_main_exported_61(void) ; void ldv_main_exported_69(void) ; void ldv_main_exported_59(void) ; void ldv_main_exported_49(void) ; void ldv_main_exported_35(void) ; void ldv_main_exported_53(void) ; void ldv_main_exported_48(void) ; void ldv_main_exported_42(void) ; void ldv_main_exported_46(void) ; void ldv_main_exported_65(void) ; void ldv_main_exported_50(void) ; void ldv_main_exported_39(void) ; void ldv_main_exported_64(void) ; void ldv_main_exported_36(void) ; void ldv_main_exported_51(void) ; void ldv_main_exported_41(void) ; void ldv_main_exported_58(void) ; void ldv_main_exported_47(void) ; void ldv_main_exported_38(void) ; void ldv_main_exported_52(void) ; void ldv_main_exported_60(void) ; void ldv_main_exported_34(void) ; void ldv_main_exported_56(void) ; void ldv_main_exported_73(void) ; void ldv_main_exported_66(void) ; void ldv_main_exported_45(void) ; void 
ldv_main_exported_37(void) ; void ldv_main_exported_43(void) ; void ldv_main_exported_62(void) ; void ldv_main_exported_54(void) ; int main(void) { uint8_t ldvarg18 ; uint64_t ldvarg32 ; srb_t *ldvarg23 ; void *tmp ; void *ldvarg43 ; void *tmp___0 ; int ldvarg42 ; uint8_t *ldvarg46 ; void *tmp___1 ; uint16_t ldvarg37 ; size_t ldvarg29 ; uint32_t ldvarg44 ; uint32_t ldvarg24 ; uint8_t ldvarg35 ; uint8_t ldvarg38 ; uint32_t ldvarg33 ; uint8_t ldvarg16 ; uint32_t ldvarg14 ; uint32_t ldvarg34 ; void *ldvarg28 ; void *tmp___2 ; srb_t *ldvarg39 ; void *tmp___3 ; uint8_t ldvarg20 ; int ldvarg31 ; uint64_t ldvarg41 ; uint32_t ldvarg13 ; uint8_t ldvarg36 ; int ldvarg40 ; uint32_t ldvarg45 ; char *ldvarg27 ; void *tmp___4 ; uint8_t *ldvarg26 ; void *tmp___5 ; uint16_t *ldvarg15 ; void *tmp___6 ; char *ldvarg30 ; void *tmp___7 ; uint32_t *ldvarg21 ; void *tmp___8 ; uint32_t ldvarg25 ; uint8_t ldvarg17 ; int ldvarg22 ; uint16_t ldvarg19 ; uint32_t ldvarg77 ; uint64_t ldvarg84 ; int ldvarg94 ; srb_t *ldvarg91 ; void *tmp___9 ; uint16_t *ldvarg67 ; void *tmp___10 ; uint16_t ldvarg71 ; uint8_t ldvarg72 ; uint8_t *ldvarg98 ; void *tmp___11 ; uint32_t ldvarg97 ; uint32_t ldvarg64 ; int ldvarg83 ; void *ldvarg95 ; void *tmp___12 ; uint32_t ldvarg85 ; uint8_t ldvarg69 ; uint64_t ldvarg93 ; uint16_t ldvarg89 ; uint32_t ldvarg60 ; void *ldvarg80 ; void *tmp___13 ; uint32_t ldvarg59 ; uint8_t ldvarg87 ; uint32_t ldvarg86 ; uint32_t ldvarg62 ; uint32_t ldvarg65 ; char *ldvarg79 ; void *tmp___14 ; uint8_t *ldvarg66 ; void *tmp___15 ; uint32_t ldvarg96 ; size_t ldvarg81 ; uint8_t *ldvarg63 ; void *tmp___16 ; uint32_t *ldvarg73 ; void *tmp___17 ; uint8_t ldvarg70 ; uint8_t ldvarg88 ; uint8_t *ldvarg78 ; void *tmp___18 ; uint8_t ldvarg68 ; uint8_t ldvarg90 ; uint32_t ldvarg61 ; int ldvarg92 ; int ldvarg74 ; uint32_t ldvarg76 ; char *ldvarg82 ; void *tmp___19 ; srb_t *ldvarg75 ; void *tmp___20 ; uint32_t ldvarg131 ; void *ldvarg134 ; void *tmp___21 ; int ldvarg146 ; uint8_t ldvarg141 ; char *ldvarg136 ; void *tmp___22 ; uint16_t ldvarg125 ; srb_t *ldvarg145 ; void *tmp___23 ; uint32_t ldvarg140 ; uint8_t *ldvarg152 ; void *tmp___24 ; uint8_t *ldvarg132 ; void *tmp___25 ; uint64_t ldvarg138 ; srb_t *ldvarg129 ; void *tmp___26 ; void *ldvarg149 ; void *tmp___27 ; uint8_t ldvarg142 ; uint16_t *ldvarg121 ; void *tmp___28 ; uint32_t ldvarg151 ; int ldvarg137 ; uint8_t ldvarg122 ; uint8_t ldvarg144 ; int ldvarg128 ; uint8_t ldvarg126 ; uint8_t ldvarg123 ; uint32_t ldvarg120 ; size_t ldvarg135 ; uint32_t ldvarg119 ; uint64_t ldvarg147 ; uint32_t *ldvarg127 ; void *tmp___29 ; uint8_t ldvarg124 ; char *ldvarg133 ; void *tmp___30 ; uint32_t ldvarg150 ; uint16_t ldvarg143 ; uint32_t ldvarg130 ; uint32_t ldvarg139 ; int ldvarg148 ; uint8_t *ldvarg178 ; void *tmp___31 ; uint32_t ldvarg157 ; void *ldvarg181 ; void *tmp___32 ; uint16_t ldvarg174 ; uint16_t ldvarg168 ; uint8_t ldvarg165 ; uint8_t ldvarg188 ; uint64_t ldvarg185 ; uint8_t ldvarg191 ; uint8_t ldvarg167 ; void *ldvarg196 ; void *tmp___33 ; uint32_t ldvarg197 ; uint32_t *ldvarg170 ; void *tmp___34 ; uint8_t ldvarg189 ; size_t ldvarg182 ; uint32_t ldvarg198 ; uint8_t *ldvarg163 ; void *tmp___35 ; uint8_t *ldvarg199 ; void *tmp___36 ; uint8_t ldvarg169 ; uint32_t ldvarg161 ; uint8_t *ldvarg160 ; void *tmp___37 ; uint32_t ldvarg158 ; cmd_entry_t *ldvarg175 ; void *tmp___38 ; int ldvarg195 ; uint32_t ldvarg187 ; srb_t *ldvarg172 ; void *tmp___39 ; int ldvarg184 ; uint32_t ldvarg186 ; uint16_t ldvarg190 ; uint32_t ldvarg177 ; int ldvarg193 ; uint32_t ldvarg156 ; uint8_t 
ldvarg166 ; srb_t *ldvarg192 ; void *tmp___40 ; uint16_t *ldvarg164 ; void *tmp___41 ; char *ldvarg183 ; void *tmp___42 ; uint32_t ldvarg176 ; uint64_t ldvarg194 ; srb_t *ldvarg173 ; void *tmp___43 ; char *ldvarg179 ; void *tmp___44 ; uint16_t ldvarg180 ; int ldvarg171 ; uint32_t ldvarg159 ; uint32_t ldvarg162 ; enum pci_channel_state ldvarg200 ; uint32_t ldvarg242 ; void *ldvarg237 ; void *tmp___45 ; uint8_t ldvarg225 ; uint8_t ldvarg247 ; uint32_t ldvarg233 ; char *ldvarg236 ; void *tmp___46 ; uint32_t ldvarg243 ; uint32_t ldvarg234 ; uint8_t ldvarg244 ; uint16_t ldvarg228 ; uint8_t ldvarg226 ; uint32_t ldvarg222 ; uint8_t *ldvarg255 ; void *tmp___47 ; srb_t *ldvarg232 ; void *tmp___48 ; uint32_t *ldvarg230 ; void *tmp___49 ; int ldvarg240 ; uint64_t ldvarg241 ; uint16_t ldvarg246 ; int ldvarg231 ; size_t ldvarg238 ; uint32_t ldvarg254 ; uint16_t *ldvarg224 ; void *tmp___50 ; srb_t *ldvarg248 ; void *tmp___51 ; uint32_t ldvarg223 ; uint8_t *ldvarg235 ; void *tmp___52 ; uint8_t ldvarg245 ; void *ldvarg252 ; void *tmp___53 ; uint8_t ldvarg229 ; uint8_t ldvarg227 ; int ldvarg251 ; uint64_t ldvarg250 ; int ldvarg249 ; char *ldvarg239 ; void *tmp___54 ; uint32_t ldvarg253 ; uint8_t ldvarg290 ; uint8_t ldvarg315 ; int ldvarg308 ; void *ldvarg305 ; void *tmp___55 ; uint8_t ldvarg313 ; char *ldvarg307 ; void *tmp___56 ; cmd_entry_t *ldvarg299 ; void *tmp___57 ; uint32_t ldvarg321 ; uint32_t ldvarg311 ; uint64_t ldvarg309 ; uint32_t ldvarg322 ; uint32_t ldvarg285 ; uint32_t ldvarg300 ; uint32_t ldvarg310 ; int ldvarg295 ; srb_t *ldvarg296 ; void *tmp___58 ; uint32_t ldvarg281 ; char *ldvarg303 ; void *tmp___59 ; uint8_t *ldvarg302 ; void *tmp___60 ; uint8_t *ldvarg284 ; void *tmp___61 ; uint32_t ldvarg280 ; int ldvarg317 ; uint16_t ldvarg304 ; uint8_t ldvarg312 ; uint64_t ldvarg318 ; uint8_t ldvarg293 ; uint16_t *ldvarg288 ; void *tmp___62 ; uint8_t ldvarg291 ; uint8_t ldvarg289 ; size_t ldvarg306 ; uint16_t ldvarg314 ; int ldvarg319 ; uint32_t ldvarg282 ; uint8_t *ldvarg323 ; void *tmp___63 ; uint32_t ldvarg301 ; uint8_t *ldvarg287 ; void *tmp___64 ; uint32_t ldvarg286 ; void *ldvarg320 ; void *tmp___65 ; srb_t *ldvarg297 ; void *tmp___66 ; uint16_t ldvarg292 ; uint32_t ldvarg283 ; uint16_t ldvarg298 ; srb_t *ldvarg316 ; void *tmp___67 ; uint32_t *ldvarg294 ; void *tmp___68 ; struct pci_device_id *ldvarg324 ; void *tmp___69 ; int ldvarg349 ; unsigned long ldvarg350 ; void *ldvarg372 ; void *tmp___70 ; uint8_t *ldvarg390 ; void *tmp___71 ; uint32_t ldvarg389 ; uint32_t ldvarg357 ; uint16_t ldvarg381 ; uint32_t ldvarg352 ; uint8_t ldvarg380 ; uint16_t ldvarg363 ; uint32_t ldvarg369 ; srb_t *ldvarg383 ; void *tmp___72 ; uint8_t ldvarg362 ; uint8_t ldvarg379 ; uint32_t *ldvarg365 ; void *tmp___73 ; uint8_t ldvarg360 ; uint8_t ldvarg364 ; uint64_t ldvarg376 ; uint32_t ldvarg377 ; void *ldvarg387 ; void *tmp___74 ; int ldvarg386 ; char *ldvarg374 ; void *tmp___75 ; uint8_t *ldvarg370 ; void *tmp___76 ; uint8_t ldvarg361 ; uint32_t ldvarg354 ; int ldvarg375 ; int ldvarg384 ; uint8_t *ldvarg358 ; void *tmp___77 ; uint32_t ldvarg351 ; srb_t *ldvarg367 ; void *tmp___78 ; uint32_t ldvarg378 ; uint8_t ldvarg382 ; uint16_t *ldvarg359 ; void *tmp___79 ; uint32_t ldvarg368 ; uint32_t ldvarg353 ; size_t ldvarg373 ; uint32_t ldvarg388 ; uint64_t ldvarg385 ; char *ldvarg371 ; void *tmp___80 ; int ldvarg366 ; uint8_t *ldvarg355 ; void *tmp___81 ; uint32_t ldvarg356 ; uint8_t *ldvarg404 ; void *tmp___82 ; uint32_t ldvarg412 ; uint16_t ldvarg397 ; uint8_t ldvarg396 ; uint8_t ldvarg394 ; char *ldvarg408 ; void 
*tmp___83 ; uint32_t ldvarg403 ; uint8_t *ldvarg424 ; void *tmp___84 ; uint8_t ldvarg416 ; uint8_t ldvarg395 ; uint8_t ldvarg398 ; uint32_t ldvarg423 ; uint16_t ldvarg415 ; int ldvarg418 ; uint32_t ldvarg422 ; size_t ldvarg407 ; uint16_t *ldvarg393 ; void *tmp___85 ; char *ldvarg405 ; void *tmp___86 ; int ldvarg409 ; uint32_t *ldvarg399 ; void *tmp___87 ; void *ldvarg406 ; void *tmp___88 ; uint32_t ldvarg392 ; uint64_t ldvarg419 ; int ldvarg420 ; srb_t *ldvarg401 ; void *tmp___89 ; void *ldvarg421 ; void *tmp___90 ; srb_t *ldvarg417 ; void *tmp___91 ; uint64_t ldvarg410 ; uint32_t ldvarg402 ; int ldvarg400 ; uint32_t ldvarg391 ; uint8_t ldvarg413 ; uint32_t ldvarg411 ; uint8_t ldvarg414 ; loff_t ldvarg426 ; int ldvarg425 ; void *ldvarg469 ; void *tmp___92 ; uint32_t ldvarg448 ; void *ldvarg458 ; void *tmp___93 ; uint32_t ldvarg463 ; srb_t *ldvarg465 ; void *tmp___94 ; size_t ldvarg459 ; uint32_t ldvarg464 ; uint8_t *ldvarg472 ; void *tmp___95 ; int ldvarg461 ; uint8_t *ldvarg449 ; void *tmp___96 ; uint8_t *ldvarg452 ; void *tmp___97 ; char *ldvarg457 ; void *tmp___98 ; uint32_t ldvarg447 ; uint32_t ldvarg450 ; uint64_t ldvarg462 ; uint32_t ldvarg446 ; char *ldvarg460 ; void *tmp___99 ; uint32_t ldvarg471 ; int ldvarg468 ; uint32_t ldvarg451 ; uint32_t ldvarg454 ; uint32_t ldvarg455 ; uint64_t ldvarg467 ; uint8_t *ldvarg456 ; void *tmp___100 ; srb_t *ldvarg453 ; void *tmp___101 ; uint32_t ldvarg470 ; uint32_t ldvarg445 ; int ldvarg466 ; uint16_t *ldvarg487 ; void *tmp___102 ; void *ldvarg500 ; void *tmp___103 ; uint32_t ldvarg479 ; uint8_t *ldvarg518 ; void *tmp___104 ; uint32_t ldvarg485 ; int ldvarg512 ; uint8_t ldvarg488 ; void *ldvarg515 ; void *tmp___105 ; uint32_t ldvarg506 ; size_t ldvarg501 ; uint16_t ldvarg509 ; int ldvarg514 ; uint32_t ldvarg496 ; uint8_t ldvarg507 ; uint8_t *ldvarg483 ; void *tmp___106 ; uint32_t ldvarg484 ; uint64_t ldvarg504 ; uint8_t ldvarg490 ; char *ldvarg499 ; void *tmp___107 ; uint8_t *ldvarg498 ; void *tmp___108 ; uint32_t ldvarg480 ; uint32_t *ldvarg493 ; void *tmp___109 ; uint32_t ldvarg516 ; uint8_t ldvarg489 ; uint8_t ldvarg508 ; uint32_t ldvarg497 ; char *ldvarg502 ; void *tmp___110 ; uint8_t ldvarg510 ; uint32_t ldvarg517 ; uint32_t ldvarg482 ; srb_t *ldvarg495 ; void *tmp___111 ; uint16_t ldvarg491 ; int ldvarg503 ; int ldvarg494 ; uint8_t *ldvarg486 ; void *tmp___112 ; srb_t *ldvarg511 ; void *tmp___113 ; uint32_t ldvarg481 ; uint8_t ldvarg492 ; uint64_t ldvarg513 ; uint32_t ldvarg505 ; int tmp___114 ; int tmp___115 ; int tmp___116 ; int tmp___117 ; int tmp___118 ; int tmp___119 ; int tmp___120 ; int tmp___121 ; int tmp___122 ; int tmp___123 ; int tmp___124 ; int tmp___125 ; int tmp___126 ; int tmp___127 ; int tmp___128 ; int tmp___129 ; { tmp = ldv_init_zalloc(376UL); ldvarg23 = (srb_t *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg43 = tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg46 = (uint8_t *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg28 = tmp___2; tmp___3 = ldv_init_zalloc(376UL); ldvarg39 = (srb_t *)tmp___3; tmp___4 = ldv_init_zalloc(1UL); ldvarg27 = (char *)tmp___4; tmp___5 = ldv_init_zalloc(1UL); ldvarg26 = (uint8_t *)tmp___5; tmp___6 = ldv_init_zalloc(2UL); ldvarg15 = (uint16_t *)tmp___6; tmp___7 = ldv_init_zalloc(1UL); ldvarg30 = (char *)tmp___7; tmp___8 = ldv_init_zalloc(4UL); ldvarg21 = (uint32_t *)tmp___8; tmp___9 = ldv_init_zalloc(376UL); ldvarg91 = (srb_t *)tmp___9; tmp___10 = ldv_init_zalloc(2UL); ldvarg67 = (uint16_t *)tmp___10; tmp___11 = ldv_init_zalloc(1UL); ldvarg98 = (uint8_t *)tmp___11; tmp___12 = ldv_init_zalloc(1UL); 
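/* main() of the LDV harness continues pre-allocating zero-initialized stub objects (srb_t structures, byte/word scratch buffers, a pci_device_id) with ldv_init_zalloc() for every driver-callback argument; once these allocations finish, ldv_initialize() runs and the scalar arguments are cleared with ldv_memset(). */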
ldvarg95 = tmp___12; tmp___13 = ldv_init_zalloc(1UL); ldvarg80 = tmp___13; tmp___14 = ldv_init_zalloc(1UL); ldvarg79 = (char *)tmp___14; tmp___15 = ldv_init_zalloc(1UL); ldvarg66 = (uint8_t *)tmp___15; tmp___16 = ldv_init_zalloc(1UL); ldvarg63 = (uint8_t *)tmp___16; tmp___17 = ldv_init_zalloc(4UL); ldvarg73 = (uint32_t *)tmp___17; tmp___18 = ldv_init_zalloc(1UL); ldvarg78 = (uint8_t *)tmp___18; tmp___19 = ldv_init_zalloc(1UL); ldvarg82 = (char *)tmp___19; tmp___20 = ldv_init_zalloc(376UL); ldvarg75 = (srb_t *)tmp___20; tmp___21 = ldv_init_zalloc(1UL); ldvarg134 = tmp___21; tmp___22 = ldv_init_zalloc(1UL); ldvarg136 = (char *)tmp___22; tmp___23 = ldv_init_zalloc(376UL); ldvarg145 = (srb_t *)tmp___23; tmp___24 = ldv_init_zalloc(1UL); ldvarg152 = (uint8_t *)tmp___24; tmp___25 = ldv_init_zalloc(1UL); ldvarg132 = (uint8_t *)tmp___25; tmp___26 = ldv_init_zalloc(376UL); ldvarg129 = (srb_t *)tmp___26; tmp___27 = ldv_init_zalloc(1UL); ldvarg149 = tmp___27; tmp___28 = ldv_init_zalloc(2UL); ldvarg121 = (uint16_t *)tmp___28; tmp___29 = ldv_init_zalloc(4UL); ldvarg127 = (uint32_t *)tmp___29; tmp___30 = ldv_init_zalloc(1UL); ldvarg133 = (char *)tmp___30; tmp___31 = ldv_init_zalloc(1UL); ldvarg178 = (uint8_t *)tmp___31; tmp___32 = ldv_init_zalloc(1UL); ldvarg181 = tmp___32; tmp___33 = ldv_init_zalloc(1UL); ldvarg196 = tmp___33; tmp___34 = ldv_init_zalloc(4UL); ldvarg170 = (uint32_t *)tmp___34; tmp___35 = ldv_init_zalloc(1UL); ldvarg163 = (uint8_t *)tmp___35; tmp___36 = ldv_init_zalloc(1UL); ldvarg199 = (uint8_t *)tmp___36; tmp___37 = ldv_init_zalloc(1UL); ldvarg160 = (uint8_t *)tmp___37; tmp___38 = ldv_init_zalloc(64UL); ldvarg175 = (cmd_entry_t *)tmp___38; tmp___39 = ldv_init_zalloc(376UL); ldvarg172 = (srb_t *)tmp___39; tmp___40 = ldv_init_zalloc(376UL); ldvarg192 = (srb_t *)tmp___40; tmp___41 = ldv_init_zalloc(2UL); ldvarg164 = (uint16_t *)tmp___41; tmp___42 = ldv_init_zalloc(1UL); ldvarg183 = (char *)tmp___42; tmp___43 = ldv_init_zalloc(376UL); ldvarg173 = (srb_t *)tmp___43; tmp___44 = ldv_init_zalloc(1UL); ldvarg179 = (char *)tmp___44; tmp___45 = ldv_init_zalloc(1UL); ldvarg237 = tmp___45; tmp___46 = ldv_init_zalloc(1UL); ldvarg236 = (char *)tmp___46; tmp___47 = ldv_init_zalloc(1UL); ldvarg255 = (uint8_t *)tmp___47; tmp___48 = ldv_init_zalloc(376UL); ldvarg232 = (srb_t *)tmp___48; tmp___49 = ldv_init_zalloc(4UL); ldvarg230 = (uint32_t *)tmp___49; tmp___50 = ldv_init_zalloc(2UL); ldvarg224 = (uint16_t *)tmp___50; tmp___51 = ldv_init_zalloc(376UL); ldvarg248 = (srb_t *)tmp___51; tmp___52 = ldv_init_zalloc(1UL); ldvarg235 = (uint8_t *)tmp___52; tmp___53 = ldv_init_zalloc(1UL); ldvarg252 = tmp___53; tmp___54 = ldv_init_zalloc(1UL); ldvarg239 = (char *)tmp___54; tmp___55 = ldv_init_zalloc(1UL); ldvarg305 = tmp___55; tmp___56 = ldv_init_zalloc(1UL); ldvarg307 = (char *)tmp___56; tmp___57 = ldv_init_zalloc(64UL); ldvarg299 = (cmd_entry_t *)tmp___57; tmp___58 = ldv_init_zalloc(376UL); ldvarg296 = (srb_t *)tmp___58; tmp___59 = ldv_init_zalloc(1UL); ldvarg303 = (char *)tmp___59; tmp___60 = ldv_init_zalloc(1UL); ldvarg302 = (uint8_t *)tmp___60; tmp___61 = ldv_init_zalloc(1UL); ldvarg284 = (uint8_t *)tmp___61; tmp___62 = ldv_init_zalloc(2UL); ldvarg288 = (uint16_t *)tmp___62; tmp___63 = ldv_init_zalloc(1UL); ldvarg323 = (uint8_t *)tmp___63; tmp___64 = ldv_init_zalloc(1UL); ldvarg287 = (uint8_t *)tmp___64; tmp___65 = ldv_init_zalloc(1UL); ldvarg320 = tmp___65; tmp___66 = ldv_init_zalloc(376UL); ldvarg297 = (srb_t *)tmp___66; tmp___67 = ldv_init_zalloc(376UL); ldvarg316 = (srb_t *)tmp___67; tmp___68 = 
ldv_init_zalloc(4UL); ldvarg294 = (uint32_t *)tmp___68; tmp___69 = ldv_init_zalloc(32UL); ldvarg324 = (struct pci_device_id *)tmp___69; tmp___70 = ldv_init_zalloc(1UL); ldvarg372 = tmp___70; tmp___71 = ldv_init_zalloc(1UL); ldvarg390 = (uint8_t *)tmp___71; tmp___72 = ldv_init_zalloc(376UL); ldvarg383 = (srb_t *)tmp___72; tmp___73 = ldv_init_zalloc(4UL); ldvarg365 = (uint32_t *)tmp___73; tmp___74 = ldv_init_zalloc(1UL); ldvarg387 = tmp___74; tmp___75 = ldv_init_zalloc(1UL); ldvarg374 = (char *)tmp___75; tmp___76 = ldv_init_zalloc(1UL); ldvarg370 = (uint8_t *)tmp___76; tmp___77 = ldv_init_zalloc(1UL); ldvarg358 = (uint8_t *)tmp___77; tmp___78 = ldv_init_zalloc(376UL); ldvarg367 = (srb_t *)tmp___78; tmp___79 = ldv_init_zalloc(2UL); ldvarg359 = (uint16_t *)tmp___79; tmp___80 = ldv_init_zalloc(1UL); ldvarg371 = (char *)tmp___80; tmp___81 = ldv_init_zalloc(1UL); ldvarg355 = (uint8_t *)tmp___81; tmp___82 = ldv_init_zalloc(1UL); ldvarg404 = (uint8_t *)tmp___82; tmp___83 = ldv_init_zalloc(1UL); ldvarg408 = (char *)tmp___83; tmp___84 = ldv_init_zalloc(1UL); ldvarg424 = (uint8_t *)tmp___84; tmp___85 = ldv_init_zalloc(2UL); ldvarg393 = (uint16_t *)tmp___85; tmp___86 = ldv_init_zalloc(1UL); ldvarg405 = (char *)tmp___86; tmp___87 = ldv_init_zalloc(4UL); ldvarg399 = (uint32_t *)tmp___87; tmp___88 = ldv_init_zalloc(1UL); ldvarg406 = tmp___88; tmp___89 = ldv_init_zalloc(376UL); ldvarg401 = (srb_t *)tmp___89; tmp___90 = ldv_init_zalloc(1UL); ldvarg421 = tmp___90; tmp___91 = ldv_init_zalloc(376UL); ldvarg417 = (srb_t *)tmp___91; tmp___92 = ldv_init_zalloc(1UL); ldvarg469 = tmp___92; tmp___93 = ldv_init_zalloc(1UL); ldvarg458 = tmp___93; tmp___94 = ldv_init_zalloc(376UL); ldvarg465 = (srb_t *)tmp___94; tmp___95 = ldv_init_zalloc(1UL); ldvarg472 = (uint8_t *)tmp___95; tmp___96 = ldv_init_zalloc(1UL); ldvarg449 = (uint8_t *)tmp___96; tmp___97 = ldv_init_zalloc(1UL); ldvarg452 = (uint8_t *)tmp___97; tmp___98 = ldv_init_zalloc(1UL); ldvarg457 = (char *)tmp___98; tmp___99 = ldv_init_zalloc(1UL); ldvarg460 = (char *)tmp___99; tmp___100 = ldv_init_zalloc(1UL); ldvarg456 = (uint8_t *)tmp___100; tmp___101 = ldv_init_zalloc(376UL); ldvarg453 = (srb_t *)tmp___101; tmp___102 = ldv_init_zalloc(2UL); ldvarg487 = (uint16_t *)tmp___102; tmp___103 = ldv_init_zalloc(1UL); ldvarg500 = tmp___103; tmp___104 = ldv_init_zalloc(1UL); ldvarg518 = (uint8_t *)tmp___104; tmp___105 = ldv_init_zalloc(1UL); ldvarg515 = tmp___105; tmp___106 = ldv_init_zalloc(1UL); ldvarg483 = (uint8_t *)tmp___106; tmp___107 = ldv_init_zalloc(1UL); ldvarg499 = (char *)tmp___107; tmp___108 = ldv_init_zalloc(1UL); ldvarg498 = (uint8_t *)tmp___108; tmp___109 = ldv_init_zalloc(4UL); ldvarg493 = (uint32_t *)tmp___109; tmp___110 = ldv_init_zalloc(1UL); ldvarg502 = (char *)tmp___110; tmp___111 = ldv_init_zalloc(376UL); ldvarg495 = (srb_t *)tmp___111; tmp___112 = ldv_init_zalloc(1UL); ldvarg486 = (uint8_t *)tmp___112; tmp___113 = ldv_init_zalloc(376UL); ldvarg511 = (srb_t *)tmp___113; ldv_initialize(); ldv_memset((void *)(& ldvarg18), 0, 1UL); ldv_memset((void *)(& ldvarg32), 0, 8UL); ldv_memset((void *)(& ldvarg42), 0, 4UL); ldv_memset((void *)(& ldvarg37), 0, 2UL); ldv_memset((void *)(& ldvarg29), 0, 8UL); ldv_memset((void *)(& ldvarg44), 0, 4UL); ldv_memset((void *)(& ldvarg24), 0, 4UL); ldv_memset((void *)(& ldvarg35), 0, 1UL); ldv_memset((void *)(& ldvarg38), 0, 1UL); ldv_memset((void *)(& ldvarg33), 0, 4UL); ldv_memset((void *)(& ldvarg16), 0, 1UL); ldv_memset((void *)(& ldvarg14), 0, 4UL); ldv_memset((void *)(& ldvarg34), 0, 4UL); ldv_memset((void *)(& 
ldvarg20), 0, 1UL); ldv_memset((void *)(& ldvarg31), 0, 4UL); ldv_memset((void *)(& ldvarg41), 0, 8UL); ldv_memset((void *)(& ldvarg13), 0, 4UL); ldv_memset((void *)(& ldvarg36), 0, 1UL); ldv_memset((void *)(& ldvarg40), 0, 4UL); ldv_memset((void *)(& ldvarg45), 0, 4UL); ldv_memset((void *)(& ldvarg25), 0, 4UL); ldv_memset((void *)(& ldvarg17), 0, 1UL); ldv_memset((void *)(& ldvarg22), 0, 4UL); ldv_memset((void *)(& ldvarg19), 0, 2UL); ldv_memset((void *)(& ldvarg77), 0, 4UL); ldv_memset((void *)(& ldvarg84), 0, 8UL); ldv_memset((void *)(& ldvarg94), 0, 4UL); ldv_memset((void *)(& ldvarg71), 0, 2UL); ldv_memset((void *)(& ldvarg72), 0, 1UL); ldv_memset((void *)(& ldvarg97), 0, 4UL); ldv_memset((void *)(& ldvarg64), 0, 4UL); ldv_memset((void *)(& ldvarg83), 0, 4UL); ldv_memset((void *)(& ldvarg85), 0, 4UL); ldv_memset((void *)(& ldvarg69), 0, 1UL); ldv_memset((void *)(& ldvarg93), 0, 8UL); ldv_memset((void *)(& ldvarg89), 0, 2UL); ldv_memset((void *)(& ldvarg60), 0, 4UL); ldv_memset((void *)(& ldvarg59), 0, 4UL); ldv_memset((void *)(& ldvarg87), 0, 1UL); ldv_memset((void *)(& ldvarg86), 0, 4UL); ldv_memset((void *)(& ldvarg62), 0, 4UL); ldv_memset((void *)(& ldvarg65), 0, 4UL); ldv_memset((void *)(& ldvarg96), 0, 4UL); ldv_memset((void *)(& ldvarg81), 0, 8UL); ldv_memset((void *)(& ldvarg70), 0, 1UL); ldv_memset((void *)(& ldvarg88), 0, 1UL); ldv_memset((void *)(& ldvarg68), 0, 1UL); ldv_memset((void *)(& ldvarg90), 0, 1UL); ldv_memset((void *)(& ldvarg61), 0, 4UL); ldv_memset((void *)(& ldvarg92), 0, 4UL); ldv_memset((void *)(& ldvarg74), 0, 4UL); ldv_memset((void *)(& ldvarg76), 0, 4UL); ldv_memset((void *)(& ldvarg131), 0, 4UL); ldv_memset((void *)(& ldvarg146), 0, 4UL); ldv_memset((void *)(& ldvarg141), 0, 1UL); ldv_memset((void *)(& ldvarg125), 0, 2UL); ldv_memset((void *)(& ldvarg140), 0, 4UL); ldv_memset((void *)(& ldvarg138), 0, 8UL); ldv_memset((void *)(& ldvarg142), 0, 1UL); ldv_memset((void *)(& ldvarg151), 0, 4UL); ldv_memset((void *)(& ldvarg137), 0, 4UL); ldv_memset((void *)(& ldvarg122), 0, 1UL); ldv_memset((void *)(& ldvarg144), 0, 1UL); ldv_memset((void *)(& ldvarg128), 0, 4UL); ldv_memset((void *)(& ldvarg126), 0, 1UL); ldv_memset((void *)(& ldvarg123), 0, 1UL); ldv_memset((void *)(& ldvarg120), 0, 4UL); ldv_memset((void *)(& ldvarg135), 0, 8UL); ldv_memset((void *)(& ldvarg119), 0, 4UL); ldv_memset((void *)(& ldvarg147), 0, 8UL); ldv_memset((void *)(& ldvarg124), 0, 1UL); ldv_memset((void *)(& ldvarg150), 0, 4UL); ldv_memset((void *)(& ldvarg143), 0, 2UL); ldv_memset((void *)(& ldvarg130), 0, 4UL); ldv_memset((void *)(& ldvarg139), 0, 4UL); ldv_memset((void *)(& ldvarg148), 0, 4UL); ldv_memset((void *)(& ldvarg157), 0, 4UL); ldv_memset((void *)(& ldvarg174), 0, 2UL); ldv_memset((void *)(& ldvarg168), 0, 2UL); ldv_memset((void *)(& ldvarg165), 0, 1UL); ldv_memset((void *)(& ldvarg188), 0, 1UL); ldv_memset((void *)(& ldvarg185), 0, 8UL); ldv_memset((void *)(& ldvarg191), 0, 1UL); ldv_memset((void *)(& ldvarg167), 0, 1UL); ldv_memset((void *)(& ldvarg197), 0, 4UL); ldv_memset((void *)(& ldvarg189), 0, 1UL); ldv_memset((void *)(& ldvarg182), 0, 8UL); ldv_memset((void *)(& ldvarg198), 0, 4UL); ldv_memset((void *)(& ldvarg169), 0, 1UL); ldv_memset((void *)(& ldvarg161), 0, 4UL); ldv_memset((void *)(& ldvarg158), 0, 4UL); ldv_memset((void *)(& ldvarg195), 0, 4UL); ldv_memset((void *)(& ldvarg187), 0, 4UL); ldv_memset((void *)(& ldvarg184), 0, 4UL); ldv_memset((void *)(& ldvarg186), 0, 4UL); ldv_memset((void *)(& ldvarg190), 0, 2UL); ldv_memset((void *)(& ldvarg177), 0, 4UL); 
ldv_memset((void *)(& ldvarg193), 0, 4UL); ldv_memset((void *)(& ldvarg156), 0, 4UL); ldv_memset((void *)(& ldvarg166), 0, 1UL); ldv_memset((void *)(& ldvarg176), 0, 4UL); ldv_memset((void *)(& ldvarg194), 0, 8UL); ldv_memset((void *)(& ldvarg180), 0, 2UL); ldv_memset((void *)(& ldvarg171), 0, 4UL); ldv_memset((void *)(& ldvarg159), 0, 4UL); ldv_memset((void *)(& ldvarg162), 0, 4UL); ldv_memset((void *)(& ldvarg200), 0, 4UL); ldv_memset((void *)(& ldvarg242), 0, 4UL); ldv_memset((void *)(& ldvarg225), 0, 1UL); ldv_memset((void *)(& ldvarg247), 0, 1UL); ldv_memset((void *)(& ldvarg233), 0, 4UL); ldv_memset((void *)(& ldvarg243), 0, 4UL); ldv_memset((void *)(& ldvarg234), 0, 4UL); ldv_memset((void *)(& ldvarg244), 0, 1UL); ldv_memset((void *)(& ldvarg228), 0, 2UL); ldv_memset((void *)(& ldvarg226), 0, 1UL); ldv_memset((void *)(& ldvarg222), 0, 4UL); ldv_memset((void *)(& ldvarg240), 0, 4UL); ldv_memset((void *)(& ldvarg241), 0, 8UL); ldv_memset((void *)(& ldvarg246), 0, 2UL); ldv_memset((void *)(& ldvarg231), 0, 4UL); ldv_memset((void *)(& ldvarg238), 0, 8UL); ldv_memset((void *)(& ldvarg254), 0, 4UL); ldv_memset((void *)(& ldvarg223), 0, 4UL); ldv_memset((void *)(& ldvarg245), 0, 1UL); ldv_memset((void *)(& ldvarg229), 0, 1UL); ldv_memset((void *)(& ldvarg227), 0, 1UL); ldv_memset((void *)(& ldvarg251), 0, 4UL); ldv_memset((void *)(& ldvarg250), 0, 8UL); ldv_memset((void *)(& ldvarg249), 0, 4UL); ldv_memset((void *)(& ldvarg253), 0, 4UL); ldv_memset((void *)(& ldvarg290), 0, 1UL); ldv_memset((void *)(& ldvarg315), 0, 1UL); ldv_memset((void *)(& ldvarg308), 0, 4UL); ldv_memset((void *)(& ldvarg313), 0, 1UL); ldv_memset((void *)(& ldvarg321), 0, 4UL); ldv_memset((void *)(& ldvarg311), 0, 4UL); ldv_memset((void *)(& ldvarg309), 0, 8UL); ldv_memset((void *)(& ldvarg322), 0, 4UL); ldv_memset((void *)(& ldvarg285), 0, 4UL); ldv_memset((void *)(& ldvarg300), 0, 4UL); ldv_memset((void *)(& ldvarg310), 0, 4UL); ldv_memset((void *)(& ldvarg295), 0, 4UL); ldv_memset((void *)(& ldvarg281), 0, 4UL); ldv_memset((void *)(& ldvarg280), 0, 4UL); ldv_memset((void *)(& ldvarg317), 0, 4UL); ldv_memset((void *)(& ldvarg304), 0, 2UL); ldv_memset((void *)(& ldvarg312), 0, 1UL); ldv_memset((void *)(& ldvarg318), 0, 8UL); ldv_memset((void *)(& ldvarg293), 0, 1UL); ldv_memset((void *)(& ldvarg291), 0, 1UL); ldv_memset((void *)(& ldvarg289), 0, 1UL); ldv_memset((void *)(& ldvarg306), 0, 8UL); ldv_memset((void *)(& ldvarg314), 0, 2UL); ldv_memset((void *)(& ldvarg319), 0, 4UL); ldv_memset((void *)(& ldvarg282), 0, 4UL); ldv_memset((void *)(& ldvarg301), 0, 4UL); ldv_memset((void *)(& ldvarg286), 0, 4UL); ldv_memset((void *)(& ldvarg292), 0, 2UL); ldv_memset((void *)(& ldvarg283), 0, 4UL); ldv_memset((void *)(& ldvarg298), 0, 2UL); ldv_memset((void *)(& ldvarg349), 0, 4UL); ldv_memset((void *)(& ldvarg350), 0, 8UL); ldv_memset((void *)(& ldvarg389), 0, 4UL); ldv_memset((void *)(& ldvarg357), 0, 4UL); ldv_memset((void *)(& ldvarg381), 0, 2UL); ldv_memset((void *)(& ldvarg352), 0, 4UL); ldv_memset((void *)(& ldvarg380), 0, 1UL); ldv_memset((void *)(& ldvarg363), 0, 2UL); ldv_memset((void *)(& ldvarg369), 0, 4UL); ldv_memset((void *)(& ldvarg362), 0, 1UL); ldv_memset((void *)(& ldvarg379), 0, 1UL); ldv_memset((void *)(& ldvarg360), 0, 1UL); ldv_memset((void *)(& ldvarg364), 0, 1UL); ldv_memset((void *)(& ldvarg376), 0, 8UL); ldv_memset((void *)(& ldvarg377), 0, 4UL); ldv_memset((void *)(& ldvarg386), 0, 4UL); ldv_memset((void *)(& ldvarg361), 0, 1UL); ldv_memset((void *)(& ldvarg354), 0, 4UL); ldv_memset((void *)(& 
ldvarg375), 0, 4UL); ldv_memset((void *)(& ldvarg384), 0, 4UL); ldv_memset((void *)(& ldvarg351), 0, 4UL); ldv_memset((void *)(& ldvarg378), 0, 4UL); ldv_memset((void *)(& ldvarg382), 0, 1UL); ldv_memset((void *)(& ldvarg368), 0, 4UL); ldv_memset((void *)(& ldvarg353), 0, 4UL); ldv_memset((void *)(& ldvarg373), 0, 8UL); ldv_memset((void *)(& ldvarg388), 0, 4UL); ldv_memset((void *)(& ldvarg385), 0, 8UL); ldv_memset((void *)(& ldvarg366), 0, 4UL); ldv_memset((void *)(& ldvarg356), 0, 4UL); ldv_memset((void *)(& ldvarg412), 0, 4UL); ldv_memset((void *)(& ldvarg397), 0, 2UL); ldv_memset((void *)(& ldvarg396), 0, 1UL); ldv_memset((void *)(& ldvarg394), 0, 1UL); ldv_memset((void *)(& ldvarg403), 0, 4UL); ldv_memset((void *)(& ldvarg416), 0, 1UL); ldv_memset((void *)(& ldvarg395), 0, 1UL); ldv_memset((void *)(& ldvarg398), 0, 1UL); ldv_memset((void *)(& ldvarg423), 0, 4UL); ldv_memset((void *)(& ldvarg415), 0, 2UL); ldv_memset((void *)(& ldvarg418), 0, 4UL); ldv_memset((void *)(& ldvarg422), 0, 4UL); ldv_memset((void *)(& ldvarg407), 0, 8UL); ldv_memset((void *)(& ldvarg409), 0, 4UL); ldv_memset((void *)(& ldvarg392), 0, 4UL); ldv_memset((void *)(& ldvarg419), 0, 8UL); ldv_memset((void *)(& ldvarg420), 0, 4UL); ldv_memset((void *)(& ldvarg410), 0, 8UL); ldv_memset((void *)(& ldvarg402), 0, 4UL); ldv_memset((void *)(& ldvarg400), 0, 4UL); ldv_memset((void *)(& ldvarg391), 0, 4UL); ldv_memset((void *)(& ldvarg413), 0, 1UL); ldv_memset((void *)(& ldvarg411), 0, 4UL); ldv_memset((void *)(& ldvarg414), 0, 1UL); ldv_memset((void *)(& ldvarg426), 0, 8UL); ldv_memset((void *)(& ldvarg425), 0, 4UL); ldv_memset((void *)(& ldvarg448), 0, 4UL); ldv_memset((void *)(& ldvarg463), 0, 4UL); ldv_memset((void *)(& ldvarg459), 0, 8UL); ldv_memset((void *)(& ldvarg464), 0, 4UL); ldv_memset((void *)(& ldvarg461), 0, 4UL); ldv_memset((void *)(& ldvarg447), 0, 4UL); ldv_memset((void *)(& ldvarg450), 0, 4UL); ldv_memset((void *)(& ldvarg462), 0, 8UL); ldv_memset((void *)(& ldvarg446), 0, 4UL); ldv_memset((void *)(& ldvarg471), 0, 4UL); ldv_memset((void *)(& ldvarg468), 0, 4UL); ldv_memset((void *)(& ldvarg451), 0, 4UL); ldv_memset((void *)(& ldvarg454), 0, 4UL); ldv_memset((void *)(& ldvarg455), 0, 4UL); ldv_memset((void *)(& ldvarg467), 0, 8UL); ldv_memset((void *)(& ldvarg470), 0, 4UL); ldv_memset((void *)(& ldvarg445), 0, 4UL); ldv_memset((void *)(& ldvarg466), 0, 4UL); ldv_memset((void *)(& ldvarg479), 0, 4UL); ldv_memset((void *)(& ldvarg485), 0, 4UL); ldv_memset((void *)(& ldvarg512), 0, 4UL); ldv_memset((void *)(& ldvarg488), 0, 1UL); ldv_memset((void *)(& ldvarg506), 0, 4UL); ldv_memset((void *)(& ldvarg501), 0, 8UL); ldv_memset((void *)(& ldvarg509), 0, 2UL); ldv_memset((void *)(& ldvarg514), 0, 4UL); ldv_memset((void *)(& ldvarg496), 0, 4UL); ldv_memset((void *)(& ldvarg507), 0, 1UL); ldv_memset((void *)(& ldvarg484), 0, 4UL); ldv_memset((void *)(& ldvarg504), 0, 8UL); ldv_memset((void *)(& ldvarg490), 0, 1UL); ldv_memset((void *)(& ldvarg480), 0, 4UL); ldv_memset((void *)(& ldvarg516), 0, 4UL); ldv_memset((void *)(& ldvarg489), 0, 1UL); ldv_memset((void *)(& ldvarg508), 0, 1UL); ldv_memset((void *)(& ldvarg497), 0, 4UL); ldv_memset((void *)(& ldvarg510), 0, 1UL); ldv_memset((void *)(& ldvarg517), 0, 4UL); ldv_memset((void *)(& ldvarg482), 0, 4UL); ldv_memset((void *)(& ldvarg491), 0, 2UL); ldv_memset((void *)(& ldvarg503), 0, 4UL); ldv_memset((void *)(& ldvarg494), 0, 4UL); ldv_memset((void *)(& ldvarg481), 0, 4UL); ldv_memset((void *)(& ldvarg492), 0, 1UL); ldv_memset((void *)(& ldvarg513), 0, 8UL); 
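/* Descriptive note (added): after the final ldv_memset() below, every scalar ldvarg* value has been
 * zero-initialized. The harness then assigns the ldv_state_variable_* flags (presumably 0 = callback
 * group not yet activated, 1 = initial registered state), runs the work_init_*() helpers, and enters
 * the nondeterministic dispatch loop at label ldv_69159, where __VERIFIER_nondet_int() repeatedly
 * selects which callback group to exercise next. */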
ldv_memset((void *)(& ldvarg505), 0, 4UL); ldv_state_variable_33 = 0; ldv_state_variable_32 = 0; ldv_state_variable_63 = 0; ldv_state_variable_21 = 1; ldv_state_variable_71 = 0; work_init_7(); ldv_state_variable_7 = 1; ldv_state_variable_80 = 0; ldv_state_variable_26 = 1; ldv_state_variable_18 = 1; ldv_state_variable_72 = 0; ldv_state_variable_16 = 1; ldv_state_variable_44 = 0; ldv_state_variable_55 = 0; ldv_state_variable_84 = 0; ldv_state_variable_74 = 0; ldv_state_variable_27 = 1; ldv_state_variable_57 = 0; ldv_state_variable_61 = 0; ldv_state_variable_20 = 1; work_init_10(); ldv_state_variable_10 = 1; ldv_state_variable_31 = 0; ldv_state_variable_35 = 0; work_init_11(); ldv_state_variable_11 = 1; ldv_state_variable_78 = 0; ldv_state_variable_48 = 0; ldv_state_variable_87 = 0; ldv_state_variable_77 = 0; ldv_state_variable_65 = 0; ldv_state_variable_29 = 1; ldv_state_variable_50 = 0; ldv_state_variable_39 = 0; ldv_state_variable_64 = 0; ldv_state_variable_58 = 0; ldv_state_variable_41 = 0; work_init_12(); ldv_state_variable_12 = 1; ldv_state_variable_15 = 1; ldv_state_variable_81 = 0; ldv_state_variable_52 = 0; ldv_state_variable_60 = 0; ldv_state_variable_56 = 0; ldv_state_variable_73 = 0; ldv_state_variable_66 = 0; ldv_state_variable_45 = 0; ldv_state_variable_86 = 0; ldv_state_variable_76 = 0; ldv_state_variable_19 = 1; ldv_state_variable_62 = 0; ldv_state_variable_54 = 0; ldv_state_variable_67 = 0; ldv_state_variable_70 = 0; ldv_state_variable_68 = 0; work_init_2(); ldv_state_variable_2 = 1; ldv_state_variable_17 = 1; work_init_1(); ldv_state_variable_1 = 1; ldv_state_variable_88 = 0; ldv_state_variable_30 = 1; ldv_state_variable_82 = 0; ldv_state_variable_25 = 1; ldv_state_variable_28 = 1; ldv_state_variable_83 = 0; ldv_state_variable_75 = 0; ldv_state_variable_40 = 0; ldv_state_variable_14 = 1; ldv_state_variable_69 = 0; ldv_state_variable_59 = 0; ldv_state_variable_49 = 0; ldv_state_variable_24 = 1; ldv_state_variable_53 = 0; ldv_state_variable_79 = 0; ldv_state_variable_22 = 1; ldv_state_variable_42 = 0; ref_cnt = 0; ldv_state_variable_0 = 1; ldv_state_variable_46 = 0; ldv_state_variable_23 = 1; ldv_state_variable_13 = 1; work_init_6(); ldv_state_variable_6 = 1; ldv_state_variable_85 = 0; work_init_3(); ldv_state_variable_3 = 1; ldv_state_variable_36 = 0; work_init_9(); ldv_state_variable_9 = 1; ldv_state_variable_51 = 0; ldv_state_variable_47 = 0; work_init_8(); ldv_state_variable_8 = 1; ldv_state_variable_38 = 0; work_init_4(); ldv_state_variable_4 = 1; ldv_state_variable_34 = 0; ldv_state_variable_37 = 0; ldv_state_variable_43 = 0; work_init_5(); ldv_state_variable_5 = 1; ldv_69159: tmp___114 = __VERIFIER_nondet_int(); switch (tmp___114) { case 0: ; if (ldv_state_variable_33 != 0) { ldv_main_exported_33(); } else { } goto ldv_68677; case 1: ; if (ldv_state_variable_32 != 0) { ldv_main_exported_32(); } else { } goto ldv_68677; case 2: ; if (ldv_state_variable_63 != 0) { ldv_main_exported_63(); } else { } goto ldv_68677; case 3: ; goto ldv_68677; case 4: ; if (ldv_state_variable_71 != 0) { ldv_main_exported_71(); } else { } goto ldv_68677; case 5: ; goto ldv_68677; case 6: ; if (ldv_state_variable_80 != 0) { tmp___115 = __VERIFIER_nondet_int(); switch (tmp___115) { case 0: ; if (ldv_state_variable_80 == 1) { qla24xx_reset_adapter(qla83xx_isp_ops_group0); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_reset_adapter(qla83xx_isp_ops_group0); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 1: ; if (ldv_state_variable_80 == 2) { 
qla24xx_write_optrom_data(qla83xx_isp_ops_group0, ldvarg46, ldvarg45, ldvarg44); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 2: ; if (ldv_state_variable_80 == 1) { qla24xx_enable_intrs(qla83xx_isp_ops_group1); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_enable_intrs(qla83xx_isp_ops_group1); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 3: ; if (ldv_state_variable_80 == 1) { qla24xx_intr_handler(ldvarg42, ldvarg43); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_intr_handler(ldvarg42, ldvarg43); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 4: ; if (ldv_state_variable_80 == 1) { qla24xx_abort_target(qla83xx_isp_ops_group2, ldvarg41, ldvarg40); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_abort_target(qla83xx_isp_ops_group2, ldvarg41, ldvarg40); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 5: ; if (ldv_state_variable_80 == 1) { qla81xx_update_fw_options(qla83xx_isp_ops_group0); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla81xx_update_fw_options(qla83xx_isp_ops_group0); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 6: ; if (ldv_state_variable_80 == 1) { qla2x00_abort_isp(qla83xx_isp_ops_group0); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla2x00_abort_isp(qla83xx_isp_ops_group0); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 7: ; if (ldv_state_variable_80 == 1) { qla24xx_dif_start_scsi(ldvarg39); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_dif_start_scsi(ldvarg39); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 8: ; if (ldv_state_variable_80 == 1) { qla24xx_chip_diag(qla83xx_isp_ops_group0); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_chip_diag(qla83xx_isp_ops_group0); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 9: ; if (ldv_state_variable_80 == 1) { qla24xx_fabric_logout(qla83xx_isp_ops_group0, (int )ldvarg37, (int )ldvarg36, (int )ldvarg35, (int )ldvarg38); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_fabric_logout(qla83xx_isp_ops_group0, (int )ldvarg37, (int )ldvarg36, (int )ldvarg35, (int )ldvarg38); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 10: ; if (ldv_state_variable_80 == 1) { qla24xx_reset_chip(qla83xx_isp_ops_group0); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_reset_chip(qla83xx_isp_ops_group0); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 11: ; if (ldv_state_variable_80 == 1) { qla24xx_prep_ms_iocb(qla83xx_isp_ops_group0, ldvarg34, ldvarg33); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_prep_ms_iocb(qla83xx_isp_ops_group0, ldvarg34, ldvarg33); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 12: ; if (ldv_state_variable_80 == 1) { qla24xx_lun_reset(qla83xx_isp_ops_group2, ldvarg32, ldvarg31); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_lun_reset(qla83xx_isp_ops_group2, ldvarg32, ldvarg31); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 13: ; if (ldv_state_variable_80 == 1) { qla24xx_fw_version_str(qla83xx_isp_ops_group0, ldvarg30, ldvarg29); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_fw_version_str(qla83xx_isp_ops_group0, ldvarg30, ldvarg29); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 14: ; if 
(ldv_state_variable_80 == 1) { qla83xx_iospace_config(qla83xx_isp_ops_group1); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla83xx_iospace_config(qla83xx_isp_ops_group1); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 15: ; if (ldv_state_variable_80 == 1) { qla2x00_initialize_adapter(qla83xx_isp_ops_group0); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla2x00_initialize_adapter(qla83xx_isp_ops_group0); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 16: ; if (ldv_state_variable_80 == 1) { qla24xx_get_flash_version(qla83xx_isp_ops_group0, ldvarg28); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_get_flash_version(qla83xx_isp_ops_group0, ldvarg28); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 17: ; if (ldv_state_variable_80 == 1) { qla24xx_disable_intrs(qla83xx_isp_ops_group1); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_disable_intrs(qla83xx_isp_ops_group1); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 18: ; if (ldv_state_variable_80 == 1) { qla81xx_nvram_config(qla83xx_isp_ops_group0); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla81xx_nvram_config(qla83xx_isp_ops_group0); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 19: ; if (ldv_state_variable_80 == 1) { qla24xx_pci_info_str(qla83xx_isp_ops_group0, ldvarg27); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_pci_info_str(qla83xx_isp_ops_group0, ldvarg27); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 20: ; if (ldv_state_variable_80 == 2) { qla25xx_read_optrom_data(qla83xx_isp_ops_group0, ldvarg26, ldvarg25, ldvarg24); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 21: ; if (ldv_state_variable_80 == 1) { qla83xx_beacon_blink(qla83xx_isp_ops_group0); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla83xx_beacon_blink(qla83xx_isp_ops_group0); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 22: ; if (ldv_state_variable_80 == 1) { qla24xx_beacon_off(qla83xx_isp_ops_group0); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_beacon_off(qla83xx_isp_ops_group0); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 23: ; if (ldv_state_variable_80 == 1) { qla24xx_abort_command(ldvarg23); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_abort_command(ldvarg23); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 24: ; if (ldv_state_variable_80 == 1) { qla83xx_fw_dump(qla83xx_isp_ops_group0, ldvarg22); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla83xx_fw_dump(qla83xx_isp_ops_group0, ldvarg22); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 25: ; if (ldv_state_variable_80 == 1) { qla24xx_config_rings(qla83xx_isp_ops_group0); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_config_rings(qla83xx_isp_ops_group0); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 26: ; if (ldv_state_variable_80 == 1) { qla81xx_load_risc(qla83xx_isp_ops_group0, ldvarg21); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla81xx_load_risc(qla83xx_isp_ops_group0, ldvarg21); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 27: ; if (ldv_state_variable_80 == 1) { qla25xx_pci_config(qla83xx_isp_ops_group0); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { 
qla25xx_pci_config(qla83xx_isp_ops_group0); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 28: ; if (ldv_state_variable_80 == 1) { qla24xx_login_fabric(qla83xx_isp_ops_group0, (int )ldvarg19, (int )ldvarg17, (int )ldvarg16, (int )ldvarg20, ldvarg15, (int )ldvarg18); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_login_fabric(qla83xx_isp_ops_group0, (int )ldvarg19, (int )ldvarg17, (int )ldvarg16, (int )ldvarg20, ldvarg15, (int )ldvarg18); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 29: ; if (ldv_state_variable_80 == 1) { qla24xx_beacon_on(qla83xx_isp_ops_group0); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_beacon_on(qla83xx_isp_ops_group0); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 30: ; if (ldv_state_variable_80 == 1) { qla24xx_prep_ms_fdmi_iocb(qla83xx_isp_ops_group0, ldvarg14, ldvarg13); ldv_state_variable_80 = 1; } else { } if (ldv_state_variable_80 == 2) { qla24xx_prep_ms_fdmi_iocb(qla83xx_isp_ops_group0, ldvarg14, ldvarg13); ldv_state_variable_80 = 2; } else { } goto ldv_68685; case 31: ; if (ldv_state_variable_80 == 2) { ldv_release_80(); ldv_state_variable_80 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_68685; case 32: ; if (ldv_state_variable_80 == 1) { ldv_probe_80(); ldv_state_variable_80 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_68685; default: ldv_stop(); } ldv_68685: ; } else { } goto ldv_68677; case 7: ; goto ldv_68677; case 8: ; goto ldv_68677; case 9: ; if (ldv_state_variable_72 != 0) { ldv_main_exported_72(); } else { } goto ldv_68677; case 10: ; goto ldv_68677; case 11: ; if (ldv_state_variable_44 != 0) { ldv_main_exported_44(); } else { } goto ldv_68677; case 12: ; if (ldv_state_variable_55 != 0) { ldv_main_exported_55(); } else { } goto ldv_68677; case 13: ; if (ldv_state_variable_84 != 0) { tmp___116 = __VERIFIER_nondet_int(); switch (tmp___116) { case 0: ; if (ldv_state_variable_84 == 1) { qla24xx_reset_adapter(qla25xx_isp_ops_group0); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_reset_adapter(qla25xx_isp_ops_group0); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 1: ; if (ldv_state_variable_84 == 1) { qla24xx_enable_intrs(qla25xx_isp_ops_group1); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_enable_intrs(qla25xx_isp_ops_group1); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 2: ; if (ldv_state_variable_84 == 2) { qla24xx_write_optrom_data(qla25xx_isp_ops_group0, ldvarg98, ldvarg97, ldvarg96); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 3: ; if (ldv_state_variable_84 == 1) { qla24xx_intr_handler(ldvarg94, ldvarg95); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_intr_handler(ldvarg94, ldvarg95); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 4: ; if (ldv_state_variable_84 == 1) { qla24xx_abort_target(qla25xx_isp_ops_group2, ldvarg93, ldvarg92); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_abort_target(qla25xx_isp_ops_group2, ldvarg93, ldvarg92); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 5: ; if (ldv_state_variable_84 == 1) { qla24xx_update_fw_options(qla25xx_isp_ops_group0); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_update_fw_options(qla25xx_isp_ops_group0); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 6: ; if (ldv_state_variable_84 == 1) { qla24xx_dif_start_scsi(ldvarg91); ldv_state_variable_84 
= 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_dif_start_scsi(ldvarg91); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 7: ; if (ldv_state_variable_84 == 1) { qla2x00_abort_isp(qla25xx_isp_ops_group0); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla2x00_abort_isp(qla25xx_isp_ops_group0); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 8: ; if (ldv_state_variable_84 == 1) { qla24xx_chip_diag(qla25xx_isp_ops_group0); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_chip_diag(qla25xx_isp_ops_group0); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 9: ; if (ldv_state_variable_84 == 1) { qla24xx_fabric_logout(qla25xx_isp_ops_group0, (int )ldvarg89, (int )ldvarg88, (int )ldvarg87, (int )ldvarg90); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_fabric_logout(qla25xx_isp_ops_group0, (int )ldvarg89, (int )ldvarg88, (int )ldvarg87, (int )ldvarg90); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 10: ; if (ldv_state_variable_84 == 1) { qla24xx_reset_chip(qla25xx_isp_ops_group0); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_reset_chip(qla25xx_isp_ops_group0); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 11: ; if (ldv_state_variable_84 == 1) { qla24xx_prep_ms_iocb(qla25xx_isp_ops_group0, ldvarg86, ldvarg85); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_prep_ms_iocb(qla25xx_isp_ops_group0, ldvarg86, ldvarg85); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 12: ; if (ldv_state_variable_84 == 1) { qla24xx_lun_reset(qla25xx_isp_ops_group2, ldvarg84, ldvarg83); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_lun_reset(qla25xx_isp_ops_group2, ldvarg84, ldvarg83); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 13: ; if (ldv_state_variable_84 == 1) { qla24xx_fw_version_str(qla25xx_isp_ops_group0, ldvarg82, ldvarg81); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_fw_version_str(qla25xx_isp_ops_group0, ldvarg82, ldvarg81); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 14: ; if (ldv_state_variable_84 == 1) { qla2x00_iospace_config(qla25xx_isp_ops_group1); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla2x00_iospace_config(qla25xx_isp_ops_group1); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 15: ; if (ldv_state_variable_84 == 1) { qla2x00_initialize_adapter(qla25xx_isp_ops_group0); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla2x00_initialize_adapter(qla25xx_isp_ops_group0); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 16: ; if (ldv_state_variable_84 == 1) { qla24xx_disable_intrs(qla25xx_isp_ops_group1); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_disable_intrs(qla25xx_isp_ops_group1); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 17: ; if (ldv_state_variable_84 == 1) { qla24xx_get_flash_version(qla25xx_isp_ops_group0, ldvarg80); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_get_flash_version(qla25xx_isp_ops_group0, ldvarg80); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 18: ; if (ldv_state_variable_84 == 1) { qla24xx_nvram_config(qla25xx_isp_ops_group0); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_nvram_config(qla25xx_isp_ops_group0); ldv_state_variable_84 = 2; } else { } 
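/* Descriptive note (added): each case of this inner switch nondeterministically invokes one entry of
 * the qla25xx isp_ops table; the guards on ldv_state_variable_84 re-store the same state value (1 or 2)
 * after the call, while the ldv_probe_84()/ldv_release_84() cases move between the two states and
 * increment or decrement ref_cnt accordingly. The other inner switches in this function follow the
 * same pattern for the qla83xx, qla27xx, qla2100 and qla8044 isp_ops tables. */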
goto ldv_68727; case 19: ; if (ldv_state_variable_84 == 1) { qla24xx_pci_info_str(qla25xx_isp_ops_group0, ldvarg79); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_pci_info_str(qla25xx_isp_ops_group0, ldvarg79); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 20: ; if (ldv_state_variable_84 == 2) { qla25xx_read_optrom_data(qla25xx_isp_ops_group0, ldvarg78, ldvarg77, ldvarg76); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 21: ; if (ldv_state_variable_84 == 1) { qla24xx_beacon_blink(qla25xx_isp_ops_group0); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_beacon_blink(qla25xx_isp_ops_group0); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 22: ; if (ldv_state_variable_84 == 1) { qla24xx_beacon_off(qla25xx_isp_ops_group0); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_beacon_off(qla25xx_isp_ops_group0); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 23: ; if (ldv_state_variable_84 == 1) { qla24xx_abort_command(ldvarg75); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_abort_command(ldvarg75); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 24: ; if (ldv_state_variable_84 == 1) { qla25xx_fw_dump(qla25xx_isp_ops_group0, ldvarg74); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla25xx_fw_dump(qla25xx_isp_ops_group0, ldvarg74); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 25: ; if (ldv_state_variable_84 == 1) { qla24xx_config_rings(qla25xx_isp_ops_group0); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_config_rings(qla25xx_isp_ops_group0); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 26: ; if (ldv_state_variable_84 == 1) { qla24xx_load_risc(qla25xx_isp_ops_group0, ldvarg73); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_load_risc(qla25xx_isp_ops_group0, ldvarg73); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 27: ; if (ldv_state_variable_84 == 1) { qla25xx_pci_config(qla25xx_isp_ops_group0); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla25xx_pci_config(qla25xx_isp_ops_group0); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 28: ; if (ldv_state_variable_84 == 1) { qla24xx_login_fabric(qla25xx_isp_ops_group0, (int )ldvarg71, (int )ldvarg69, (int )ldvarg68, (int )ldvarg72, ldvarg67, (int )ldvarg70); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_login_fabric(qla25xx_isp_ops_group0, (int )ldvarg71, (int )ldvarg69, (int )ldvarg68, (int )ldvarg72, ldvarg67, (int )ldvarg70); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 29: ; if (ldv_state_variable_84 == 1) { qla25xx_write_nvram_data(qla25xx_isp_ops_group0, ldvarg66, ldvarg65, ldvarg64); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla25xx_write_nvram_data(qla25xx_isp_ops_group0, ldvarg66, ldvarg65, ldvarg64); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 30: ; if (ldv_state_variable_84 == 1) { qla24xx_beacon_on(qla25xx_isp_ops_group0); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_beacon_on(qla25xx_isp_ops_group0); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 31: ; if (ldv_state_variable_84 == 1) { qla25xx_read_nvram_data(qla25xx_isp_ops_group0, ldvarg63, ldvarg62, ldvarg61); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { 
qla25xx_read_nvram_data(qla25xx_isp_ops_group0, ldvarg63, ldvarg62, ldvarg61); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 32: ; if (ldv_state_variable_84 == 1) { qla24xx_prep_ms_fdmi_iocb(qla25xx_isp_ops_group0, ldvarg60, ldvarg59); ldv_state_variable_84 = 1; } else { } if (ldv_state_variable_84 == 2) { qla24xx_prep_ms_fdmi_iocb(qla25xx_isp_ops_group0, ldvarg60, ldvarg59); ldv_state_variable_84 = 2; } else { } goto ldv_68727; case 33: ; if (ldv_state_variable_84 == 2) { ldv_release_84(); ldv_state_variable_84 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_68727; case 34: ; if (ldv_state_variable_84 == 1) { ldv_probe_84(); ldv_state_variable_84 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_68727; default: ldv_stop(); } ldv_68727: ; } else { } goto ldv_68677; case 14: ; if (ldv_state_variable_74 != 0) { ldv_main_exported_74(); } else { } goto ldv_68677; case 15: ; goto ldv_68677; case 16: ; if (ldv_state_variable_57 != 0) { ldv_main_exported_57(); } else { } goto ldv_68677; case 17: ; if (ldv_state_variable_61 != 0) { ldv_main_exported_61(); } else { } goto ldv_68677; case 18: ; goto ldv_68677; case 19: ; goto ldv_68677; case 20: ; if (ldv_state_variable_31 != 0) { ldv_main_exported_31(); } else { } goto ldv_68677; case 21: ; if (ldv_state_variable_35 != 0) { ldv_main_exported_35(); } else { } goto ldv_68677; case 22: ; goto ldv_68677; case 23: ; if (ldv_state_variable_78 != 0) { tmp___117 = __VERIFIER_nondet_int(); switch (tmp___117) { case 0: ; if (ldv_state_variable_78 == 1) { qla24xx_reset_adapter(qla27xx_isp_ops_group0); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_reset_adapter(qla27xx_isp_ops_group0); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 1: ; if (ldv_state_variable_78 == 2) { qla24xx_write_optrom_data(qla27xx_isp_ops_group0, ldvarg152, ldvarg151, ldvarg150); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 2: ; if (ldv_state_variable_78 == 1) { qla24xx_enable_intrs(qla27xx_isp_ops_group1); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_enable_intrs(qla27xx_isp_ops_group1); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 3: ; if (ldv_state_variable_78 == 1) { qla24xx_intr_handler(ldvarg148, ldvarg149); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_intr_handler(ldvarg148, ldvarg149); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 4: ; if (ldv_state_variable_78 == 1) { qla24xx_abort_target(qla27xx_isp_ops_group2, ldvarg147, ldvarg146); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_abort_target(qla27xx_isp_ops_group2, ldvarg147, ldvarg146); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 5: ; if (ldv_state_variable_78 == 1) { qla81xx_update_fw_options(qla27xx_isp_ops_group0); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla81xx_update_fw_options(qla27xx_isp_ops_group0); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 6: ; if (ldv_state_variable_78 == 1) { qla2x00_abort_isp(qla27xx_isp_ops_group0); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla2x00_abort_isp(qla27xx_isp_ops_group0); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 7: ; if (ldv_state_variable_78 == 1) { qla24xx_dif_start_scsi(ldvarg145); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_dif_start_scsi(ldvarg145); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 8: ; if (ldv_state_variable_78 
== 1) { qla24xx_chip_diag(qla27xx_isp_ops_group0); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_chip_diag(qla27xx_isp_ops_group0); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 9: ; if (ldv_state_variable_78 == 1) { qla24xx_fabric_logout(qla27xx_isp_ops_group0, (int )ldvarg143, (int )ldvarg142, (int )ldvarg141, (int )ldvarg144); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_fabric_logout(qla27xx_isp_ops_group0, (int )ldvarg143, (int )ldvarg142, (int )ldvarg141, (int )ldvarg144); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 10: ; if (ldv_state_variable_78 == 1) { qla24xx_reset_chip(qla27xx_isp_ops_group0); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_reset_chip(qla27xx_isp_ops_group0); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 11: ; if (ldv_state_variable_78 == 1) { qla24xx_prep_ms_iocb(qla27xx_isp_ops_group0, ldvarg140, ldvarg139); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_prep_ms_iocb(qla27xx_isp_ops_group0, ldvarg140, ldvarg139); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 12: ; if (ldv_state_variable_78 == 1) { qla24xx_lun_reset(qla27xx_isp_ops_group2, ldvarg138, ldvarg137); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_lun_reset(qla27xx_isp_ops_group2, ldvarg138, ldvarg137); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 13: ; if (ldv_state_variable_78 == 1) { qla24xx_fw_version_str(qla27xx_isp_ops_group0, ldvarg136, ldvarg135); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_fw_version_str(qla27xx_isp_ops_group0, ldvarg136, ldvarg135); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 14: ; if (ldv_state_variable_78 == 1) { qla83xx_iospace_config(qla27xx_isp_ops_group1); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla83xx_iospace_config(qla27xx_isp_ops_group1); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 15: ; if (ldv_state_variable_78 == 1) { qla2x00_initialize_adapter(qla27xx_isp_ops_group0); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla2x00_initialize_adapter(qla27xx_isp_ops_group0); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 16: ; if (ldv_state_variable_78 == 1) { qla24xx_get_flash_version(qla27xx_isp_ops_group0, ldvarg134); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_get_flash_version(qla27xx_isp_ops_group0, ldvarg134); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 17: ; if (ldv_state_variable_78 == 1) { qla24xx_disable_intrs(qla27xx_isp_ops_group1); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_disable_intrs(qla27xx_isp_ops_group1); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 18: ; if (ldv_state_variable_78 == 1) { qla81xx_nvram_config(qla27xx_isp_ops_group0); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla81xx_nvram_config(qla27xx_isp_ops_group0); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 19: ; if (ldv_state_variable_78 == 1) { qla24xx_pci_info_str(qla27xx_isp_ops_group0, ldvarg133); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_pci_info_str(qla27xx_isp_ops_group0, ldvarg133); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 20: ; if (ldv_state_variable_78 == 2) { qla25xx_read_optrom_data(qla27xx_isp_ops_group0, ldvarg132, 
ldvarg131, ldvarg130); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 21: ; if (ldv_state_variable_78 == 1) { qla83xx_beacon_blink(qla27xx_isp_ops_group0); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla83xx_beacon_blink(qla27xx_isp_ops_group0); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 22: ; if (ldv_state_variable_78 == 1) { qla24xx_beacon_off(qla27xx_isp_ops_group0); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_beacon_off(qla27xx_isp_ops_group0); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 23: ; if (ldv_state_variable_78 == 1) { qla24xx_abort_command(ldvarg129); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_abort_command(ldvarg129); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 24: ; if (ldv_state_variable_78 == 1) { qla27xx_fwdump(qla27xx_isp_ops_group0, ldvarg128); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla27xx_fwdump(qla27xx_isp_ops_group0, ldvarg128); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 25: ; if (ldv_state_variable_78 == 1) { qla24xx_config_rings(qla27xx_isp_ops_group0); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_config_rings(qla27xx_isp_ops_group0); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 26: ; if (ldv_state_variable_78 == 1) { qla81xx_load_risc(qla27xx_isp_ops_group0, ldvarg127); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla81xx_load_risc(qla27xx_isp_ops_group0, ldvarg127); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 27: ; if (ldv_state_variable_78 == 1) { qla25xx_pci_config(qla27xx_isp_ops_group0); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla25xx_pci_config(qla27xx_isp_ops_group0); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 28: ; if (ldv_state_variable_78 == 1) { qla24xx_login_fabric(qla27xx_isp_ops_group0, (int )ldvarg125, (int )ldvarg123, (int )ldvarg122, (int )ldvarg126, ldvarg121, (int )ldvarg124); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_login_fabric(qla27xx_isp_ops_group0, (int )ldvarg125, (int )ldvarg123, (int )ldvarg122, (int )ldvarg126, ldvarg121, (int )ldvarg124); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 29: ; if (ldv_state_variable_78 == 1) { qla24xx_beacon_on(qla27xx_isp_ops_group0); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_beacon_on(qla27xx_isp_ops_group0); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 30: ; if (ldv_state_variable_78 == 1) { qla24xx_prep_ms_fdmi_iocb(qla27xx_isp_ops_group0, ldvarg120, ldvarg119); ldv_state_variable_78 = 1; } else { } if (ldv_state_variable_78 == 2) { qla24xx_prep_ms_fdmi_iocb(qla27xx_isp_ops_group0, ldvarg120, ldvarg119); ldv_state_variable_78 = 2; } else { } goto ldv_68774; case 31: ; if (ldv_state_variable_78 == 2) { ldv_release_78(); ldv_state_variable_78 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_68774; case 32: ; if (ldv_state_variable_78 == 1) { ldv_probe_78(); ldv_state_variable_78 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_68774; default: ldv_stop(); } ldv_68774: ; } else { } goto ldv_68677; case 24: ; if (ldv_state_variable_48 != 0) { ldv_main_exported_48(); } else { } goto ldv_68677; case 25: ; if (ldv_state_variable_87 != 0) { tmp___118 = __VERIFIER_nondet_int(); switch (tmp___118) { case 0: ; if (ldv_state_variable_87 == 1) { 
qla2x00_reset_adapter(qla2100_isp_ops_group0); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_reset_adapter(qla2100_isp_ops_group0); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 1: ; if (ldv_state_variable_87 == 2) { qla2x00_write_optrom_data(qla2100_isp_ops_group0, ldvarg199, ldvarg198, ldvarg197); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 2: ; if (ldv_state_variable_87 == 1) { qla2x00_enable_intrs(qla2100_isp_ops_group1); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_enable_intrs(qla2100_isp_ops_group1); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 3: ; if (ldv_state_variable_87 == 1) { qla2100_intr_handler(ldvarg195, ldvarg196); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2100_intr_handler(ldvarg195, ldvarg196); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 4: ; if (ldv_state_variable_87 == 1) { qla2x00_abort_target(qla2100_isp_ops_group2, ldvarg194, ldvarg193); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_abort_target(qla2100_isp_ops_group2, ldvarg194, ldvarg193); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 5: ; if (ldv_state_variable_87 == 1) { qla2x00_update_fw_options(qla2100_isp_ops_group0); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_update_fw_options(qla2100_isp_ops_group0); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 6: ; if (ldv_state_variable_87 == 1) { qla2x00_abort_isp(qla2100_isp_ops_group0); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_abort_isp(qla2100_isp_ops_group0); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 7: ; if (ldv_state_variable_87 == 1) { qla2x00_start_scsi(ldvarg192); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_start_scsi(ldvarg192); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 8: ; if (ldv_state_variable_87 == 1) { qla2x00_chip_diag(qla2100_isp_ops_group0); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_chip_diag(qla2100_isp_ops_group0); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 9: ; if (ldv_state_variable_87 == 1) { qla2x00_fabric_logout(qla2100_isp_ops_group0, (int )ldvarg190, (int )ldvarg189, (int )ldvarg188, (int )ldvarg191); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_fabric_logout(qla2100_isp_ops_group0, (int )ldvarg190, (int )ldvarg189, (int )ldvarg188, (int )ldvarg191); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 10: ; if (ldv_state_variable_87 == 1) { qla2x00_reset_chip(qla2100_isp_ops_group0); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_reset_chip(qla2100_isp_ops_group0); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 11: ; if (ldv_state_variable_87 == 1) { qla2x00_prep_ms_iocb(qla2100_isp_ops_group0, ldvarg187, ldvarg186); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_prep_ms_iocb(qla2100_isp_ops_group0, ldvarg187, ldvarg186); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 12: ; if (ldv_state_variable_87 == 1) { qla2x00_lun_reset(qla2100_isp_ops_group2, ldvarg185, ldvarg184); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_lun_reset(qla2100_isp_ops_group2, ldvarg185, ldvarg184); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 13: ; if (ldv_state_variable_87 == 1) 
{ qla2x00_fw_version_str(qla2100_isp_ops_group0, ldvarg183, ldvarg182); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_fw_version_str(qla2100_isp_ops_group0, ldvarg183, ldvarg182); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 14: ; if (ldv_state_variable_87 == 1) { qla2x00_iospace_config(qla2100_isp_ops_group1); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_iospace_config(qla2100_isp_ops_group1); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 15: ; if (ldv_state_variable_87 == 1) { qla2x00_initialize_adapter(qla2100_isp_ops_group0); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_initialize_adapter(qla2100_isp_ops_group0); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 16: ; if (ldv_state_variable_87 == 1) { qla2x00_get_flash_version(qla2100_isp_ops_group0, ldvarg181); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_get_flash_version(qla2100_isp_ops_group0, ldvarg181); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 17: ; if (ldv_state_variable_87 == 1) { qla2x00_disable_intrs(qla2100_isp_ops_group1); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_disable_intrs(qla2100_isp_ops_group1); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 18: ; if (ldv_state_variable_87 == 1) { qla2x00_calc_iocbs_32((int )ldvarg180); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_calc_iocbs_32((int )ldvarg180); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 19: ; if (ldv_state_variable_87 == 1) { qla2x00_nvram_config(qla2100_isp_ops_group0); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_nvram_config(qla2100_isp_ops_group0); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 20: ; if (ldv_state_variable_87 == 1) { qla2x00_pci_info_str(qla2100_isp_ops_group0, ldvarg179); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_pci_info_str(qla2100_isp_ops_group0, ldvarg179); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 21: ; if (ldv_state_variable_87 == 2) { qla2x00_read_optrom_data(qla2100_isp_ops_group0, ldvarg178, ldvarg177, ldvarg176); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 22: ; if (ldv_state_variable_87 == 1) { qla2x00_build_scsi_iocbs_32(ldvarg173, ldvarg175, (int )ldvarg174); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_build_scsi_iocbs_32(ldvarg173, ldvarg175, (int )ldvarg174); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 23: ; if (ldv_state_variable_87 == 1) { qla2x00_abort_command(ldvarg172); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_abort_command(ldvarg172); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 24: ; if (ldv_state_variable_87 == 1) { qla2x00_config_rings(qla2100_isp_ops_group0); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_config_rings(qla2100_isp_ops_group0); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 25: ; if (ldv_state_variable_87 == 1) { qla2100_fw_dump(qla2100_isp_ops_group0, ldvarg171); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2100_fw_dump(qla2100_isp_ops_group0, ldvarg171); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 26: ; if (ldv_state_variable_87 == 1) { qla2x00_load_risc(qla2100_isp_ops_group0, ldvarg170); 
ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_load_risc(qla2100_isp_ops_group0, ldvarg170); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 27: ; if (ldv_state_variable_87 == 1) { qla2100_pci_config(qla2100_isp_ops_group0); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2100_pci_config(qla2100_isp_ops_group0); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 28: ; if (ldv_state_variable_87 == 1) { qla2x00_login_fabric(qla2100_isp_ops_group0, (int )ldvarg168, (int )ldvarg166, (int )ldvarg165, (int )ldvarg169, ldvarg164, (int )ldvarg167); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_login_fabric(qla2100_isp_ops_group0, (int )ldvarg168, (int )ldvarg166, (int )ldvarg165, (int )ldvarg169, ldvarg164, (int )ldvarg167); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 29: ; if (ldv_state_variable_87 == 1) { qla2x00_write_nvram_data(qla2100_isp_ops_group0, ldvarg163, ldvarg162, ldvarg161); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_write_nvram_data(qla2100_isp_ops_group0, ldvarg163, ldvarg162, ldvarg161); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 30: ; if (ldv_state_variable_87 == 1) { qla2x00_read_nvram_data(qla2100_isp_ops_group0, ldvarg160, ldvarg159, ldvarg158); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_read_nvram_data(qla2100_isp_ops_group0, ldvarg160, ldvarg159, ldvarg158); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 31: ; if (ldv_state_variable_87 == 1) { qla2x00_prep_ms_fdmi_iocb(qla2100_isp_ops_group0, ldvarg157, ldvarg156); ldv_state_variable_87 = 1; } else { } if (ldv_state_variable_87 == 2) { qla2x00_prep_ms_fdmi_iocb(qla2100_isp_ops_group0, ldvarg157, ldvarg156); ldv_state_variable_87 = 2; } else { } goto ldv_68811; case 32: ; if (ldv_state_variable_87 == 2) { ldv_release_87(); ldv_state_variable_87 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_68811; case 33: ; if (ldv_state_variable_87 == 1) { ldv_probe_87(); ldv_state_variable_87 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_68811; default: ldv_stop(); } ldv_68811: ; } else { } goto ldv_68677; case 26: ; if (ldv_state_variable_77 != 0) { tmp___119 = __VERIFIER_nondet_int(); switch (tmp___119) { case 0: ; if (ldv_state_variable_77 == 3) { qla2xxx_pci_resume(qla2xxx_err_handler_group0); ldv_state_variable_77 = 2; } else { } goto ldv_68848; case 1: ; if (ldv_state_variable_77 == 1) { qla2xxx_pci_slot_reset(qla2xxx_err_handler_group0); ldv_state_variable_77 = 1; } else { } if (ldv_state_variable_77 == 3) { qla2xxx_pci_slot_reset(qla2xxx_err_handler_group0); ldv_state_variable_77 = 3; } else { } if (ldv_state_variable_77 == 2) { qla2xxx_pci_slot_reset(qla2xxx_err_handler_group0); ldv_state_variable_77 = 2; } else { } goto ldv_68848; case 2: ; if (ldv_state_variable_77 == 1) { qla2xxx_pci_error_detected(qla2xxx_err_handler_group0, (pci_channel_state_t )ldvarg200); ldv_state_variable_77 = 1; } else { } if (ldv_state_variable_77 == 3) { qla2xxx_pci_error_detected(qla2xxx_err_handler_group0, (pci_channel_state_t )ldvarg200); ldv_state_variable_77 = 3; } else { } if (ldv_state_variable_77 == 2) { qla2xxx_pci_error_detected(qla2xxx_err_handler_group0, (pci_channel_state_t )ldvarg200); ldv_state_variable_77 = 2; } else { } goto ldv_68848; case 3: ; if (ldv_state_variable_77 == 1) { qla2xxx_pci_mmio_enabled(qla2xxx_err_handler_group0); ldv_state_variable_77 = 1; } else { } if (ldv_state_variable_77 == 3) { 
qla2xxx_pci_mmio_enabled(qla2xxx_err_handler_group0); ldv_state_variable_77 = 3; } else { } if (ldv_state_variable_77 == 2) { qla2xxx_pci_mmio_enabled(qla2xxx_err_handler_group0); ldv_state_variable_77 = 2; } else { } goto ldv_68848; case 4: ; if (ldv_state_variable_77 == 2) { ldv_suspend_77(); ldv_state_variable_77 = 3; } else { } goto ldv_68848; case 5: ; if (ldv_state_variable_77 == 3) { ldv_release_77(); ldv_state_variable_77 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_77 == 2) { ldv_release_77(); ldv_state_variable_77 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_68848; case 6: ; if (ldv_state_variable_77 == 1) { ldv_probe_77(); ldv_state_variable_77 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_68848; default: ldv_stop(); } ldv_68848: ; } else { } goto ldv_68677; case 27: ; if (ldv_state_variable_65 != 0) { ldv_main_exported_65(); } else { } goto ldv_68677; case 28: ; goto ldv_68677; case 29: ; if (ldv_state_variable_50 != 0) { ldv_main_exported_50(); } else { } goto ldv_68677; case 30: ; if (ldv_state_variable_39 != 0) { ldv_main_exported_39(); } else { } goto ldv_68677; case 31: ; if (ldv_state_variable_64 != 0) { ldv_main_exported_64(); } else { } goto ldv_68677; case 32: ; if (ldv_state_variable_58 != 0) { ldv_main_exported_58(); } else { } goto ldv_68677; case 33: ; if (ldv_state_variable_41 != 0) { ldv_main_exported_41(); } else { } goto ldv_68677; case 34: ; goto ldv_68677; case 35: ; goto ldv_68677; case 36: ; if (ldv_state_variable_81 != 0) { tmp___120 = __VERIFIER_nondet_int(); switch (tmp___120) { case 0: ; if (ldv_state_variable_81 == 1) { qla24xx_reset_adapter(qla8044_isp_ops_group0); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla24xx_reset_adapter(qla8044_isp_ops_group0); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 1: ; if (ldv_state_variable_81 == 2) { qla8044_write_optrom_data(qla8044_isp_ops_group0, ldvarg255, ldvarg254, ldvarg253); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 2: ; if (ldv_state_variable_81 == 1) { qla82xx_enable_intrs(qla8044_isp_ops_group1); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla82xx_enable_intrs(qla8044_isp_ops_group1); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 3: ; if (ldv_state_variable_81 == 1) { qla8044_intr_handler(ldvarg251, ldvarg252); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla8044_intr_handler(ldvarg251, ldvarg252); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 4: ; if (ldv_state_variable_81 == 1) { qla24xx_abort_target(qla8044_isp_ops_group2, ldvarg250, ldvarg249); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla24xx_abort_target(qla8044_isp_ops_group2, ldvarg250, ldvarg249); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 5: ; if (ldv_state_variable_81 == 1) { qla24xx_update_fw_options(qla8044_isp_ops_group0); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla24xx_update_fw_options(qla8044_isp_ops_group0); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 6: ; if (ldv_state_variable_81 == 1) { qla8044_abort_isp(qla8044_isp_ops_group0); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla8044_abort_isp(qla8044_isp_ops_group0); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 7: ; if (ldv_state_variable_81 == 1) { qla82xx_start_scsi(ldvarg248); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla82xx_start_scsi(ldvarg248); 
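/* case 36 (ldv_state_variable_81): qla8044 isp_ops group; ISP8044-specific entry points (qla8044_intr_handler, qla8044_abort_isp, qla8044_fw_dump, qla8044_read/write_optrom_data) are exercised alongside the shared qla24xx_* and qla82xx_* helpers. */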
ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 8: ; if (ldv_state_variable_81 == 1) { qla24xx_chip_diag(qla8044_isp_ops_group0); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla24xx_chip_diag(qla8044_isp_ops_group0); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 9: ; if (ldv_state_variable_81 == 1) { qla24xx_fabric_logout(qla8044_isp_ops_group0, (int )ldvarg246, (int )ldvarg245, (int )ldvarg244, (int )ldvarg247); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla24xx_fabric_logout(qla8044_isp_ops_group0, (int )ldvarg246, (int )ldvarg245, (int )ldvarg244, (int )ldvarg247); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 10: ; if (ldv_state_variable_81 == 1) { qla82xx_reset_chip(qla8044_isp_ops_group0); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla82xx_reset_chip(qla8044_isp_ops_group0); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 11: ; if (ldv_state_variable_81 == 1) { qla24xx_prep_ms_iocb(qla8044_isp_ops_group0, ldvarg243, ldvarg242); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla24xx_prep_ms_iocb(qla8044_isp_ops_group0, ldvarg243, ldvarg242); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 12: ; if (ldv_state_variable_81 == 1) { qla24xx_lun_reset(qla8044_isp_ops_group2, ldvarg241, ldvarg240); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla24xx_lun_reset(qla8044_isp_ops_group2, ldvarg241, ldvarg240); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 13: ; if (ldv_state_variable_81 == 1) { qla24xx_fw_version_str(qla8044_isp_ops_group0, ldvarg239, ldvarg238); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla24xx_fw_version_str(qla8044_isp_ops_group0, ldvarg239, ldvarg238); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 14: ; if (ldv_state_variable_81 == 1) { qla82xx_iospace_config(qla8044_isp_ops_group1); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla82xx_iospace_config(qla8044_isp_ops_group1); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 15: ; if (ldv_state_variable_81 == 1) { qla2x00_initialize_adapter(qla8044_isp_ops_group0); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla2x00_initialize_adapter(qla8044_isp_ops_group0); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 16: ; if (ldv_state_variable_81 == 1) { qla82xx_get_flash_version(qla8044_isp_ops_group0, ldvarg237); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla82xx_get_flash_version(qla8044_isp_ops_group0, ldvarg237); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 17: ; if (ldv_state_variable_81 == 1) { qla82xx_disable_intrs(qla8044_isp_ops_group1); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla82xx_disable_intrs(qla8044_isp_ops_group1); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 18: ; if (ldv_state_variable_81 == 1) { qla81xx_nvram_config(qla8044_isp_ops_group0); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla81xx_nvram_config(qla8044_isp_ops_group0); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 19: ; if (ldv_state_variable_81 == 1) { qla24xx_pci_info_str(qla8044_isp_ops_group0, ldvarg236); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla24xx_pci_info_str(qla8044_isp_ops_group0, ldvarg236); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 20: ; if 
(ldv_state_variable_81 == 2) { qla8044_read_optrom_data(qla8044_isp_ops_group0, ldvarg235, ldvarg234, ldvarg233); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 21: ; if (ldv_state_variable_81 == 1) { qla82xx_beacon_off(qla8044_isp_ops_group0); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla82xx_beacon_off(qla8044_isp_ops_group0); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 22: ; if (ldv_state_variable_81 == 1) { qla24xx_abort_command(ldvarg232); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla24xx_abort_command(ldvarg232); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 23: ; if (ldv_state_variable_81 == 1) { qla8044_fw_dump(qla8044_isp_ops_group0, ldvarg231); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla8044_fw_dump(qla8044_isp_ops_group0, ldvarg231); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 24: ; if (ldv_state_variable_81 == 1) { qla82xx_config_rings(qla8044_isp_ops_group0); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla82xx_config_rings(qla8044_isp_ops_group0); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 25: ; if (ldv_state_variable_81 == 1) { qla82xx_load_risc(qla8044_isp_ops_group0, ldvarg230); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla82xx_load_risc(qla8044_isp_ops_group0, ldvarg230); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 26: ; if (ldv_state_variable_81 == 1) { qla82xx_pci_config(qla8044_isp_ops_group0); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla82xx_pci_config(qla8044_isp_ops_group0); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 27: ; if (ldv_state_variable_81 == 1) { qla24xx_login_fabric(qla8044_isp_ops_group0, (int )ldvarg228, (int )ldvarg226, (int )ldvarg225, (int )ldvarg229, ldvarg224, (int )ldvarg227); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla24xx_login_fabric(qla8044_isp_ops_group0, (int )ldvarg228, (int )ldvarg226, (int )ldvarg225, (int )ldvarg229, ldvarg224, (int )ldvarg227); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 28: ; if (ldv_state_variable_81 == 1) { qla82xx_beacon_on(qla8044_isp_ops_group0); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla82xx_beacon_on(qla8044_isp_ops_group0); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 29: ; if (ldv_state_variable_81 == 1) { qla24xx_prep_ms_fdmi_iocb(qla8044_isp_ops_group0, ldvarg223, ldvarg222); ldv_state_variable_81 = 1; } else { } if (ldv_state_variable_81 == 2) { qla24xx_prep_ms_fdmi_iocb(qla8044_isp_ops_group0, ldvarg223, ldvarg222); ldv_state_variable_81 = 2; } else { } goto ldv_68867; case 30: ; if (ldv_state_variable_81 == 2) { ldv_release_81(); ldv_state_variable_81 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_68867; case 31: ; if (ldv_state_variable_81 == 1) { ldv_probe_81(); ldv_state_variable_81 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_68867; default: ldv_stop(); } ldv_68867: ; } else { } goto ldv_68677; case 37: ; if (ldv_state_variable_52 != 0) { ldv_main_exported_52(); } else { } goto ldv_68677; case 38: ; if (ldv_state_variable_60 != 0) { ldv_main_exported_60(); } else { } goto ldv_68677; case 39: ; if (ldv_state_variable_56 != 0) { ldv_main_exported_56(); } else { } goto ldv_68677; case 40: ; if (ldv_state_variable_73 != 0) { ldv_main_exported_73(); } else { } goto ldv_68677; case 41: ; if (ldv_state_variable_66 != 0) { 
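/* The ldv_main_exported_NN() calls drive the remaining modelled interfaces (device and bin attributes, fc_function_templates, ...); they are defined elsewhere in this generated file and follow the same state-machine pattern. */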
ldv_main_exported_66(); } else { } goto ldv_68677; case 42: ; if (ldv_state_variable_45 != 0) { ldv_main_exported_45(); } else { } goto ldv_68677; case 43: ; if (ldv_state_variable_86 != 0) { tmp___121 = __VERIFIER_nondet_int(); switch (tmp___121) { case 0: ; if (ldv_state_variable_86 == 1) { qla2x00_reset_adapter(qla2300_isp_ops_group0); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_reset_adapter(qla2300_isp_ops_group0); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 1: ; if (ldv_state_variable_86 == 1) { qla2x00_enable_intrs(qla2300_isp_ops_group1); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_enable_intrs(qla2300_isp_ops_group1); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 2: ; if (ldv_state_variable_86 == 2) { qla2x00_write_optrom_data(qla2300_isp_ops_group0, ldvarg323, ldvarg322, ldvarg321); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 3: ; if (ldv_state_variable_86 == 1) { qla2300_intr_handler(ldvarg319, ldvarg320); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2300_intr_handler(ldvarg319, ldvarg320); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 4: ; if (ldv_state_variable_86 == 1) { qla2x00_abort_target(qla2300_isp_ops_group2, ldvarg318, ldvarg317); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_abort_target(qla2300_isp_ops_group2, ldvarg318, ldvarg317); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 5: ; if (ldv_state_variable_86 == 1) { qla2x00_update_fw_options(qla2300_isp_ops_group0); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_update_fw_options(qla2300_isp_ops_group0); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 6: ; if (ldv_state_variable_86 == 1) { qla2x00_abort_isp(qla2300_isp_ops_group0); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_abort_isp(qla2300_isp_ops_group0); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 7: ; if (ldv_state_variable_86 == 1) { qla2x00_start_scsi(ldvarg316); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_start_scsi(ldvarg316); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 8: ; if (ldv_state_variable_86 == 1) { qla2x00_chip_diag(qla2300_isp_ops_group0); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_chip_diag(qla2300_isp_ops_group0); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 9: ; if (ldv_state_variable_86 == 1) { qla2x00_fabric_logout(qla2300_isp_ops_group0, (int )ldvarg314, (int )ldvarg313, (int )ldvarg312, (int )ldvarg315); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_fabric_logout(qla2300_isp_ops_group0, (int )ldvarg314, (int )ldvarg313, (int )ldvarg312, (int )ldvarg315); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 10: ; if (ldv_state_variable_86 == 1) { qla2x00_reset_chip(qla2300_isp_ops_group0); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_reset_chip(qla2300_isp_ops_group0); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 11: ; if (ldv_state_variable_86 == 1) { qla2x00_prep_ms_iocb(qla2300_isp_ops_group0, ldvarg311, ldvarg310); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_prep_ms_iocb(qla2300_isp_ops_group0, ldvarg311, ldvarg310); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 12: ; if (ldv_state_variable_86 == 1) { 
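/* case 43 (ldv_state_variable_86): qla2300 isp_ops group; mostly the legacy qla2x00_* helpers plus the 2300-specific interrupt handler, PCI config and firmware dump (qla2300_intr_handler, qla2300_pci_config, qla2300_fw_dump). */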
qla2x00_lun_reset(qla2300_isp_ops_group2, ldvarg309, ldvarg308); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_lun_reset(qla2300_isp_ops_group2, ldvarg309, ldvarg308); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 13: ; if (ldv_state_variable_86 == 1) { qla2x00_fw_version_str(qla2300_isp_ops_group0, ldvarg307, ldvarg306); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_fw_version_str(qla2300_isp_ops_group0, ldvarg307, ldvarg306); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 14: ; if (ldv_state_variable_86 == 1) { qla2x00_iospace_config(qla2300_isp_ops_group1); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_iospace_config(qla2300_isp_ops_group1); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 15: ; if (ldv_state_variable_86 == 1) { qla2x00_initialize_adapter(qla2300_isp_ops_group0); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_initialize_adapter(qla2300_isp_ops_group0); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 16: ; if (ldv_state_variable_86 == 1) { qla2x00_disable_intrs(qla2300_isp_ops_group1); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_disable_intrs(qla2300_isp_ops_group1); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 17: ; if (ldv_state_variable_86 == 1) { qla2x00_get_flash_version(qla2300_isp_ops_group0, ldvarg305); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_get_flash_version(qla2300_isp_ops_group0, ldvarg305); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 18: ; if (ldv_state_variable_86 == 1) { qla2x00_calc_iocbs_32((int )ldvarg304); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_calc_iocbs_32((int )ldvarg304); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 19: ; if (ldv_state_variable_86 == 1) { qla2x00_nvram_config(qla2300_isp_ops_group0); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_nvram_config(qla2300_isp_ops_group0); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 20: ; if (ldv_state_variable_86 == 1) { qla2x00_pci_info_str(qla2300_isp_ops_group0, ldvarg303); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_pci_info_str(qla2300_isp_ops_group0, ldvarg303); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 21: ; if (ldv_state_variable_86 == 2) { qla2x00_read_optrom_data(qla2300_isp_ops_group0, ldvarg302, ldvarg301, ldvarg300); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 22: ; if (ldv_state_variable_86 == 1) { qla2x00_beacon_blink(qla2300_isp_ops_group0); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_beacon_blink(qla2300_isp_ops_group0); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 23: ; if (ldv_state_variable_86 == 1) { qla2x00_beacon_off(qla2300_isp_ops_group0); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_beacon_off(qla2300_isp_ops_group0); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 24: ; if (ldv_state_variable_86 == 1) { qla2x00_build_scsi_iocbs_32(ldvarg297, ldvarg299, (int )ldvarg298); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_build_scsi_iocbs_32(ldvarg297, ldvarg299, (int )ldvarg298); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 25: ; if (ldv_state_variable_86 == 1) { 
qla2x00_abort_command(ldvarg296); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_abort_command(ldvarg296); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 26: ; if (ldv_state_variable_86 == 1) { qla2300_fw_dump(qla2300_isp_ops_group0, ldvarg295); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2300_fw_dump(qla2300_isp_ops_group0, ldvarg295); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 27: ; if (ldv_state_variable_86 == 1) { qla2x00_config_rings(qla2300_isp_ops_group0); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_config_rings(qla2300_isp_ops_group0); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 28: ; if (ldv_state_variable_86 == 1) { qla2x00_load_risc(qla2300_isp_ops_group0, ldvarg294); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_load_risc(qla2300_isp_ops_group0, ldvarg294); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 29: ; if (ldv_state_variable_86 == 1) { qla2300_pci_config(qla2300_isp_ops_group0); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2300_pci_config(qla2300_isp_ops_group0); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 30: ; if (ldv_state_variable_86 == 1) { qla2x00_login_fabric(qla2300_isp_ops_group0, (int )ldvarg292, (int )ldvarg290, (int )ldvarg289, (int )ldvarg293, ldvarg288, (int )ldvarg291); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_login_fabric(qla2300_isp_ops_group0, (int )ldvarg292, (int )ldvarg290, (int )ldvarg289, (int )ldvarg293, ldvarg288, (int )ldvarg291); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 31: ; if (ldv_state_variable_86 == 1) { qla2x00_write_nvram_data(qla2300_isp_ops_group0, ldvarg287, ldvarg286, ldvarg285); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_write_nvram_data(qla2300_isp_ops_group0, ldvarg287, ldvarg286, ldvarg285); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 32: ; if (ldv_state_variable_86 == 1) { qla2x00_beacon_on(qla2300_isp_ops_group0); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_beacon_on(qla2300_isp_ops_group0); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 33: ; if (ldv_state_variable_86 == 1) { qla2x00_read_nvram_data(qla2300_isp_ops_group0, ldvarg284, ldvarg283, ldvarg282); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_read_nvram_data(qla2300_isp_ops_group0, ldvarg284, ldvarg283, ldvarg282); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 34: ; if (ldv_state_variable_86 == 1) { qla2x00_prep_ms_fdmi_iocb(qla2300_isp_ops_group0, ldvarg281, ldvarg280); ldv_state_variable_86 = 1; } else { } if (ldv_state_variable_86 == 2) { qla2x00_prep_ms_fdmi_iocb(qla2300_isp_ops_group0, ldvarg281, ldvarg280); ldv_state_variable_86 = 2; } else { } goto ldv_68908; case 35: ; if (ldv_state_variable_86 == 2) { ldv_release_86(); ldv_state_variable_86 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_68908; case 36: ; if (ldv_state_variable_86 == 1) { ldv_probe_86(); ldv_state_variable_86 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_68908; default: ldv_stop(); } ldv_68908: ; } else { } goto ldv_68677; case 44: ; if (ldv_state_variable_76 != 0) { tmp___122 = __VERIFIER_nondet_int(); switch (tmp___122) { case 0: ; if (ldv_state_variable_76 == 1) { ldv_retval_1 = qla2x00_probe_one(qla2xxx_pci_driver_group1, (struct pci_device_id const 
*)ldvarg324); if (ldv_retval_1 == 0) { ldv_state_variable_76 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_68948; case 1: ; if (ldv_state_variable_76 == 2) { qla2x00_shutdown(qla2xxx_pci_driver_group1); ldv_state_variable_76 = 2; } else { } goto ldv_68948; case 2: ; if (ldv_state_variable_76 == 2) { qla2x00_remove_one(qla2xxx_pci_driver_group1); ldv_state_variable_76 = 1; } else { } goto ldv_68948; default: ldv_stop(); } ldv_68948: ; } else { } goto ldv_68677; case 45: ; goto ldv_68677; case 46: ; if (ldv_state_variable_62 != 0) { ldv_main_exported_62(); } else { } goto ldv_68677; case 47: ; if (ldv_state_variable_54 != 0) { ldv_main_exported_54(); } else { } goto ldv_68677; case 48: ; if (ldv_state_variable_67 != 0) { ldv_main_exported_67(); } else { } goto ldv_68677; case 49: ; if (ldv_state_variable_70 != 0) { ldv_main_exported_70(); } else { } goto ldv_68677; case 50: ; if (ldv_state_variable_68 != 0) { ldv_main_exported_68(); } else { } goto ldv_68677; case 51: ; if (ldv_state_variable_2 != 0) { invoke_work_2(); } else { } goto ldv_68677; case 52: ; goto ldv_68677; case 53: ; if (ldv_state_variable_1 != 0) { invoke_work_1(); } else { } goto ldv_68677; case 54: ; if (ldv_state_variable_88 != 0) { tmp___123 = __VERIFIER_nondet_int(); switch (tmp___123) { case 0: ; if (ldv_state_variable_88 == 1) { qla2xxx_scan_finished(qla2xxx_driver_template_group1, ldvarg350); ldv_state_variable_88 = 1; } else { } goto ldv_68963; case 1: ; if (ldv_state_variable_88 == 1) { qla2xxx_slave_configure(qla2xxx_driver_template_group2); ldv_state_variable_88 = 1; } else { } goto ldv_68963; case 2: ; if (ldv_state_variable_88 == 1) { scsi_change_queue_depth(qla2xxx_driver_template_group2, ldvarg349); ldv_state_variable_88 = 1; } else { } goto ldv_68963; case 3: ; if (ldv_state_variable_88 == 1) { qla2xxx_queuecommand(qla2xxx_driver_template_group1, qla2xxx_driver_template_group0); ldv_state_variable_88 = 1; } else { } goto ldv_68963; case 4: ; if (ldv_state_variable_88 == 1) { qla2xxx_eh_target_reset(qla2xxx_driver_template_group0); ldv_state_variable_88 = 1; } else { } goto ldv_68963; case 5: ; if (ldv_state_variable_88 == 1) { qla2xxx_eh_device_reset(qla2xxx_driver_template_group0); ldv_state_variable_88 = 1; } else { } goto ldv_68963; case 6: ; if (ldv_state_variable_88 == 1) { qla2xxx_scan_start(qla2xxx_driver_template_group1); ldv_state_variable_88 = 1; } else { } goto ldv_68963; case 7: ; if (ldv_state_variable_88 == 1) { qla2xxx_eh_abort(qla2xxx_driver_template_group0); ldv_state_variable_88 = 1; } else { } goto ldv_68963; case 8: ; if (ldv_state_variable_88 == 1) { qla2xxx_slave_alloc(qla2xxx_driver_template_group2); ldv_state_variable_88 = 1; } else { } goto ldv_68963; case 9: ; if (ldv_state_variable_88 == 1) { qla2xxx_slave_destroy(qla2xxx_driver_template_group2); ldv_state_variable_88 = 1; } else { } goto ldv_68963; case 10: ; if (ldv_state_variable_88 == 1) { qla2xxx_eh_bus_reset(qla2xxx_driver_template_group0); ldv_state_variable_88 = 1; } else { } goto ldv_68963; case 11: ; if (ldv_state_variable_88 == 1) { qla2xxx_eh_host_reset(qla2xxx_driver_template_group0); ldv_state_variable_88 = 1; } else { } goto ldv_68963; default: ldv_stop(); } ldv_68963: ; } else { } goto ldv_68677; case 55: ; goto ldv_68677; case 56: ; if (ldv_state_variable_82 != 0) { tmp___124 = __VERIFIER_nondet_int(); switch (tmp___124) { case 0: ; if (ldv_state_variable_82 == 1) { qla24xx_reset_adapter(qla82xx_isp_ops_group0); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { 
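/* case 56 (ldv_state_variable_82): qla82xx isp_ops group, mirroring the qla8044 block above but with qla82xx_abort_isp(), qla82xx_fw_dump() and the qla82xx option-ROM accessors. */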
qla24xx_reset_adapter(qla82xx_isp_ops_group0); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 1: ; if (ldv_state_variable_82 == 1) { qla82xx_enable_intrs(qla82xx_isp_ops_group1); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_enable_intrs(qla82xx_isp_ops_group1); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 2: ; if (ldv_state_variable_82 == 2) { qla82xx_write_optrom_data(qla82xx_isp_ops_group0, ldvarg390, ldvarg389, ldvarg388); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 3: ; if (ldv_state_variable_82 == 1) { qla82xx_intr_handler(ldvarg386, ldvarg387); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_intr_handler(ldvarg386, ldvarg387); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 4: ; if (ldv_state_variable_82 == 1) { qla24xx_abort_target(qla82xx_isp_ops_group2, ldvarg385, ldvarg384); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla24xx_abort_target(qla82xx_isp_ops_group2, ldvarg385, ldvarg384); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 5: ; if (ldv_state_variable_82 == 1) { qla24xx_update_fw_options(qla82xx_isp_ops_group0); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla24xx_update_fw_options(qla82xx_isp_ops_group0); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 6: ; if (ldv_state_variable_82 == 1) { qla82xx_start_scsi(ldvarg383); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_start_scsi(ldvarg383); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 7: ; if (ldv_state_variable_82 == 1) { qla82xx_abort_isp(qla82xx_isp_ops_group0); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_abort_isp(qla82xx_isp_ops_group0); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 8: ; if (ldv_state_variable_82 == 1) { qla24xx_chip_diag(qla82xx_isp_ops_group0); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla24xx_chip_diag(qla82xx_isp_ops_group0); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 9: ; if (ldv_state_variable_82 == 1) { qla24xx_fabric_logout(qla82xx_isp_ops_group0, (int )ldvarg381, (int )ldvarg380, (int )ldvarg379, (int )ldvarg382); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla24xx_fabric_logout(qla82xx_isp_ops_group0, (int )ldvarg381, (int )ldvarg380, (int )ldvarg379, (int )ldvarg382); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 10: ; if (ldv_state_variable_82 == 1) { qla82xx_reset_chip(qla82xx_isp_ops_group0); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_reset_chip(qla82xx_isp_ops_group0); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 11: ; if (ldv_state_variable_82 == 1) { qla24xx_prep_ms_iocb(qla82xx_isp_ops_group0, ldvarg378, ldvarg377); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla24xx_prep_ms_iocb(qla82xx_isp_ops_group0, ldvarg378, ldvarg377); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 12: ; if (ldv_state_variable_82 == 1) { qla24xx_lun_reset(qla82xx_isp_ops_group2, ldvarg376, ldvarg375); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla24xx_lun_reset(qla82xx_isp_ops_group2, ldvarg376, ldvarg375); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 13: ; if (ldv_state_variable_82 == 1) { qla24xx_fw_version_str(qla82xx_isp_ops_group0, ldvarg374, ldvarg373); ldv_state_variable_82 = 1; } else { } if 
(ldv_state_variable_82 == 2) { qla24xx_fw_version_str(qla82xx_isp_ops_group0, ldvarg374, ldvarg373); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 14: ; if (ldv_state_variable_82 == 1) { qla82xx_iospace_config(qla82xx_isp_ops_group1); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_iospace_config(qla82xx_isp_ops_group1); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 15: ; if (ldv_state_variable_82 == 1) { qla2x00_initialize_adapter(qla82xx_isp_ops_group0); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla2x00_initialize_adapter(qla82xx_isp_ops_group0); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 16: ; if (ldv_state_variable_82 == 1) { qla82xx_disable_intrs(qla82xx_isp_ops_group1); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_disable_intrs(qla82xx_isp_ops_group1); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 17: ; if (ldv_state_variable_82 == 1) { qla82xx_get_flash_version(qla82xx_isp_ops_group0, ldvarg372); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_get_flash_version(qla82xx_isp_ops_group0, ldvarg372); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 18: ; if (ldv_state_variable_82 == 1) { qla81xx_nvram_config(qla82xx_isp_ops_group0); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla81xx_nvram_config(qla82xx_isp_ops_group0); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 19: ; if (ldv_state_variable_82 == 1) { qla24xx_pci_info_str(qla82xx_isp_ops_group0, ldvarg371); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla24xx_pci_info_str(qla82xx_isp_ops_group0, ldvarg371); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 20: ; if (ldv_state_variable_82 == 2) { qla82xx_read_optrom_data(qla82xx_isp_ops_group0, ldvarg370, ldvarg369, ldvarg368); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 21: ; if (ldv_state_variable_82 == 1) { qla82xx_beacon_off(qla82xx_isp_ops_group0); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_beacon_off(qla82xx_isp_ops_group0); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 22: ; if (ldv_state_variable_82 == 1) { qla24xx_abort_command(ldvarg367); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla24xx_abort_command(ldvarg367); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 23: ; if (ldv_state_variable_82 == 1) { qla82xx_fw_dump(qla82xx_isp_ops_group0, ldvarg366); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_fw_dump(qla82xx_isp_ops_group0, ldvarg366); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 24: ; if (ldv_state_variable_82 == 1) { qla82xx_config_rings(qla82xx_isp_ops_group0); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_config_rings(qla82xx_isp_ops_group0); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 25: ; if (ldv_state_variable_82 == 1) { qla82xx_load_risc(qla82xx_isp_ops_group0, ldvarg365); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_load_risc(qla82xx_isp_ops_group0, ldvarg365); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 26: ; if (ldv_state_variable_82 == 1) { qla82xx_pci_config(qla82xx_isp_ops_group0); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_pci_config(qla82xx_isp_ops_group0); ldv_state_variable_82 = 2; } else { } goto 
ldv_68979; case 27: ; if (ldv_state_variable_82 == 1) { qla24xx_login_fabric(qla82xx_isp_ops_group0, (int )ldvarg363, (int )ldvarg361, (int )ldvarg360, (int )ldvarg364, ldvarg359, (int )ldvarg362); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla24xx_login_fabric(qla82xx_isp_ops_group0, (int )ldvarg363, (int )ldvarg361, (int )ldvarg360, (int )ldvarg364, ldvarg359, (int )ldvarg362); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 28: ; if (ldv_state_variable_82 == 1) { qla24xx_write_nvram_data(qla82xx_isp_ops_group0, ldvarg358, ldvarg357, ldvarg356); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla24xx_write_nvram_data(qla82xx_isp_ops_group0, ldvarg358, ldvarg357, ldvarg356); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 29: ; if (ldv_state_variable_82 == 1) { qla82xx_beacon_on(qla82xx_isp_ops_group0); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla82xx_beacon_on(qla82xx_isp_ops_group0); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 30: ; if (ldv_state_variable_82 == 1) { qla24xx_read_nvram_data(qla82xx_isp_ops_group0, ldvarg355, ldvarg354, ldvarg353); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla24xx_read_nvram_data(qla82xx_isp_ops_group0, ldvarg355, ldvarg354, ldvarg353); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 31: ; if (ldv_state_variable_82 == 1) { qla24xx_prep_ms_fdmi_iocb(qla82xx_isp_ops_group0, ldvarg352, ldvarg351); ldv_state_variable_82 = 1; } else { } if (ldv_state_variable_82 == 2) { qla24xx_prep_ms_fdmi_iocb(qla82xx_isp_ops_group0, ldvarg352, ldvarg351); ldv_state_variable_82 = 2; } else { } goto ldv_68979; case 32: ; if (ldv_state_variable_82 == 2) { ldv_release_82(); ldv_state_variable_82 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_68979; case 33: ; if (ldv_state_variable_82 == 1) { ldv_probe_82(); ldv_state_variable_82 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_68979; default: ldv_stop(); } ldv_68979: ; } else { } goto ldv_68677; case 57: ; goto ldv_68677; case 58: ; goto ldv_68677; case 59: ; if (ldv_state_variable_83 != 0) { tmp___125 = __VERIFIER_nondet_int(); switch (tmp___125) { case 0: ; if (ldv_state_variable_83 == 1) { qla24xx_reset_adapter(qla81xx_isp_ops_group0); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_reset_adapter(qla81xx_isp_ops_group0); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 1: ; if (ldv_state_variable_83 == 2) { qla24xx_write_optrom_data(qla81xx_isp_ops_group0, ldvarg424, ldvarg423, ldvarg422); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 2: ; if (ldv_state_variable_83 == 1) { qla24xx_enable_intrs(qla81xx_isp_ops_group1); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_enable_intrs(qla81xx_isp_ops_group1); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 3: ; if (ldv_state_variable_83 == 1) { qla24xx_intr_handler(ldvarg420, ldvarg421); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_intr_handler(ldvarg420, ldvarg421); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 4: ; if (ldv_state_variable_83 == 1) { qla24xx_abort_target(qla81xx_isp_ops_group2, ldvarg419, ldvarg418); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_abort_target(qla81xx_isp_ops_group2, ldvarg419, ldvarg418); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 5: ; if (ldv_state_variable_83 == 1) { 
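/* case 59 (ldv_state_variable_83): qla81xx isp_ops group; note the 81xx-specific firmware handling (qla81xx_update_fw_options, qla81xx_nvram_config, qla81xx_load_risc, qla81xx_fw_dump) combined with qla2x00_abort_isp() and the qla24xx_* fabric helpers. */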
qla81xx_update_fw_options(qla81xx_isp_ops_group0); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla81xx_update_fw_options(qla81xx_isp_ops_group0); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 6: ; if (ldv_state_variable_83 == 1) { qla2x00_abort_isp(qla81xx_isp_ops_group0); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla2x00_abort_isp(qla81xx_isp_ops_group0); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 7: ; if (ldv_state_variable_83 == 1) { qla24xx_dif_start_scsi(ldvarg417); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_dif_start_scsi(ldvarg417); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 8: ; if (ldv_state_variable_83 == 1) { qla24xx_chip_diag(qla81xx_isp_ops_group0); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_chip_diag(qla81xx_isp_ops_group0); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 9: ; if (ldv_state_variable_83 == 1) { qla24xx_fabric_logout(qla81xx_isp_ops_group0, (int )ldvarg415, (int )ldvarg414, (int )ldvarg413, (int )ldvarg416); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_fabric_logout(qla81xx_isp_ops_group0, (int )ldvarg415, (int )ldvarg414, (int )ldvarg413, (int )ldvarg416); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 10: ; if (ldv_state_variable_83 == 1) { qla24xx_reset_chip(qla81xx_isp_ops_group0); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_reset_chip(qla81xx_isp_ops_group0); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 11: ; if (ldv_state_variable_83 == 1) { qla24xx_prep_ms_iocb(qla81xx_isp_ops_group0, ldvarg412, ldvarg411); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_prep_ms_iocb(qla81xx_isp_ops_group0, ldvarg412, ldvarg411); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 12: ; if (ldv_state_variable_83 == 1) { qla24xx_lun_reset(qla81xx_isp_ops_group2, ldvarg410, ldvarg409); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_lun_reset(qla81xx_isp_ops_group2, ldvarg410, ldvarg409); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 13: ; if (ldv_state_variable_83 == 1) { qla24xx_fw_version_str(qla81xx_isp_ops_group0, ldvarg408, ldvarg407); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_fw_version_str(qla81xx_isp_ops_group0, ldvarg408, ldvarg407); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 14: ; if (ldv_state_variable_83 == 1) { qla2x00_iospace_config(qla81xx_isp_ops_group1); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla2x00_iospace_config(qla81xx_isp_ops_group1); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 15: ; if (ldv_state_variable_83 == 1) { qla2x00_initialize_adapter(qla81xx_isp_ops_group0); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla2x00_initialize_adapter(qla81xx_isp_ops_group0); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 16: ; if (ldv_state_variable_83 == 1) { qla24xx_get_flash_version(qla81xx_isp_ops_group0, ldvarg406); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_get_flash_version(qla81xx_isp_ops_group0, ldvarg406); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 17: ; if (ldv_state_variable_83 == 1) { qla24xx_disable_intrs(qla81xx_isp_ops_group1); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 
2) { qla24xx_disable_intrs(qla81xx_isp_ops_group1); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 18: ; if (ldv_state_variable_83 == 1) { qla81xx_nvram_config(qla81xx_isp_ops_group0); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla81xx_nvram_config(qla81xx_isp_ops_group0); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 19: ; if (ldv_state_variable_83 == 1) { qla24xx_pci_info_str(qla81xx_isp_ops_group0, ldvarg405); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_pci_info_str(qla81xx_isp_ops_group0, ldvarg405); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 20: ; if (ldv_state_variable_83 == 2) { qla25xx_read_optrom_data(qla81xx_isp_ops_group0, ldvarg404, ldvarg403, ldvarg402); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 21: ; if (ldv_state_variable_83 == 1) { qla83xx_beacon_blink(qla81xx_isp_ops_group0); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla83xx_beacon_blink(qla81xx_isp_ops_group0); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 22: ; if (ldv_state_variable_83 == 1) { qla24xx_beacon_off(qla81xx_isp_ops_group0); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_beacon_off(qla81xx_isp_ops_group0); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 23: ; if (ldv_state_variable_83 == 1) { qla24xx_abort_command(ldvarg401); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_abort_command(ldvarg401); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 24: ; if (ldv_state_variable_83 == 1) { qla81xx_fw_dump(qla81xx_isp_ops_group0, ldvarg400); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla81xx_fw_dump(qla81xx_isp_ops_group0, ldvarg400); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 25: ; if (ldv_state_variable_83 == 1) { qla24xx_config_rings(qla81xx_isp_ops_group0); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_config_rings(qla81xx_isp_ops_group0); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 26: ; if (ldv_state_variable_83 == 1) { qla81xx_load_risc(qla81xx_isp_ops_group0, ldvarg399); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla81xx_load_risc(qla81xx_isp_ops_group0, ldvarg399); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 27: ; if (ldv_state_variable_83 == 1) { qla25xx_pci_config(qla81xx_isp_ops_group0); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla25xx_pci_config(qla81xx_isp_ops_group0); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 28: ; if (ldv_state_variable_83 == 1) { qla24xx_login_fabric(qla81xx_isp_ops_group0, (int )ldvarg397, (int )ldvarg395, (int )ldvarg394, (int )ldvarg398, ldvarg393, (int )ldvarg396); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_login_fabric(qla81xx_isp_ops_group0, (int )ldvarg397, (int )ldvarg395, (int )ldvarg394, (int )ldvarg398, ldvarg393, (int )ldvarg396); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 29: ; if (ldv_state_variable_83 == 1) { qla24xx_beacon_on(qla81xx_isp_ops_group0); ldv_state_variable_83 = 1; } else { } if (ldv_state_variable_83 == 2) { qla24xx_beacon_on(qla81xx_isp_ops_group0); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 30: ; if (ldv_state_variable_83 == 1) { qla24xx_prep_ms_fdmi_iocb(qla81xx_isp_ops_group0, ldvarg392, ldvarg391); ldv_state_variable_83 = 1; } else { } if 
(ldv_state_variable_83 == 2) { qla24xx_prep_ms_fdmi_iocb(qla81xx_isp_ops_group0, ldvarg392, ldvarg391); ldv_state_variable_83 = 2; } else { } goto ldv_69018; case 31: ; if (ldv_state_variable_83 == 2) { ldv_release_83(); ldv_state_variable_83 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_69018; case 32: ; if (ldv_state_variable_83 == 1) { ldv_probe_83(); ldv_state_variable_83 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_69018; default: ldv_stop(); } ldv_69018: ; } else { } goto ldv_68677; case 60: ; if (ldv_state_variable_75 != 0) { tmp___126 = __VERIFIER_nondet_int(); switch (tmp___126) { case 0: ; if (ldv_state_variable_75 == 2) { noop_llseek(apidev_fops_group2, ldvarg426, ldvarg425); ldv_state_variable_75 = 2; } else { } goto ldv_69054; case 1: ; if (ldv_state_variable_75 == 1) { ldv_retval_2 = ldv_open_75(); if (ldv_retval_2 == 0) { ldv_state_variable_75 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_69054; case 2: ; if (ldv_state_variable_75 == 2) { ldv_release_75(); ldv_state_variable_75 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_69054; default: ldv_stop(); } ldv_69054: ; } else { } goto ldv_68677; case 61: ; if (ldv_state_variable_40 != 0) { ldv_main_exported_40(); } else { } goto ldv_68677; case 62: ; if (ldv_state_variable_14 != 0) { choose_timer_14(ldv_timer_list_14); } else { } goto ldv_68677; case 63: ; if (ldv_state_variable_69 != 0) { ldv_main_exported_69(); } else { } goto ldv_68677; case 64: ; if (ldv_state_variable_59 != 0) { ldv_main_exported_59(); } else { } goto ldv_68677; case 65: ; if (ldv_state_variable_49 != 0) { ldv_main_exported_49(); } else { } goto ldv_68677; case 66: ; goto ldv_68677; case 67: ; if (ldv_state_variable_53 != 0) { ldv_main_exported_53(); } else { } goto ldv_68677; case 68: ; if (ldv_state_variable_79 != 0) { tmp___127 = __VERIFIER_nondet_int(); switch (tmp___127) { case 0: ; if (ldv_state_variable_79 == 1) { qlafx00_soft_reset(qlafx00_isp_ops_group0); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_soft_reset(qlafx00_isp_ops_group0); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 1: ; if (ldv_state_variable_79 == 2) { qla24xx_write_optrom_data(qlafx00_isp_ops_group0, ldvarg472, ldvarg471, ldvarg470); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 2: ; if (ldv_state_variable_79 == 1) { qlafx00_enable_intrs(qlafx00_isp_ops_group1); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_enable_intrs(qlafx00_isp_ops_group1); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 3: ; if (ldv_state_variable_79 == 1) { qlafx00_intr_handler(ldvarg468, ldvarg469); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_intr_handler(ldvarg468, ldvarg469); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 4: ; if (ldv_state_variable_79 == 1) { qlafx00_abort_target(qlafx00_isp_ops_group2, ldvarg467, ldvarg466); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_abort_target(qlafx00_isp_ops_group2, ldvarg467, ldvarg466); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 5: ; if (ldv_state_variable_79 == 1) { qlafx00_abort_isp(qlafx00_isp_ops_group0); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_abort_isp(qlafx00_isp_ops_group0); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 6: ; if (ldv_state_variable_79 == 1) { qlafx00_start_scsi(ldvarg465); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { 
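/* case 68 (ldv_state_variable_79): qlafx00 (ISPFx00) isp_ops group; qlafx00_soft_reset() appears in both case 0 and case 8, and command aborts go through qla24xx_async_abort_command() rather than qla24xx_abort_command(). */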
qlafx00_start_scsi(ldvarg465); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 7: ; if (ldv_state_variable_79 == 1) { qlafx00_chip_diag(qlafx00_isp_ops_group0); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_chip_diag(qlafx00_isp_ops_group0); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 8: ; if (ldv_state_variable_79 == 1) { qlafx00_soft_reset(qlafx00_isp_ops_group0); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_soft_reset(qlafx00_isp_ops_group0); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 9: ; if (ldv_state_variable_79 == 1) { qla24xx_prep_ms_iocb(qlafx00_isp_ops_group0, ldvarg464, ldvarg463); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qla24xx_prep_ms_iocb(qlafx00_isp_ops_group0, ldvarg464, ldvarg463); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 10: ; if (ldv_state_variable_79 == 1) { qlafx00_lun_reset(qlafx00_isp_ops_group2, ldvarg462, ldvarg461); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_lun_reset(qlafx00_isp_ops_group2, ldvarg462, ldvarg461); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 11: ; if (ldv_state_variable_79 == 1) { qlafx00_fw_version_str(qlafx00_isp_ops_group0, ldvarg460, ldvarg459); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_fw_version_str(qlafx00_isp_ops_group0, ldvarg460, ldvarg459); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 12: ; if (ldv_state_variable_79 == 1) { qlafx00_iospace_config(qlafx00_isp_ops_group1); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_iospace_config(qlafx00_isp_ops_group1); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 13: ; if (ldv_state_variable_79 == 1) { qlafx00_initialize_adapter(qlafx00_isp_ops_group0); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_initialize_adapter(qlafx00_isp_ops_group0); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 14: ; if (ldv_state_variable_79 == 1) { qla24xx_get_flash_version(qlafx00_isp_ops_group0, ldvarg458); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qla24xx_get_flash_version(qlafx00_isp_ops_group0, ldvarg458); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 15: ; if (ldv_state_variable_79 == 1) { qlafx00_disable_intrs(qlafx00_isp_ops_group1); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_disable_intrs(qlafx00_isp_ops_group1); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 16: ; if (ldv_state_variable_79 == 1) { qlafx00_pci_info_str(qlafx00_isp_ops_group0, ldvarg457); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_pci_info_str(qlafx00_isp_ops_group0, ldvarg457); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 17: ; if (ldv_state_variable_79 == 2) { qla24xx_read_optrom_data(qlafx00_isp_ops_group0, ldvarg456, ldvarg455, ldvarg454); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 18: ; if (ldv_state_variable_79 == 1) { qla24xx_beacon_off(qlafx00_isp_ops_group0); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qla24xx_beacon_off(qlafx00_isp_ops_group0); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 19: ; if (ldv_state_variable_79 == 1) { qla24xx_async_abort_command(ldvarg453); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qla24xx_async_abort_command(ldvarg453); 
ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 20: ; if (ldv_state_variable_79 == 1) { qlafx00_config_rings(qlafx00_isp_ops_group0); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_config_rings(qlafx00_isp_ops_group0); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 21: ; if (ldv_state_variable_79 == 1) { qlafx00_pci_config(qlafx00_isp_ops_group0); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qlafx00_pci_config(qlafx00_isp_ops_group0); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 22: ; if (ldv_state_variable_79 == 1) { qla24xx_write_nvram_data(qlafx00_isp_ops_group0, ldvarg452, ldvarg451, ldvarg450); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qla24xx_write_nvram_data(qlafx00_isp_ops_group0, ldvarg452, ldvarg451, ldvarg450); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 23: ; if (ldv_state_variable_79 == 1) { qla24xx_beacon_on(qlafx00_isp_ops_group0); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qla24xx_beacon_on(qlafx00_isp_ops_group0); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 24: ; if (ldv_state_variable_79 == 1) { qla24xx_read_nvram_data(qlafx00_isp_ops_group0, ldvarg449, ldvarg448, ldvarg447); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qla24xx_read_nvram_data(qlafx00_isp_ops_group0, ldvarg449, ldvarg448, ldvarg447); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 25: ; if (ldv_state_variable_79 == 1) { qla24xx_prep_ms_fdmi_iocb(qlafx00_isp_ops_group0, ldvarg446, ldvarg445); ldv_state_variable_79 = 1; } else { } if (ldv_state_variable_79 == 2) { qla24xx_prep_ms_fdmi_iocb(qlafx00_isp_ops_group0, ldvarg446, ldvarg445); ldv_state_variable_79 = 2; } else { } goto ldv_69067; case 26: ; if (ldv_state_variable_79 == 2) { ldv_release_79(); ldv_state_variable_79 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_69067; case 27: ; if (ldv_state_variable_79 == 1) { ldv_probe_79(); ldv_state_variable_79 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_69067; default: ldv_stop(); } ldv_69067: ; } else { } goto ldv_68677; case 69: ; goto ldv_68677; case 70: ; if (ldv_state_variable_42 != 0) { ldv_main_exported_42(); } else { } goto ldv_68677; case 71: ; if (ldv_state_variable_0 != 0) { tmp___128 = __VERIFIER_nondet_int(); switch (tmp___128) { case 0: ; if (ldv_state_variable_0 == 3 && ref_cnt == 0) { qla2x00_module_exit(); ldv_state_variable_0 = 2; goto ldv_final; } else { } goto ldv_69101; case 1: ; if (ldv_state_variable_0 == 1) { ldv_retval_3 = qla2x00_module_init(); if (ldv_retval_3 == 0) { ldv_state_variable_0 = 3; ldv_state_variable_54 = 1; ldv_initialize_device_attribute_54(); ldv_state_variable_62 = 1; ldv_state_variable_43 = 1; ldv_state_variable_86 = 1; ldv_initialize_isp_operations_86(); ldv_state_variable_37 = 1; ldv_state_variable_45 = 1; ldv_state_variable_66 = 1; ldv_state_variable_73 = 1; ldv_initialize_bin_attribute_73(); ldv_state_variable_56 = 1; ldv_state_variable_34 = 1; ldv_initialize_device_attribute_34(); ldv_state_variable_60 = 1; ldv_state_variable_52 = 1; ldv_state_variable_38 = 1; ldv_state_variable_81 = 1; ldv_initialize_isp_operations_81(); ldv_state_variable_47 = 1; ldv_state_variable_58 = 1; ldv_state_variable_41 = 1; ldv_state_variable_51 = 1; ldv_state_variable_36 = 1; ldv_state_variable_64 = 1; ldv_state_variable_39 = 1; ldv_state_variable_85 = 1; ldv_initialize_isp_operations_85(); ldv_state_variable_50 = 1; ldv_state_variable_65 = 1; 
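/* A successful qla2x00_module_init() (ldv_retval_3 == 0) raises every modelled interface to state 1 and runs its ldv_initialize_*() helper, which presumably sets up the corresponding callback argument group (isp_operations, pci_error_handlers, device/bin attributes, fc_function_templates). */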
ldv_state_variable_46 = 1; ldv_state_variable_77 = 1; ldv_initialize_pci_error_handlers_77(); ldv_state_variable_87 = 1; ldv_initialize_isp_operations_87(); ldv_state_variable_42 = 1; ldv_state_variable_48 = 1; ldv_state_variable_79 = 1; ldv_initialize_isp_operations_79(); ldv_state_variable_78 = 1; ldv_initialize_isp_operations_78(); ldv_state_variable_53 = 1; ldv_initialize_device_attribute_53(); ldv_state_variable_35 = 1; ldv_state_variable_49 = 1; ldv_state_variable_59 = 1; ldv_state_variable_69 = 1; ldv_initialize_bin_attribute_69(); ldv_state_variable_61 = 1; ldv_state_variable_40 = 1; ldv_state_variable_57 = 1; ldv_state_variable_83 = 1; ldv_initialize_isp_operations_83(); ldv_state_variable_74 = 1; ldv_initialize_bin_attribute_74(); ldv_state_variable_84 = 1; ldv_initialize_isp_operations_84(); ldv_state_variable_55 = 1; ldv_initialize_device_attribute_55(); ldv_state_variable_82 = 1; ldv_initialize_isp_operations_82(); ldv_state_variable_44 = 1; ldv_state_variable_72 = 1; ldv_initialize_bin_attribute_72(); ldv_state_variable_68 = 1; ldv_state_variable_80 = 1; ldv_initialize_isp_operations_80(); ldv_state_variable_70 = 1; ldv_state_variable_71 = 1; ldv_initialize_bin_attribute_71(); ldv_state_variable_63 = 1; ldv_state_variable_32 = 1; ldv_initialize_fc_function_template_32(); ldv_state_variable_33 = 1; ldv_initialize_fc_function_template_33(); ldv_state_variable_67 = 1; } else { } if (ldv_retval_3 != 0) { ldv_state_variable_0 = 2; goto ldv_final; } else { } } else { } goto ldv_69101; default: ldv_stop(); } ldv_69101: ; } else { } goto ldv_68677; case 72: ; if (ldv_state_variable_46 != 0) { ldv_main_exported_46(); } else { } goto ldv_68677; case 73: ; goto ldv_68677; case 74: ; if (ldv_state_variable_13 != 0) { choose_timer_13(ldv_timer_list_13); } else { } goto ldv_68677; case 75: ; goto ldv_68677; case 76: ; if (ldv_state_variable_85 != 0) { tmp___129 = __VERIFIER_nondet_int(); switch (tmp___129) { case 0: ; if (ldv_state_variable_85 == 1) { qla24xx_reset_adapter(qla24xx_isp_ops_group0); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_reset_adapter(qla24xx_isp_ops_group0); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 1: ; if (ldv_state_variable_85 == 1) { qla24xx_enable_intrs(qla24xx_isp_ops_group1); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_enable_intrs(qla24xx_isp_ops_group1); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 2: ; if (ldv_state_variable_85 == 2) { qla24xx_write_optrom_data(qla24xx_isp_ops_group0, ldvarg518, ldvarg517, ldvarg516); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 3: ; if (ldv_state_variable_85 == 1) { qla24xx_intr_handler(ldvarg514, ldvarg515); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_intr_handler(ldvarg514, ldvarg515); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 4: ; if (ldv_state_variable_85 == 1) { qla24xx_abort_target(qla24xx_isp_ops_group2, ldvarg513, ldvarg512); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_abort_target(qla24xx_isp_ops_group2, ldvarg513, ldvarg512); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 5: ; if (ldv_state_variable_85 == 1) { qla24xx_update_fw_options(qla24xx_isp_ops_group0); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_update_fw_options(qla24xx_isp_ops_group0); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 6: ; if (ldv_state_variable_85 == 1) { 
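/* case 76 (ldv_state_variable_85): qla24xx isp_ops group; same two-state pattern, with ldv_probe_85()/ldv_release_85() guarding the state-2-only option-ROM accesses in cases 2 and 20. */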
qla24xx_start_scsi(ldvarg511); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_start_scsi(ldvarg511); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 7: ; if (ldv_state_variable_85 == 1) { qla2x00_abort_isp(qla24xx_isp_ops_group0); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla2x00_abort_isp(qla24xx_isp_ops_group0); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 8: ; if (ldv_state_variable_85 == 1) { qla24xx_chip_diag(qla24xx_isp_ops_group0); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_chip_diag(qla24xx_isp_ops_group0); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 9: ; if (ldv_state_variable_85 == 1) { qla24xx_fabric_logout(qla24xx_isp_ops_group0, (int )ldvarg509, (int )ldvarg508, (int )ldvarg507, (int )ldvarg510); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_fabric_logout(qla24xx_isp_ops_group0, (int )ldvarg509, (int )ldvarg508, (int )ldvarg507, (int )ldvarg510); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 10: ; if (ldv_state_variable_85 == 1) { qla24xx_reset_chip(qla24xx_isp_ops_group0); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_reset_chip(qla24xx_isp_ops_group0); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 11: ; if (ldv_state_variable_85 == 1) { qla24xx_prep_ms_iocb(qla24xx_isp_ops_group0, ldvarg506, ldvarg505); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_prep_ms_iocb(qla24xx_isp_ops_group0, ldvarg506, ldvarg505); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 12: ; if (ldv_state_variable_85 == 1) { qla24xx_lun_reset(qla24xx_isp_ops_group2, ldvarg504, ldvarg503); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_lun_reset(qla24xx_isp_ops_group2, ldvarg504, ldvarg503); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 13: ; if (ldv_state_variable_85 == 1) { qla24xx_fw_version_str(qla24xx_isp_ops_group0, ldvarg502, ldvarg501); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_fw_version_str(qla24xx_isp_ops_group0, ldvarg502, ldvarg501); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 14: ; if (ldv_state_variable_85 == 1) { qla2x00_iospace_config(qla24xx_isp_ops_group1); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla2x00_iospace_config(qla24xx_isp_ops_group1); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 15: ; if (ldv_state_variable_85 == 1) { qla2x00_initialize_adapter(qla24xx_isp_ops_group0); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla2x00_initialize_adapter(qla24xx_isp_ops_group0); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 16: ; if (ldv_state_variable_85 == 1) { qla24xx_disable_intrs(qla24xx_isp_ops_group1); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_disable_intrs(qla24xx_isp_ops_group1); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 17: ; if (ldv_state_variable_85 == 1) { qla24xx_get_flash_version(qla24xx_isp_ops_group0, ldvarg500); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_get_flash_version(qla24xx_isp_ops_group0, ldvarg500); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 18: ; if (ldv_state_variable_85 == 1) { qla24xx_nvram_config(qla24xx_isp_ops_group0); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { 
qla24xx_nvram_config(qla24xx_isp_ops_group0); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 19: ; if (ldv_state_variable_85 == 1) { qla24xx_pci_info_str(qla24xx_isp_ops_group0, ldvarg499); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_pci_info_str(qla24xx_isp_ops_group0, ldvarg499); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 20: ; if (ldv_state_variable_85 == 2) { qla24xx_read_optrom_data(qla24xx_isp_ops_group0, ldvarg498, ldvarg497, ldvarg496); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 21: ; if (ldv_state_variable_85 == 1) { qla24xx_beacon_blink(qla24xx_isp_ops_group0); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_beacon_blink(qla24xx_isp_ops_group0); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 22: ; if (ldv_state_variable_85 == 1) { qla24xx_beacon_off(qla24xx_isp_ops_group0); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_beacon_off(qla24xx_isp_ops_group0); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 23: ; if (ldv_state_variable_85 == 1) { qla24xx_abort_command(ldvarg495); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_abort_command(ldvarg495); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 24: ; if (ldv_state_variable_85 == 1) { qla24xx_fw_dump(qla24xx_isp_ops_group0, ldvarg494); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_fw_dump(qla24xx_isp_ops_group0, ldvarg494); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 25: ; if (ldv_state_variable_85 == 1) { qla24xx_config_rings(qla24xx_isp_ops_group0); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_config_rings(qla24xx_isp_ops_group0); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 26: ; if (ldv_state_variable_85 == 1) { qla24xx_load_risc(qla24xx_isp_ops_group0, ldvarg493); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_load_risc(qla24xx_isp_ops_group0, ldvarg493); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 27: ; if (ldv_state_variable_85 == 1) { qla24xx_pci_config(qla24xx_isp_ops_group0); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_pci_config(qla24xx_isp_ops_group0); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 28: ; if (ldv_state_variable_85 == 1) { qla24xx_login_fabric(qla24xx_isp_ops_group0, (int )ldvarg491, (int )ldvarg489, (int )ldvarg488, (int )ldvarg492, ldvarg487, (int )ldvarg490); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_login_fabric(qla24xx_isp_ops_group0, (int )ldvarg491, (int )ldvarg489, (int )ldvarg488, (int )ldvarg492, ldvarg487, (int )ldvarg490); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 29: ; if (ldv_state_variable_85 == 1) { qla24xx_write_nvram_data(qla24xx_isp_ops_group0, ldvarg486, ldvarg485, ldvarg484); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_write_nvram_data(qla24xx_isp_ops_group0, ldvarg486, ldvarg485, ldvarg484); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 30: ; if (ldv_state_variable_85 == 1) { qla24xx_beacon_on(qla24xx_isp_ops_group0); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_beacon_on(qla24xx_isp_ops_group0); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 31: ; if (ldv_state_variable_85 == 1) { qla24xx_read_nvram_data(qla24xx_isp_ops_group0, 
ldvarg483, ldvarg482, ldvarg481); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_read_nvram_data(qla24xx_isp_ops_group0, ldvarg483, ldvarg482, ldvarg481); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 32: ; if (ldv_state_variable_85 == 1) { qla24xx_prep_ms_fdmi_iocb(qla24xx_isp_ops_group0, ldvarg480, ldvarg479); ldv_state_variable_85 = 1; } else { } if (ldv_state_variable_85 == 2) { qla24xx_prep_ms_fdmi_iocb(qla24xx_isp_ops_group0, ldvarg480, ldvarg479); ldv_state_variable_85 = 2; } else { } goto ldv_69110; case 33: ; if (ldv_state_variable_85 == 2) { ldv_release_85(); ldv_state_variable_85 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_69110; case 34: ; if (ldv_state_variable_85 == 1) { ldv_probe_85(); ldv_state_variable_85 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_69110; default: ldv_stop(); } ldv_69110: ; } else { } goto ldv_68677; case 77: ; if (ldv_state_variable_3 != 0) { invoke_work_3(); } else { } goto ldv_68677; case 78: ; if (ldv_state_variable_36 != 0) { ldv_main_exported_36(); } else { } goto ldv_68677; case 79: ; goto ldv_68677; case 80: ; if (ldv_state_variable_51 != 0) { ldv_main_exported_51(); } else { } goto ldv_68677; case 81: ; if (ldv_state_variable_47 != 0) { ldv_main_exported_47(); } else { } goto ldv_68677; case 82: ; goto ldv_68677; case 83: ; if (ldv_state_variable_38 != 0) { ldv_main_exported_38(); } else { } goto ldv_68677; case 84: ; if (ldv_state_variable_4 != 0) { invoke_work_4(); } else { } goto ldv_68677; case 85: ; if (ldv_state_variable_34 != 0) { ldv_main_exported_34(); } else { } goto ldv_68677; case 86: ; if (ldv_state_variable_37 != 0) { ldv_main_exported_37(); } else { } goto ldv_68677; case 87: ; if (ldv_state_variable_43 != 0) { ldv_main_exported_43(); } else { } goto ldv_68677; case 88: ; if (ldv_state_variable_5 != 0) { invoke_work_5(); } else { } goto ldv_68677; default: ldv_stop(); } ldv_68677: ; goto ldv_69159; ldv_final: ldv_check_final_state(); return 0; } } __inline static long PTR_ERR(void const *ptr ) { long tmp ; { tmp = ldv_ptr_err(ptr); return (tmp); } } __inline static bool IS_ERR(void const *ptr ) { bool tmp ; { tmp = ldv_is_err(ptr); return (tmp); } } bool ldv_queue_work_on_5(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_6(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_7(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_8(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_9(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { 
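/* Like the other ldv_queue_*_work_on_* wrappers above: forward to
 * queue_delayed_work_on() and record the embedded work_struct as pending in the
 * LDV work model via activate_work_7(..., 2). */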
ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_10(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } int ldv_mod_timer_11(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) { ldv_func_ret_type___4 ldv_func_res ; int tmp ; { tmp = mod_timer(ldv_func_arg1, ldv_func_arg2); ldv_func_res = tmp; activate_pending_timer_21(ldv_func_arg1, ldv_func_arg2, 1); return (ldv_func_res); } } int ldv_del_timer_sync_12(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type___5 ldv_func_res ; int tmp ; { tmp = del_timer_sync(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_21(ldv_func_arg1); return (ldv_func_res); } } void ldv_destroy_workqueue_13(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } void ldv_flush_workqueue_14(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } void ldv_flush_workqueue_15(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } void ldv_destroy_workqueue_16(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_cancel_work_sync_17(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___6 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_7(ldv_func_arg1); return (ldv_func_res); } } void ldv_destroy_workqueue_18(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_cancel_work_sync_19(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___7 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_7(ldv_func_arg1); return (ldv_func_res); } } bool ldv_cancel_work_sync_20(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___8 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_7(ldv_func_arg1); return (ldv_func_res); } } bool ldv_cancel_work_sync_21(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___9 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_7(ldv_func_arg1); return (ldv_func_res); } } void ldv_destroy_workqueue_22(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_cancel_work_sync_23(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___10 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_7(ldv_func_arg1); return (ldv_func_res); } } void ldv_scsi_remove_host_24(struct Scsi_Host *shost ) { { scsi_remove_host(shost); ldv_state_variable_88 = 0; return; } } struct Scsi_Host *ldv_scsi_host_alloc_25(struct scsi_host_template *sht , int privsize ) { ldv_func_ret_type___11 ldv_func_res ; struct Scsi_Host *tmp ; { tmp = scsi_host_alloc(sht, privsize); ldv_func_res = tmp; if ((unsigned long )ldv_func_res != (unsigned long 
)((ldv_func_ret_type___11 )0)) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } void ldv_scsi_remove_host_26(struct Scsi_Host *shost ) { { scsi_remove_host(shost); ldv_state_variable_88 = 0; return; } } __inline static int ldv_register_chrdev_27(unsigned int major , char const *name , struct file_operations const *fops ) { ldv_func_ret_type___12 ldv_func_res ; int tmp ; { tmp = register_chrdev(major, name, fops); ldv_func_res = tmp; ldv_state_variable_31 = 1; ldv_file_operations_31(); return (ldv_func_res); } } int ldv___pci_register_driver_28(struct pci_driver *ldv_func_arg1 , struct module *ldv_func_arg2 , char const *ldv_func_arg3 ) { ldv_func_ret_type___13 ldv_func_res ; int tmp ; { tmp = __pci_register_driver(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; ldv_state_variable_76 = 1; ldv_pci_driver_76(); return (ldv_func_res); } } __inline static void ldv_unregister_chrdev_29(unsigned int major , char const *name ) { { unregister_chrdev(major, name); ldv_state_variable_31 = 0; return; } } void ldv_pci_unregister_driver_30(struct pci_driver *ldv_func_arg1 ) { { pci_unregister_driver(ldv_func_arg1); ldv_state_variable_76 = 0; return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern unsigned long find_first_zero_bit(unsigned long const * , unsigned long ) ; __inline static __u32 __arch_swab32(__u32 val ) { { __asm__ ("bswapl %0": "=r" (val): "0" (val)); return (val); } } __inline static __u16 __fswab16(__u16 val ) { { return ((__u16 )((int )((short )((int )val << 8)) | (int )((short )((int )val >> 8)))); } } __inline static __u32 __fswab32(__u32 val ) { __u32 tmp ; { tmp = __arch_swab32(val); return (tmp); } } __inline static void list_move_tail(struct list_head *list , struct list_head *head ) { { __list_del_entry(list); list_add_tail(list, head); return; } } extern void warn_slowpath_null(char const * , int const ) ; extern int memcmp(void const * , void const * , size_t ) ; extern char *strncpy(char * , char const * , __kernel_size_t ) ; extern void __xadd_wrong_size(void) ; __inline static int atomic_sub_and_test(int i , atomic_t *v ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; subl %2, %0; sete %1": "+m" (v->counter), "=qm" (c): "er" (i): "memory"); return ((int )((signed char )c) != 0); } } __inline static int atomic_add_return(int i , atomic_t *v ) { int __ret ; { __ret = i; switch (4UL) { case 1UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddb %b0, %1\n": "+q" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5659; case 2UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddw %w0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5659; case 4UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddl %0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5659; case 8UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddq %q0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5659; default: __xadd_wrong_size(); } ldv_5659: ; return (__ret + i); } } extern void wait_for_completion(struct completion * ) ; __inline static u64 get_jiffies_64(void) { { return ((u64 )jiffies); } } extern unsigned int jiffies_to_msecs(unsigned 
long const ) ; extern int del_timer(struct timer_list * ) ; int ldv_del_timer_67(struct timer_list *ldv_func_arg1 ) ; bool ldv_queue_work_on_61(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_63(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_62(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_65(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_64(struct workqueue_struct *ldv_func_arg1 ) ; __inline static void kref_init(struct kref *kref ) { { atomic_set(& kref->refcount, 1); return; } } __inline static void kref_get(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! __warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } __inline static int kref_sub(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub(kref, 1U, release); return (tmp); } } __inline static void *kmalloc_array(size_t n , size_t size , gfp_t flags ) { void *tmp ; { if (size != 0UL && 0xffffffffffffffffUL / size < n) { return ((void *)0); } else { } tmp = __kmalloc(n * size, flags); return (tmp); } } __inline static void *kcalloc(size_t n , size_t size , gfp_t flags ) { void *tmp ; { tmp = kmalloc_array(n, size, flags | 32768U); return (tmp); } } int reg_timer_15(struct timer_list *timer ) ; void activate_pending_timer_15(struct timer_list *timer , unsigned long data , int pending_flag ) ; void choose_timer_15(struct timer_list *timer ) ; void disable_suitable_timer_15(struct timer_list *timer ) ; extern int pci_find_capability(struct pci_dev * , int ) ; extern int pci_bus_write_config_byte(struct pci_bus * , unsigned int , int , u8 ) ; extern int pci_bus_write_config_word(struct pci_bus * , unsigned int , int , u16 ) ; __inline static int pci_write_config_byte(struct pci_dev const *dev , int where , u8 val ) { int tmp ; { tmp = pci_bus_write_config_byte(dev->bus, dev->devfn, where, (int )val); return (tmp); } } __inline static int pci_write_config_word(struct pci_dev const *dev , int where , u16 val ) { int tmp ; { tmp = pci_bus_write_config_word(dev->bus, dev->devfn, where, (int )val); return (tmp); } } extern void pci_set_master(struct pci_dev * ) ; extern int 
pci_try_set_mwi(struct pci_dev * ) ; extern void pci_clear_mwi(struct pci_dev * ) ; extern int pcix_set_mmrbc(struct pci_dev * , int ) ; extern int pcie_set_readrq(struct pci_dev * , int ) ; extern void pci_disable_rom(struct pci_dev * ) ; __inline static void *dma_zalloc_coherent(struct device *dev , size_t size , dma_addr_t *dma_handle , gfp_t flag ) { void *ret ; void *tmp ; { tmp = dma_alloc_attrs(dev, size, dma_handle, flag | 32768U, (struct dma_attrs *)0); ret = tmp; return (ret); } } int ldv_scsi_add_host_with_dma_66(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; __inline static u64 wwn_to_u64(u8 *wwn ) { { return (((((((((unsigned long long )*wwn << 56) | ((unsigned long long )*(wwn + 1UL) << 48)) | ((unsigned long long )*(wwn + 2UL) << 40)) | ((unsigned long long )*(wwn + 3UL) << 32)) | ((unsigned long long )*(wwn + 4UL) << 24)) | ((unsigned long long )*(wwn + 5UL) << 16)) | ((unsigned long long )*(wwn + 6UL) << 8)) | (unsigned long long )*(wwn + 7UL)); } } extern struct fc_rport *fc_remote_port_add(struct Scsi_Host * , int , struct fc_rport_identifiers * ) ; extern void fc_remote_port_rolechg(struct fc_rport * , u32 ) ; static char const * const port_state_str___0[5U] = { "Unknown", "UNCONFIGURED", "DEAD", "LOST", "ONLINE"}; void qla2x00_alloc_fw_dump(scsi_qla_host_t *vha ) ; int qla2x00_async_tm_cmd(fc_port_t *fcport , uint32_t flags , uint32_t lun , uint32_t tag ) ; int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha , fc_port_t *fcport ) ; fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *vha , gfp_t flags ) ; int qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha , struct req_que *req ) ; int qla2x00_init_rings(scsi_qla_host_t *vha ) ; int qla24xx_configure_vhba(scsi_qla_host_t *vha ) ; int qla2x00_marker(struct scsi_qla_host *vha , struct req_que *req , struct rsp_que *rsp , uint16_t loop_id , uint64_t lun , uint8_t type ) ; int qla2x00_start_sp(srb_t *sp ) ; unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *vha ) ; int qla2x00_load_ram(scsi_qla_host_t *vha , dma_addr_t req_dma , uint32_t risc_addr , uint32_t risc_code_size ) ; int qla2x00_execute_fw(scsi_qla_host_t *vha , uint32_t risc_addr ) ; int qla2x00_get_fw_version(scsi_qla_host_t *vha ) ; int qla2x00_get_fw_options(scsi_qla_host_t *vha , uint16_t *fwopts ) ; int qla2x00_set_fw_options(scsi_qla_host_t *vha , uint16_t *fwopts ) ; int qla2x00_mbx_reg_test(scsi_qla_host_t *vha ) ; int qla2x00_verify_checksum(scsi_qla_host_t *vha , uint32_t risc_addr ) ; int qla2x00_get_adapter_id(scsi_qla_host_t *vha , uint16_t *id , uint8_t *al_pa , uint8_t *area , uint8_t *domain , uint16_t *top , uint16_t *sw_cap ) ; int qla2x00_get_retry_cnt(scsi_qla_host_t *vha , uint8_t *retry_cnt , uint8_t *tov , uint16_t *r_a_tov ) ; int qla2x00_init_firmware(scsi_qla_host_t *vha , uint16_t size ) ; int qla2x00_get_firmware_state(scsi_qla_host_t *vha , uint16_t *states ) ; int qla2x00_get_port_name(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t *name , uint8_t opt ) ; int qla24xx_link_initialize(scsi_qla_host_t *vha ) ; int qla2x00_login_local_device(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t *mb_ret , uint8_t opt ) ; int qla2x00_get_id_list(scsi_qla_host_t *vha , void *id_list , dma_addr_t id_list_dma , uint16_t *entries ) ; int qla2x00_get_resource_cnts(scsi_qla_host_t *vha , uint16_t *cur_xchg_cnt , uint16_t *orig_xchg_cnt , uint16_t *cur_iocb_cnt , uint16_t *orig_iocb_cnt , uint16_t *max_npiv_vports , uint16_t *max_fcfs ) ; int qla2x00_set_serdes_params(scsi_qla_host_t *vha , uint16_t 
sw_em_1g , uint16_t sw_em_2g , uint16_t sw_em_4g ) ; int qla2x00_stop_firmware(scsi_qla_host_t *vha ) ; int qla2x00_enable_eft_trace(scsi_qla_host_t *vha , dma_addr_t eft_dma , uint16_t buffers ) ; int qla2x00_enable_fce_trace(scsi_qla_host_t *vha , dma_addr_t fce_dma , uint16_t buffers , uint16_t *mb , uint32_t *dwords ) ; int qla82xx_set_driver_version(scsi_qla_host_t *vha , char *version ) ; int qla25xx_set_driver_version(scsi_qla_host_t *vha , char *version ) ; int qla2x00_set_idma_speed(scsi_qla_host_t *vha , uint16_t loop_id , uint16_t port_speed , uint16_t *mb ) ; int qla84xx_verify_chip(struct scsi_qla_host *vha , uint16_t *status ) ; int qla81xx_fac_get_sector_size(scsi_qla_host_t *vha , uint32_t *sector_size ) ; int qla2x00_read_ram_word(scsi_qla_host_t *vha , uint32_t risc_addr , uint32_t *data ) ; int qla2x00_write_ram_word(scsi_qla_host_t *vha , uint32_t risc_addr , uint32_t data ) ; int qla81xx_write_mpi_register(scsi_qla_host_t *vha , uint16_t *mb ) ; int qla2x00_get_data_rate(scsi_qla_host_t *vha ) ; int qla24xx_set_fcp_prio(scsi_qla_host_t *vha , uint16_t loop_id , uint16_t priority , uint16_t *mb ) ; int qla81xx_get_port_config(scsi_qla_host_t *vha , uint16_t *mb ) ; int qla2x00_dump_mctp_data(scsi_qla_host_t *vha , dma_addr_t req_dma , uint32_t addr , uint32_t size ) ; char const *qla2x00_get_link_speed_str(struct qla_hw_data *ha , uint16_t speed ) ; uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *vha , uint32_t *dwptr , uint32_t faddr , uint32_t dwords ) ; int qla2x00_is_a_vp_did(scsi_qla_host_t *vha , uint32_t rscn_entry ) ; int qla2xxx_get_flash_info(scsi_qla_host_t *vha ) ; int qla2xxx_get_vpd_field(scsi_qla_host_t *vha , char *key , char *str , size_t size ) ; int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha ) ; ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha ) ; int qla27xx_fwdt_template_valid(void *p ) ; ulong qla27xx_fwdt_template_size(void *p ) ; void const *qla27xx_fwdt_template_default(void) ; ulong qla27xx_fwdt_template_default_size(void) ; void ql_dump_buffer(uint32_t level , scsi_qla_host_t *vha , int32_t id , uint8_t *b , uint32_t size ) ; int qla2x00_ga_nxt(scsi_qla_host_t *vha , fc_port_t *fcport ) ; int qla2x00_gid_pt(scsi_qla_host_t *vha , sw_info_t *list ) ; int qla2x00_gpn_id(scsi_qla_host_t *vha , sw_info_t *list ) ; int qla2x00_gnn_id(scsi_qla_host_t *vha , sw_info_t *list ) ; void qla2x00_gff_id(scsi_qla_host_t *vha , sw_info_t *list ) ; int qla2x00_rft_id(scsi_qla_host_t *vha ) ; int qla2x00_rff_id(scsi_qla_host_t *vha ) ; int qla2x00_rnn_id(scsi_qla_host_t *vha ) ; int qla2x00_rsnn_nn(scsi_qla_host_t *vha ) ; int qla2x00_fdmi_register(scsi_qla_host_t *vha ) ; int qla2x00_gfpn_id(scsi_qla_host_t *vha , sw_info_t *list ) ; int qla2x00_gpsc(scsi_qla_host_t *vha , sw_info_t *list ) ; int qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha ) ; int qla25xx_init_req_que(struct scsi_qla_host *vha , struct req_que *req ) ; int qla25xx_init_rsp_que(struct scsi_qla_host *vha , struct rsp_que *rsp ) ; void qla2x00_init_response_q_entries(struct rsp_que *rsp ) ; int qlafx00_init_firmware(scsi_qla_host_t *vha , uint16_t size ) ; int qlafx00_fw_ready(scsi_qla_host_t *vha ) ; int qlafx00_configure_devices(scsi_qla_host_t *vha ) ; void qlafx00_init_response_q_entries(struct rsp_que *rsp ) ; void qla2x00_sp_free(void *data , void *ptr ) ; void qla2x00_sp_timeout(unsigned long __data ) ; __inline void qla2x00_set_model_info(scsi_qla_host_t *vha , uint8_t *model , size_t len , char *def ) ; int qla82xx_check_md_needed(scsi_qla_host_t *vha ) ; 
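/* Forward declarations (flattened by CIL) for the qla2xxx mailbox, CT/fabric name
 * server (GA_NXT, GID_PT, FDMI, ...), flash and ISP27xx firmware-dump-template
 * helpers referenced later in this translation unit. */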
void qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha ) ; void qla8044_read_reset_template(struct scsi_qla_host *vha ) ; void qla8044_set_idc_dontreset(struct scsi_qla_host *vha ) ; int qla8044_rd_direct(struct scsi_qla_host *vha , uint32_t const crb_reg ) ; extern void __const_udelay(unsigned long ) ; void qlt_fc_port_added(struct scsi_qla_host *vha , fc_port_t *fcport ) ; void qlt_update_vp_map(struct scsi_qla_host *vha , int cmd ) ; __inline static bool qla_tgt_mode_enabled(struct scsi_qla_host *ha ) { { return (((int )(ha->host)->active_mode & 2) != 0); } } void qlt_init_atio_q_entries(struct scsi_qla_host *vha ) ; void qlt_24xx_process_atio_queue(struct scsi_qla_host *vha ) ; void qlt_24xx_config_rings(struct scsi_qla_host *vha ) ; void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha , struct nvram_24xx *nv ) ; void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha , struct init_cb_24xx *icb ) ; void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha , struct init_cb_81xx *icb ) ; void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha , struct nvram_81xx *nv ) ; __inline static uint16_t qla2x00_debounce_register(uint16_t volatile *addr ) { uint16_t volatile first ; uint16_t volatile second ; unsigned short tmp ; unsigned short tmp___0 ; { ldv_65633: tmp = readw((void const volatile *)addr); first = tmp; __asm__ volatile ("": : : "memory"); cpu_relax(); tmp___0 = readw((void const volatile *)addr); second = tmp___0; if ((int )((unsigned short )first) != (int )((unsigned short )second)) { goto ldv_65633; } else { } return ((uint16_t )first); } } __inline static void qla2x00_set_fcport_state___0(fc_port_t *fcport , int state ) { int old_state ; { old_state = atomic_read((atomic_t const *)(& fcport->state)); atomic_set(& fcport->state, state); if (old_state != 0 && old_state != state) { ql_dbg(268435456U, fcport->vha, 8317, "FCPort state transitioned from %s to %s - portid=%02x%02x%02x.\n", port_state_str___0[old_state], port_state_str___0[state], (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { } return; } } __inline static void qla2x00_init_timer(srb_t *sp , unsigned long tmo ) { { reg_timer_21(& sp->u.iocb_cmd.timer); sp->u.iocb_cmd.timer.expires = tmo * 250UL + (unsigned long )jiffies; sp->u.iocb_cmd.timer.data = (unsigned long )sp; sp->u.iocb_cmd.timer.function = & qla2x00_sp_timeout; add_timer(& sp->u.iocb_cmd.timer); sp->free = & qla2x00_sp_free; if (((((sp->fcport)->vha)->hw)->device_type & 131072U) != 0U && (unsigned int )sp->type == 10U) { init_completion(& sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); } else { } return; } } extern void *vmalloc(unsigned long ) ; static char *qla2x00_model_name[184U] = { (char *)"QLA2340", (char *)"133MHz PCI-X to 2Gb FC, Single Channel", (char *)"QLA2342", (char *)"133MHz PCI-X to 2Gb FC, Dual Channel", (char *)"QLA2344", (char *)"133MHz PCI-X to 2Gb FC, Quad Channel", (char *)"QCP2342", (char *)"cPCI to 2Gb FC, Dual Channel", (char *)"QSB2340", (char *)"SBUS to 2Gb FC, Single Channel", (char *)"QSB2342", (char *)"SBUS to 2Gb FC, Dual Channel", (char *)"QLA2310", (char *)"Sun 66MHz PCI-X to 2Gb FC, Single Channel", (char *)"QLA2332", (char *)"Sun 66MHz PCI-X to 2Gb FC, Single Channel", (char *)"QCP2332", (char *)"Sun cPCI to 2Gb FC, Dual Channel", (char *)"QCP2340", (char *)"cPCI to 2Gb FC, Single Channel", (char *)"QLA2342", (char *)"Sun 133MHz PCI-X to 2Gb FC, Dual Channel", (char *)"QCP2342", (char *)"Sun - cPCI to 2Gb FC, Dual Channel", (char *)"QLA2350", (char *)"133MHz PCI-X to 2Gb 
FC, Single Channel", (char *)"QLA2352", (char *)"133MHz PCI-X to 2Gb FC, Dual Channel", (char *)"QLA2352", (char *)"Sun 133MHz PCI-X to 2Gb FC, Dual Channel", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QLA2360", (char *)"133MHz PCI-X to 2Gb FC, Single Channel", (char *)"QLA2362", (char *)"133MHz PCI-X to 2Gb FC, Dual Channel", (char *)"QLE2360", (char *)"PCI-Express to 2Gb FC, Single Channel", (char *)"QLE2362", (char *)"PCI-Express to 2Gb FC, Dual Channel", (char *)"QLA200", (char *)"133MHz PCI-X to 2Gb FC Optical", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QLA200P", (char *)"133MHz PCI-X to 2Gb FC SFP", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QLA210", (char *)"133MHz PCI-X to 2Gb FC, Single Channel", (char *)"EMC 250", (char *)"133MHz PCI-X to 2Gb FC, Single Channel", (char *)"HP A7538A", (char *)"HP 1p2g PCI-X to 2Gb FC, Single Channel", (char *)"QLA210", (char *)"Sun 133MHz PCI-X to 2Gb FC, Single Channel", (char *)"QLA2460", (char *)"PCI-X 2.0 to 4Gb FC, Single Channel", (char *)"QLA2462", (char *)"PCI-X 2.0 to 4Gb FC, Dual Channel", (char *)"QMC2462", (char *)"IBM eServer BC 4Gb FC Expansion Card", (char *)"QMC2462S", (char *)"IBM eServer BC 4Gb FC Expansion Card SFF", (char *)"QLE2460", (char *)"PCI-Express to 4Gb FC, Single Channel", (char *)"QLE2462", (char *)"PCI-Express to 4Gb FC, Dual Channel", (char *)"QME2462", (char *)"Dell BS PCI-Express to 4Gb FC, Dual Channel", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QEM2462", (char *)"Sun Server I/O Module 4Gb FC, Dual Channel", (char *)"QLE210", (char *)"PCI-Express to 2Gb FC, Single Channel", (char *)"QLE220", (char *)"PCI-Express to 4Gb FC, Single Channel", (char *)"QLA2460", (char *)"Sun PCI-X 2.0 to 4Gb FC, Single Channel", (char *)"QLA2462", (char *)"Sun PCI-X 2.0 to 4Gb FC, Dual Channel", (char *)"QLE2460", (char *)"Sun PCI-Express to 2Gb FC, Single Channel", (char *)"QLE2462", (char *)"Sun PCI-Express to 4Gb FC, Single Channel", (char *)"QEM2462", (char *)"Server I/O Module 4Gb FC, Dual Channel", (char *)"QLE2440", (char *)"PCI-Express to 4Gb FC, Single Channel", (char *)"QLE2464", (char *)"PCI-Express to 4Gb FC, Quad Channel", (char *)"QLA2440", (char *)"PCI-X 2.0 to 4Gb FC, Single Channel", (char *)"HP AE369A", (char *)"PCI-X 2.0 to 4Gb FC, Dual Channel", (char *)"QLA2340", (char *)"Sun 133MHz PCI-X to 2Gb FC, Single Channel", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QMC2432M", (char *)"IBM eServer BC 4Gb FC Expansion Card CFFE", (char *)"QMC2422M", (char *)"IBM eServer BC 4Gb FC Expansion Card CFFX", (char *)"QLE220", (char *)"Sun PCI-Express to 4Gb FC, Single Channel", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QME2462", (char *)"PCI-Express to 4Gb FC, Dual Channel Mezz HBA", (char *)"QMH2462", (char *)"PCI-Express to 4Gb FC, Dual Channel Mezz HBA", (char *)" ", (char *)" ", (char *)"QLE220", (char *)"PCI-Express to 4Gb FC, Single Channel", (char 
*)"QLE220", (char *)"PCI-Express to 4Gb FC, Single Channel", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QME2472", (char *)"Dell BS PCI-Express to 4Gb FC, Dual Channel"}; static int qla2x00_isp_firmware(scsi_qla_host_t *vha ) ; static int qla2x00_setup_chip(scsi_qla_host_t *vha ) ; static int qla2x00_fw_ready(scsi_qla_host_t *vha ) ; static int qla2x00_configure_hba(scsi_qla_host_t *vha ) ; static int qla2x00_configure_loop(scsi_qla_host_t *vha ) ; static int qla2x00_configure_local_loop(scsi_qla_host_t *vha ) ; static int qla2x00_configure_fabric(scsi_qla_host_t *vha ) ; static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha , struct list_head *new_fcports ) ; static int qla2x00_fabric_dev_login(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t *next_loopid ) ; static int qla2x00_restart_isp(scsi_qla_host_t *vha ) ; static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *vha ) ; static int qla84xx_init_chip(scsi_qla_host_t *vha ) ; static int qla25xx_init_queues(struct qla_hw_data *ha ) ; void qla2x00_sp_timeout(unsigned long __data ) { srb_t *sp ; struct srb_iocb *iocb ; fc_port_t *fcport ; struct qla_hw_data *ha ; struct req_que *req ; unsigned long flags ; raw_spinlock_t *tmp ; { sp = (srb_t *)__data; fcport = sp->fcport; ha = (fcport->vha)->hw; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); req = *(ha->req_q_map); *(req->outstanding_cmds + (unsigned long )sp->handle) = (srb_t *)0; iocb = & sp->u.iocb_cmd; (*(iocb->timeout))((void *)sp); (*(sp->free))((void *)fcport->vha, (void *)sp); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } void qla2x00_sp_free(void *data , void *ptr ) { srb_t *sp ; struct srb_iocb *iocb ; struct scsi_qla_host *vha ; { sp = (srb_t *)ptr; iocb = & sp->u.iocb_cmd; vha = (struct scsi_qla_host *)data; ldv_del_timer_67(& iocb->timer); qla2x00_rel_sp(vha, sp); return; } } unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *vha ) { unsigned long tmo ; struct qla_hw_data *ha ; { ha = vha->hw; tmo = (unsigned long )((int )((unsigned int )ha->r_a_tov / 10U) * 2); if ((ha->device_type & 131072U) != 0U) { tmo = 20UL; } else if ((ha->device_type & 134217728U) == 0U) { tmo = (unsigned long )ha->login_timeout; } else { } return (tmo); } } static void qla2x00_async_iocb_timeout(void *data ) { srb_t *sp ; fc_port_t *fcport ; struct srb_iocb *lio ; { sp = (srb_t *)data; fcport = sp->fcport; ql_dbg(268435456U, fcport->vha, 8305, "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n", sp->name, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); fcport->flags = fcport->flags & 4294967287U; if ((unsigned int )sp->type == 1U) { lio = & sp->u.iocb_cmd; qla2x00_post_async_logout_work(fcport->vha, fcport, (uint16_t *)0U); lio->u.logio.data[0] = 16389U; lio->u.logio.data[1] = (unsigned int )lio->u.logio.flags & 1U; qla2x00_post_async_login_done_work(fcport->vha, fcport, (uint16_t *)(& lio->u.logio.data)); } else { } return; } } static void qla2x00_async_login_sp_done(void *data , void *ptr , int res ) { srb_t *sp ; struct srb_iocb *lio ; struct scsi_qla_host *vha ; int tmp ; { sp = (srb_t *)ptr; lio = & sp->u.iocb_cmd; vha = (struct scsi_qla_host *)data; tmp = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp == 0) { qla2x00_post_async_login_done_work((sp->fcport)->vha, sp->fcport, (uint16_t *)(& lio->u.logio.data)); } else { } (*(sp->free))((void 
*)(sp->fcport)->vha, (void *)sp); return; } } int qla2x00_async_login(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { srb_t *sp ; struct srb_iocb *lio ; int rval ; unsigned long tmp ; { rval = 258; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } sp->type = 1U; sp->name = (char *)"login"; tmp = qla2x00_get_async_timeout(vha); qla2x00_init_timer(sp, tmp + 2UL); lio = & sp->u.iocb_cmd; lio->timeout = & qla2x00_async_iocb_timeout; sp->done = & qla2x00_async_login_sp_done; lio->u.logio.flags = (uint16_t )((unsigned int )lio->u.logio.flags | 2U); if ((int )*(data + 1UL) & 1) { lio->u.logio.flags = (uint16_t )((unsigned int )lio->u.logio.flags | 1U); } else { } rval = qla2x00_start_sp(sp); if (rval != 0) { goto done_free_sp; } else { } ql_dbg(268435456U, vha, 8306, "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x retries=%d.\n", sp->handle, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, fcport->login_retry); return (rval); done_free_sp: (*(sp->free))((void *)fcport->vha, (void *)sp); done: ; return (rval); } } static void qla2x00_async_logout_sp_done(void *data , void *ptr , int res ) { srb_t *sp ; struct srb_iocb *lio ; struct scsi_qla_host *vha ; int tmp ; { sp = (srb_t *)ptr; lio = & sp->u.iocb_cmd; vha = (struct scsi_qla_host *)data; tmp = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp == 0) { qla2x00_post_async_logout_done_work((sp->fcport)->vha, sp->fcport, (uint16_t *)(& lio->u.logio.data)); } else { } (*(sp->free))((void *)(sp->fcport)->vha, (void *)sp); return; } } int qla2x00_async_logout(struct scsi_qla_host *vha , fc_port_t *fcport ) { srb_t *sp ; struct srb_iocb *lio ; int rval ; unsigned long tmp ; { rval = 258; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } sp->type = 2U; sp->name = (char *)"logout"; tmp = qla2x00_get_async_timeout(vha); qla2x00_init_timer(sp, tmp + 2UL); lio = & sp->u.iocb_cmd; lio->timeout = & qla2x00_async_iocb_timeout; sp->done = & qla2x00_async_logout_sp_done; rval = qla2x00_start_sp(sp); if (rval != 0) { goto done_free_sp; } else { } ql_dbg(268435456U, vha, 8304, "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n", sp->handle, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); return (rval); done_free_sp: (*(sp->free))((void *)fcport->vha, (void *)sp); done: ; return (rval); } } static void qla2x00_async_adisc_sp_done(void *data , void *ptr , int res ) { srb_t *sp ; struct srb_iocb *lio ; struct scsi_qla_host *vha ; int tmp ; { sp = (srb_t *)ptr; lio = & sp->u.iocb_cmd; vha = (struct scsi_qla_host *)data; tmp = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp == 0) { qla2x00_post_async_adisc_done_work((sp->fcport)->vha, sp->fcport, (uint16_t *)(& lio->u.logio.data)); } else { } (*(sp->free))((void *)(sp->fcport)->vha, (void *)sp); return; } } int qla2x00_async_adisc(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { srb_t *sp ; struct srb_iocb *lio ; int rval ; unsigned long tmp ; { rval = 258; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } sp->type = 6U; sp->name = (char *)"adisc"; tmp = qla2x00_get_async_timeout(vha); qla2x00_init_timer(sp, tmp + 2UL); lio = & sp->u.iocb_cmd; lio->timeout = & 
qla2x00_async_iocb_timeout; sp->done = & qla2x00_async_adisc_sp_done; if ((int )*(data + 1UL) & 1) { lio->u.logio.flags = (uint16_t )((unsigned int )lio->u.logio.flags | 1U); } else { } rval = qla2x00_start_sp(sp); if (rval != 0) { goto done_free_sp; } else { } ql_dbg(268435456U, vha, 8303, "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n", sp->handle, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); return (rval); done_free_sp: (*(sp->free))((void *)fcport->vha, (void *)sp); done: ; return (rval); } } static void qla2x00_tmf_iocb_timeout(void *data ) { srb_t *sp ; struct srb_iocb *tmf ; { sp = (srb_t *)data; tmf = & sp->u.iocb_cmd; tmf->u.tmf.comp_status = 6U; complete(& tmf->u.tmf.comp); return; } } static void qla2x00_tmf_sp_done(void *data , void *ptr , int res ) { srb_t *sp ; struct srb_iocb *tmf ; { sp = (srb_t *)ptr; tmf = & sp->u.iocb_cmd; complete(& tmf->u.tmf.comp); return; } } int qla2x00_async_tm_cmd(fc_port_t *fcport , uint32_t flags , uint32_t lun , uint32_t tag ) { struct scsi_qla_host *vha ; struct srb_iocb *tm_iocb ; srb_t *sp ; int rval ; unsigned long tmp ; int tmp___0 ; { vha = fcport->vha; rval = 258; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } tm_iocb = & sp->u.iocb_cmd; sp->type = 7U; sp->name = (char *)"tmf"; tmp = qla2x00_get_async_timeout(vha); qla2x00_init_timer(sp, tmp); tm_iocb->u.tmf.flags = flags; tm_iocb->u.tmf.lun = (uint64_t )lun; tm_iocb->u.tmf.data = tag; sp->done = & qla2x00_tmf_sp_done; tm_iocb->timeout = & qla2x00_tmf_iocb_timeout; init_completion(& tm_iocb->u.tmf.comp); rval = qla2x00_start_sp(sp); if (rval != 0) { goto done_free_sp; } else { } ql_dbg(4194304U, vha, 32815, "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", sp->handle, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); wait_for_completion(& tm_iocb->u.tmf.comp); rval = (unsigned int )tm_iocb->u.tmf.comp_status == 0U ? 
0 : 258; if (rval != 0 || tm_iocb->u.tmf.data != 0U) { ql_dbg(4194304U, vha, 32816, "TM IOCB failed (%x).\n", rval); } else { } tmp___0 = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 == 0 && ((vha->hw)->device_type & 131072U) == 0U) { flags = tm_iocb->u.tmf.flags; lun = (uint32_t )((unsigned short )tm_iocb->u.tmf.lun); qla2x00_marker(vha, *((vha->hw)->req_q_map), *((vha->hw)->rsp_q_map), (int )(sp->fcport)->loop_id, (uint64_t )lun, flags != 16U); } else { } done_free_sp: (*(sp->free))((void *)vha, (void *)sp); done: ; return (rval); } } static void qla24xx_abort_iocb_timeout(void *data ) { srb_t *sp ; struct srb_iocb *abt ; { sp = (srb_t *)data; abt = & sp->u.iocb_cmd; abt->u.abt.comp_status = 6U; complete(& abt->u.abt.comp); return; } } static void qla24xx_abort_sp_done(void *data , void *ptr , int res ) { srb_t *sp ; struct srb_iocb *abt ; { sp = (srb_t *)ptr; abt = & sp->u.iocb_cmd; complete(& abt->u.abt.comp); return; } } static int qla24xx_async_abort_cmd(srb_t *cmd_sp ) { scsi_qla_host_t *vha ; fc_port_t *fcport ; struct srb_iocb *abt_iocb ; srb_t *sp ; int rval ; unsigned long tmp ; { vha = (cmd_sp->fcport)->vha; fcport = cmd_sp->fcport; rval = 258; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } abt_iocb = & sp->u.iocb_cmd; sp->type = 12U; sp->name = (char *)"abort"; tmp = qla2x00_get_async_timeout(vha); qla2x00_init_timer(sp, tmp); abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; sp->done = & qla24xx_abort_sp_done; abt_iocb->timeout = & qla24xx_abort_iocb_timeout; init_completion(& abt_iocb->u.abt.comp); rval = qla2x00_start_sp(sp); if (rval != 0) { goto done_free_sp; } else { } ql_dbg(33554432U, vha, 20604, "Abort command issued - hdl=%x, target_id=%x\n", cmd_sp->handle, (int )fcport->tgt_id); wait_for_completion(& abt_iocb->u.abt.comp); rval = (unsigned int )abt_iocb->u.abt.comp_status == 0U ? 
0 : 258; done_free_sp: (*(sp->free))((void *)vha, (void *)sp); done: ; return (rval); } } int qla24xx_async_abort_command(srb_t *sp ) { unsigned long flags ; uint32_t handle ; fc_port_t *fcport ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct req_que *req ; raw_spinlock_t *tmp ; int tmp___0 ; int tmp___1 ; { flags = 0UL; fcport = sp->fcport; vha = fcport->vha; ha = vha->hw; req = vha->req; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); handle = 1U; goto ldv_66081; ldv_66080: ; if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )sp) { goto ldv_66079; } else { } handle = handle + 1U; ldv_66081: ; if ((uint32_t )req->num_outstanding_cmds > handle) { goto ldv_66080; } else { } ldv_66079: spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((uint32_t )req->num_outstanding_cmds == handle) { return (258); } else { } if ((unsigned int )sp->type == 10U) { tmp___0 = qlafx00_fx_disc(vha, & (vha->hw)->mr.fcport, 255); return (tmp___0); } else { } tmp___1 = qla24xx_async_abort_cmd(sp); return (tmp___1); } } void qla2x00_async_login_done(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { int rval ; { switch ((int )*data) { case 16384: rval = qla2x00_get_port_database(vha, fcport, 0); if (rval == 10) { fcport->flags = fcport->flags & 4294967287U; fcport->flags = fcport->flags | 2U; set_bit(8L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_66089; } else { } if (rval != 0) { qla2x00_post_async_logout_work(vha, fcport, (uint16_t *)0U); qla2x00_post_async_login_work(vha, fcport, (uint16_t *)0U); goto ldv_66089; } else { } if ((fcport->flags & 4U) != 0U) { qla2x00_post_async_adisc_work(vha, fcport, data); goto ldv_66089; } else { } qla2x00_update_fcport(vha, fcport); goto ldv_66089; case 16389: fcport->flags = fcport->flags & 4294967287U; if ((int )*(data + 1UL) & 1) { set_bit(8L, (unsigned long volatile *)(& vha->dpc_flags)); } else { qla2x00_mark_device_lost(vha, fcport, 1, 0); } goto ldv_66089; case 16391: fcport->loop_id = *(data + 1UL); qla2x00_post_async_logout_work(vha, fcport, (uint16_t *)0U); qla2x00_post_async_login_work(vha, fcport, (uint16_t *)0U); goto ldv_66089; case 16392: fcport->loop_id = (uint16_t )((int )fcport->loop_id + 1); rval = qla2x00_find_new_loop_id(vha, fcport); if (rval != 0) { fcport->flags = fcport->flags & 4294967287U; qla2x00_mark_device_lost(vha, fcport, 1, 0); goto ldv_66089; } else { } qla2x00_post_async_login_work(vha, fcport, (uint16_t *)0U); goto ldv_66089; } ldv_66089: ; return; } } void qla2x00_async_logout_done(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { { qla2x00_mark_device_lost(vha, fcport, 1, 0); return; } } void qla2x00_async_adisc_done(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { { if ((unsigned int )*data == 16384U) { qla2x00_update_fcport(vha, fcport); return; } else { } fcport->flags = fcport->flags & 4294967287U; if ((int )*(data + 1UL) & 1) { set_bit(8L, (unsigned long volatile *)(& vha->dpc_flags)); } else { qla2x00_mark_device_lost(vha, fcport, 1, 0); } return; } } static int qla83xx_nic_core_fw_load(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; uint32_t idc_major_ver ; uint32_t idc_minor_ver ; uint16_t config[4U] ; int tmp ; int tmp___0 ; { rval = 0; ha = vha->hw; qla83xx_idc_lock(vha, 0); ha->fcoe_dev_init_timeout = 30U; ha->fcoe_reset_timeout = 10U; tmp = __qla83xx_set_drv_presence(vha); if (tmp != 0) { ql_dbg(524288U, vha, 45175, "Error while setting DRV-Presence.\n"); rval = 258; 
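/* Error path of qla83xx_nic_core_fw_load(): bail out through the common "exit"
 * label below, which releases the IDC lock taken at the start of the function. */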
goto exit; } else { } qla83xx_reset_ownership(vha); qla83xx_rd_reg(vha, 571483008U, & idc_major_ver); if (*((unsigned long *)ha + 2UL) != 0UL) { idc_major_ver = 1U; qla83xx_wr_reg(vha, 571483008U, idc_major_ver); qla83xx_wr_reg(vha, 571483036U, 0U); } else if (idc_major_ver != 1U) { ql_log(1U, vha, 45181, "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n", idc_major_ver, 1); __qla83xx_clear_drv_presence(vha); rval = 258; goto exit; } else { } qla83xx_rd_reg(vha, 571483032U, & idc_minor_ver); idc_minor_ver = idc_minor_ver; qla83xx_wr_reg(vha, 571483032U, idc_minor_ver); if (*((unsigned long *)ha + 2UL) != 0UL) { memset((void *)(& config), 0, 8UL); tmp___0 = qla81xx_get_port_config(vha, (uint16_t *)(& config)); if (tmp___0 == 0) { qla83xx_wr_reg(vha, 571483012U, 3U); } else { } } else { } rval = qla83xx_idc_state_handler(vha); exit: qla83xx_idc_unlock(vha, 0); return (rval); } } int qla2x00_initialize_adapter(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; struct req_que *req ; int tmp ; bool tmp___0 ; { ha = vha->hw; req = *(ha->req_q_map); vha->flags.online = 0U; ha->flags.chip_reset_done = 0U; vha->flags.reset_active = 0U; ha->flags.pci_channel_io_perm_failure = 0U; ha->flags.eeh_busy = 0U; vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); atomic_set(& vha->loop_down_timer, 255); atomic_set(& vha->loop_state, 2); vha->device_flags = 2U; vha->dpc_flags = 0UL; vha->flags.management_server_logged_in = 0U; vha->marker_needed = 0U; ha->isp_abort_cnt = 0U; ha->beacon_blink_led = 0U; set_bit(0L, (unsigned long volatile *)(& ha->req_qid_map)); set_bit(0L, (unsigned long volatile *)(& ha->rsp_qid_map)); ql_dbg(1073741824U, vha, 64, "Configuring PCI space...\n"); rval = (*((ha->isp_ops)->pci_config))(vha); if (rval != 0) { ql_log(1U, vha, 68, "Unable to configure PCI space.\n"); return (rval); } else { } (*((ha->isp_ops)->reset_chip))(vha); rval = qla2xxx_get_flash_info(vha); if (rval != 0) { ql_log(0U, vha, 79, "Unable to validate FLASH data.\n"); return (rval); } else { } if ((ha->device_type & 262144U) != 0U) { qla8044_read_reset_template(vha); if (ql2xdontresethba == 1) { qla8044_set_idc_dontreset(vha); } else { } } else { } (*((ha->isp_ops)->get_flash_version))(vha, (void *)req->ring); ql_dbg(1073741824U, vha, 97, "Configure NVRAM parameters...\n"); (*((ha->isp_ops)->nvram_config))(vha); if (*((unsigned long *)ha + 2UL) != 0UL) { ql_log(2U, vha, 119, "Masking HBA WWPN %8phN (via NVRAM).\n", (uint8_t *)(& vha->port_name)); return (258); } else { } ql_dbg(1073741824U, vha, 120, "Verifying loaded RISC code...\n"); tmp = qla2x00_isp_firmware(vha); if (tmp != 0) { rval = (*((ha->isp_ops)->chip_diag))(vha); if (rval != 0) { return (rval); } else { } rval = qla2x00_setup_chip(vha); if (rval != 0) { return (rval); } else { } } else { } if ((ha->device_type & 4096U) != 0U) { ha->cs84xx = qla84xx_get_chip(vha); if ((unsigned long )ha->cs84xx == (unsigned long )((struct qla_chip_state_84xx *)0)) { ql_log(1U, vha, 208, "Unable to configure ISP84XX.\n"); return (258); } else { } } else { } tmp___0 = qla_ini_mode_enabled(vha); if ((int )tmp___0) { rval = qla2x00_init_rings(vha); } else { } ha->flags.chip_reset_done = 1U; if (rval == 0 && (ha->device_type & 4096U) != 0U) { rval = qla84xx_init_chip(vha); if (rval != 0) { ql_log(1U, vha, 212, "Unable to initialize ISP84XX.\n"); qla84xx_put_chip(vha); } else { } } else { } if ((ha->device_type & 65536U) != 0U) { rval = qla83xx_nic_core_fw_load(vha); if (rval != 0) { ql_log(1U, vha, 292, "Error in initializing NIC Core f/w.\n"); } 
else { } } else { } if (((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) { qla24xx_read_fcp_prio_cfg(vha); } else { } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { qla82xx_set_driver_version(vha, (char *)"8.07.00.18-k"); } else { qla25xx_set_driver_version(vha, (char *)"8.07.00.18-k"); } return (rval); } } int qla2100_pci_config(struct scsi_qla_host *vha ) { uint16_t w ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; unsigned short tmp___0 ; { ha = vha->hw; reg = & (ha->iobase)->isp; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & w); w = (uint16_t )((unsigned int )w | 320U); pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )w); pci_disable_rom(ha->pdev); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = readw((void const volatile *)(& reg->ctrl_status)); ha->pci_attr = (uint32_t )tmp___0; spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); } } int qla2300_pci_config(struct scsi_qla_host *vha ) { uint16_t w ; unsigned long flags ; uint32_t cnt ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; unsigned short tmp___0 ; unsigned short tmp___1 ; raw_spinlock_t *tmp___2 ; unsigned short tmp___3 ; { flags = 0UL; ha = vha->hw; reg = & (ha->iobase)->isp; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & w); w = (uint16_t )((unsigned int )w | 320U); if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { w = (unsigned int )w & 64511U; } else { } pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )w); if ((ha->device_type & 4U) != 0U) { tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writew(8192, (void volatile *)(& reg->hccr)); cnt = 0U; goto ldv_66141; ldv_66140: tmp___0 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___0 & 32) != 0) { goto ldv_66139; } else { } __const_udelay(42950UL); cnt = cnt + 1U; ldv_66141: ; if (cnt <= 29999U) { goto ldv_66140; } else { } ldv_66139: writew(32, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); ha->fb_rev = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->fb_cmd_2100 : & reg->u.isp2300.fb_cmd)); if ((unsigned int )ha->fb_rev == 6U) { pci_clear_mwi(ha->pdev); } else { } writew(0, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); writew(12288, (void volatile *)(& reg->hccr)); cnt = 0U; goto ldv_66144; ldv_66143: tmp___1 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___1 & 32) == 0) { goto ldv_66142; } else { } __const_udelay(42950UL); cnt = cnt + 1U; ldv_66144: ; if (cnt <= 29999U) { goto ldv_66143; } else { } ldv_66142: spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } pci_write_config_byte((struct pci_dev const *)ha->pdev, 13, 128); pci_disable_rom(ha->pdev); tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); tmp___3 = readw((void const volatile *)(& reg->ctrl_status)); ha->pci_attr = (uint32_t )tmp___3; spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); } } int qla24xx_pci_config(scsi_qla_host_t *vha ) { uint16_t w ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; int tmp ; bool tmp___0 ; raw_spinlock_t *tmp___1 ; { flags = 0UL; ha = vha->hw; reg = & (ha->iobase)->isp24; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & w); w = (uint16_t )((unsigned int )w | 320U); w = (unsigned int )w & 64511U; pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )w); pci_write_config_byte((struct pci_dev const *)ha->pdev, 13, 128); tmp = pci_find_capability(ha->pdev, 7); if (tmp != 0) { pcix_set_mmrbc(ha->pdev, 2048); } else { } tmp___0 = pci_is_pcie(ha->pdev); if ((int )tmp___0) { pcie_set_readrq(ha->pdev, 4096); } else { } pci_disable_rom(ha->pdev); ha->chip_revision = (uint16_t )(ha->pdev)->revision; tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); ha->pci_attr = readl((void const volatile *)(& reg->ctrl_status)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); } } int qla25xx_pci_config(scsi_qla_host_t *vha ) { uint16_t w ; struct qla_hw_data *ha ; bool tmp ; { ha = vha->hw; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & w); w = (uint16_t )((unsigned int )w | 320U); w = (unsigned int )w & 64511U; pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )w); tmp = pci_is_pcie(ha->pdev); if ((int )tmp) { pcie_set_readrq(ha->pdev, 4096); } else { } pci_disable_rom(ha->pdev); ha->chip_revision = (uint16_t )(ha->pdev)->revision; return (0); } } static int qla2x00_isp_firmware(scsi_qla_host_t *vha ) { int rval ; uint16_t loop_id ; uint16_t topo ; uint16_t sw_cap ; uint8_t domain ; uint8_t area ; uint8_t al_pa ; struct qla_hw_data *ha ; { ha = vha->hw; rval = 258; if (*((unsigned long *)ha + 2UL) != 0UL) { ql_log(2U, vha, 121, "RISC CODE NOT loaded.\n"); rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); if (rval == 0) { rval = qla2x00_get_adapter_id(vha, & loop_id, & al_pa, & area, & domain, & topo, & sw_cap); } else { } } else { } if (rval != 0) { ql_dbg(1073741824U, vha, 122, "**** Load RISC code ****.\n"); } else { } return (rval); } } void qla2x00_reset_chip(struct scsi_qla_host *vha ) { unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; uint32_t cnt ; uint16_t cmd ; int tmp ; long tmp___0 ; raw_spinlock_t *tmp___1 ; unsigned short tmp___2 ; unsigned short tmp___3 ; unsigned short tmp___4 ; unsigned short tmp___5 ; { flags = 0UL; ha = vha->hw; reg = & 
(ha->iobase)->isp; tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return; } else { } (*((ha->isp_ops)->disable_intrs))(ha); tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); cmd = 0U; pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & cmd); cmd = (unsigned int )cmd & 65531U; pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )cmd); if ((ha->device_type & 1U) == 0U) { writew(8192, (void volatile *)(& reg->hccr)); if ((ha->device_type & 2U) != 0U || (ha->device_type & 4U) != 0U) { cnt = 0U; goto ldv_66187; ldv_66186: tmp___2 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___2 & 32) != 0) { goto ldv_66185; } else { } __const_udelay(429500UL); cnt = cnt + 1U; ldv_66187: ; if (cnt <= 29999U) { goto ldv_66186; } else { } ldv_66185: ; } else { readw((void const volatile *)(& reg->hccr)); __const_udelay(42950UL); } writew(32, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); writew(256, (void volatile *)(& reg->fpm_diag_config)); readw((void const volatile *)(& reg->fpm_diag_config)); if ((ha->device_type & 2U) == 0U) { writew(0, (void volatile *)(& reg->fpm_diag_config)); readw((void const volatile *)(& reg->fpm_diag_config)); } else { } writew(16, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); if ((ha->device_type & 2U) != 0U) { writew(40960, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->fb_cmd_2100 : & reg->u.isp2300.fb_cmd)); readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->fb_cmd_2100 : & reg->u.isp2300.fb_cmd)); } else { writew(252, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->fb_cmd_2100 : & reg->u.isp2300.fb_cmd)); cnt = 0U; goto ldv_66190; ldv_66189: tmp___3 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->fb_cmd_2100 : & reg->u.isp2300.fb_cmd)); if (((int )tmp___3 & 255) == 0) { goto ldv_66188; } else { } __const_udelay(429500UL); cnt = cnt + 1U; ldv_66190: ; if (cnt <= 2999U) { goto ldv_66189; } else { } ldv_66188: ; } writew(0, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); writew(4096, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); writew(12288, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); } else { } writew(28672, (void volatile *)(& reg->hccr)); writew(24576, (void volatile *)(& reg->hccr)); writew(1, (void volatile *)(& reg->ctrl_status)); if (((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) || (ha->device_type & 4U) != 0U) { __const_udelay(85900UL); cnt = 30000U; goto ldv_66193; ldv_66192: tmp___4 = readw((void const volatile *)(& reg->ctrl_status)); if (((int )tmp___4 & 1) == 0) { goto ldv_66191; } else { } __const_udelay(429500UL); cnt = cnt - 1U; ldv_66193: ; if (cnt != 0U) { goto ldv_66192; } else { } ldv_66191: ; } else { __const_udelay(42950UL); } writew(4096, (void volatile *)(& reg->hccr)); writew(0, (void volatile *)(& reg->semaphore)); writew(12288, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); if (((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) || (ha->device_type & 4U) != 0U) { cnt = 0U; goto ldv_66196; ldv_66195: tmp___5 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); if ((unsigned int )tmp___5 != 4U) { goto ldv_66194; } else { } __const_udelay(429500UL); cnt = cnt + 1U; ldv_66196: ; if (cnt <= 29999U) { goto ldv_66195; } else { } ldv_66194: ; } else { __const_udelay(429500UL); } cmd = (uint16_t )((unsigned int )cmd | 4U); pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )cmd); if ((ha->device_type & 1U) == 0U) { writew(16385, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static int qla81xx_reset_mpi(scsi_qla_host_t *vha ) { uint16_t mb[4U] ; int tmp ; { mb[0] = 4112U; mb[1] = 0U; mb[2] = 1U; mb[3] = 0U; if (((vha->hw)->device_type & 8192U) == 0U) { return (0); } else { } tmp = qla81xx_write_mpi_register(vha, (uint16_t *)(& mb)); return (tmp); } } __inline static int qla24xx_reset_risc(scsi_qla_host_t *vha ) { unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; uint32_t cnt ; uint32_t d2 ; uint16_t wd ; int abts_cnt ; int rval ; raw_spinlock_t *tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; unsigned int tmp___2 ; unsigned int tmp___3 ; unsigned int tmp___4 ; unsigned short tmp___5 ; unsigned short tmp___6 ; unsigned int tmp___7 ; unsigned int tmp___8 ; unsigned int tmp___9 ; unsigned int tmp___10 ; unsigned int tmp___11 ; unsigned int tmp___12 ; int tmp___13 ; int tmp___14 ; unsigned short tmp___15 ; unsigned short tmp___16 ; unsigned short tmp___17 ; unsigned int tmp___18 ; { flags = 0UL; ha = vha->hw; reg = & (ha->iobase)->isp24; rval = 0; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writel(65584U, (void volatile *)(& reg->ctrl_status)); cnt = 0U; goto ldv_66217; ldv_66216: tmp___0 = readl((void const volatile *)(& reg->ctrl_status)); if ((tmp___0 & 131072U) == 0U) { goto ldv_66215; } else { } __const_udelay(42950UL); cnt = cnt + 1U; ldv_66217: ; if (cnt <= 29999U) { goto ldv_66216; } else { } ldv_66215: tmp___1 = readl((void const volatile *)(& reg->ctrl_status)); if ((tmp___1 & 131072U) == 0U) { set_bit(1L, (unsigned long volatile *)(& ha->fw_dump_cap_flags)); } else { } tmp___2 = readl((void const volatile *)(& reg->ctrl_status)); tmp___3 = readl((void const volatile *)(& reg->ctrl_status)); tmp___4 = readl((void const volatile *)(& reg->hccr)); ql_dbg(1073774592U, vha, 382, "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n", tmp___4, tmp___3, tmp___2 & 131072U); writel(65585U, (void volatile *)(& reg->ctrl_status)); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & wd); __const_udelay(429500UL); tmp___5 = readw((void const volatile *)(& reg->mailbox0)); d2 = (unsigned int )tmp___5; cnt = 10000U; goto ldv_66219; ldv_66218: __asm__ volatile ("": : : "memory"); if (cnt != 0U) { __const_udelay(21475UL); } else { rval = 256; } cnt = cnt - 1U; ldv_66219: tmp___6 = readw((void const volatile *)(& reg->mailbox0)); if ((unsigned int )tmp___6 != 0U && rval == 0) { goto ldv_66218; } else { } if (rval == 0) { set_bit(6L, (unsigned long volatile *)(& ha->fw_dump_cap_flags)); } else { } tmp___7 = readl((void const volatile *)(& reg->mailbox0)); tmp___8 = readl((void const volatile *)(& reg->hccr)); ql_dbg(1073774592U, vha, 383, "HCCR: 0x%x, MailBox0 Status 0x%x\n", tmp___8, tmp___7); d2 = readl((void const volatile *)(& reg->ctrl_status)); cnt = 0U; goto ldv_66223; ldv_66222: __asm__ volatile ("": : : "memory"); tmp___9 = readl((void const volatile *)(& reg->ctrl_status)); if ((tmp___9 & 1U) == 0U) { goto 
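/* qla81xx_reset_mpi() above sends mailbox command 0x1010 through
   qla81xx_write_mpi_register() for the one device family that needs it.
   qla24xx_reset_risc(), continuing here, performs the ISP24xx+ soft reset
   under hardware_lock: request a DMA shutdown and poll the DMA-active bit,
   assert the chip soft reset, wait for mailbox0 to drop to zero, then wait for
   the reset bit in ctrl_status to clear, recording each completed phase in
   ha->fw_dump_cap_flags. */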
ldv_66221; } else { } __const_udelay(21475UL); cnt = cnt + 1U; ldv_66223: ; if (cnt <= 5999999U) { goto ldv_66222; } else { } ldv_66221: tmp___10 = readl((void const volatile *)(& reg->ctrl_status)); if ((tmp___10 & 1U) == 0U) { set_bit(7L, (unsigned long volatile *)(& ha->fw_dump_cap_flags)); } else { } tmp___11 = readl((void const volatile *)(& reg->ctrl_status)); tmp___12 = readl((void const volatile *)(& reg->hccr)); ql_dbg(1073774592U, vha, 349, "HCCR: 0x%x, Soft Reset status: 0x%x\n", tmp___12, tmp___11); tmp___14 = test_and_clear_bit(19L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___14 != 0) { tmp___13 = qla81xx_reset_mpi(vha); if (tmp___13 != 0) { abts_cnt = abts_cnt + 1; if (abts_cnt <= 4) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(19L, (unsigned long volatile *)(& vha->dpc_flags)); } else { abts_cnt = 0; vha->flags.online = 0U; } } else { } } else { } writel(268435456U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); writel(1073741824U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); writel(536870912U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); tmp___15 = readw((void const volatile *)(& reg->mailbox0)); d2 = (unsigned int )tmp___15; cnt = 6000000U; goto ldv_66225; ldv_66224: __asm__ volatile ("": : : "memory"); if (cnt != 0U) { __const_udelay(21475UL); } else { rval = 256; } cnt = cnt - 1U; ldv_66225: tmp___16 = readw((void const volatile *)(& reg->mailbox0)); if ((unsigned int )tmp___16 != 0U && rval == 0) { goto ldv_66224; } else { } if (rval == 0) { set_bit(3L, (unsigned long volatile *)(& ha->fw_dump_cap_flags)); } else { } tmp___17 = readw((void const volatile *)(& reg->mailbox0)); tmp___18 = readl((void const volatile *)(& reg->hccr)); ql_dbg(1073774592U, vha, 350, "Host Risc 0x%x, mailbox0 0x%x\n", tmp___18, (int )tmp___17); spin_unlock_irqrestore(& ha->hardware_lock, flags); ql_dbg(1073774592U, vha, 351, "Driver in %s mode\n", (ha->device_type & 8192U) != 0U && *((unsigned long *)ha + 2UL) != 0UL ? 
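/* If an MPI reset was requested (dpc flag 19) and qla81xx_reset_mpi() failed,
   the retry counter is bumped and the ISP-abort/MPI-reset flags are re-armed
   for a bounded number of attempts before the port is marked offline.  Note
   that in the upstream qla_init.c this counter appears to be a zero-initialized
   function-scope static; here it is a plain local, so its starting value is
   left to the verifier.  The RISC is then released via a series of HCCR
   writes, mailbox0 is polled once more, and interrupts are re-enabled below
   when the controller runs in interrupt mode. */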
(char *)"Interrupt" : (char *)"Polling"); if ((ha->device_type & 8192U) != 0U && *((unsigned long *)ha + 2UL) != 0UL) { (*((ha->isp_ops)->enable_intrs))(ha); } else { } return (rval); } } static void qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha , uint32_t *data ) { struct device_reg_24xx *reg ; { reg = & ((vha->hw)->iobase)->isp24; writel(28688U, (void volatile *)(& reg->iobase_addr)); *data = readl((void const volatile *)(& reg->iobase_window) + 6U); return; } } static void qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha , uint32_t data ) { struct device_reg_24xx *reg ; { reg = & ((vha->hw)->iobase)->isp24; writel(28688U, (void volatile *)(& reg->iobase_addr)); writel(data, (void volatile *)(& reg->iobase_window) + 6U); return; } } static void qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t wd32 ; uint delta_msec ; uint elapsed_msec ; uint timeout_msec ; ulong n ; ulong tmp ; ulong tmp___0 ; { ha = vha->hw; wd32 = 0U; delta_msec = 100U; elapsed_msec = 0U; if ((ha->device_type & 2048U) == 0U && (ha->device_type & 32768U) == 0U) { return; } else { } attempt: timeout_msec = 2500U; n = (ulong )(timeout_msec / delta_msec); goto ldv_66250; ldv_66249: qla25xx_write_risc_sema_reg(vha, 65537U); qla25xx_read_risc_sema_reg(vha, & wd32); if ((int )wd32 & 1) { goto ldv_66247; } else { } msleep(delta_msec); elapsed_msec = elapsed_msec + delta_msec; if (elapsed_msec > 4500U) { goto force; } else { } ldv_66250: tmp = n; n = n - 1UL; if (tmp != 0UL) { goto ldv_66249; } else { } ldv_66247: ; if (((unsigned long )wd32 & 1UL) == 0UL) { goto force; } else { } if (((unsigned long )wd32 & 32768UL) == 0UL) { goto acquired; } else { } qla25xx_write_risc_sema_reg(vha, 65536U); timeout_msec = 2000U; n = (ulong )(timeout_msec / delta_msec); goto ldv_66254; ldv_66253: qla25xx_read_risc_sema_reg(vha, & wd32); if (((unsigned long )wd32 & 32768UL) == 0UL) { goto ldv_66252; } else { } msleep(delta_msec); elapsed_msec = elapsed_msec + delta_msec; if (elapsed_msec > 4500U) { goto force; } else { } ldv_66254: tmp___0 = n; n = n - 1UL; if (tmp___0 != 0UL) { goto ldv_66253; } else { } ldv_66252: ; if (((unsigned long )wd32 & 32768UL) != 0UL) { qla25xx_write_risc_sema_reg(vha, 2147483648U); } else { } goto attempt; force: qla25xx_write_risc_sema_reg(vha, 2147516416U); acquired: ; return; } } void qla24xx_reset_chip(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = pci_channel_offline(ha->pdev); if (tmp != 0 && *((unsigned long *)ha + 2UL) != 0UL) { return; } else { } (*((ha->isp_ops)->disable_intrs))(ha); qla25xx_manipulate_risc_semaphore(vha); qla24xx_reset_risc(vha); return; } } int qla2x00_chip_diag(struct scsi_qla_host *vha ) { int rval ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; unsigned long flags ; uint16_t data ; uint32_t cnt ; uint16_t mb[5U] ; struct req_que *req ; raw_spinlock_t *tmp ; unsigned short tmp___0 ; raw_spinlock_t *tmp___1 ; { ha = vha->hw; reg = & (ha->iobase)->isp; flags = 0UL; req = *(ha->req_q_map); rval = 258; ql_dbg(1073741824U, vha, 123, "Testing device at %lx.\n", (unsigned long )(& reg->flash_address)); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writew(1, (void volatile *)(& reg->ctrl_status)); __const_udelay(85900UL); data = qla2x00_debounce_register((uint16_t volatile *)(& reg->ctrl_status)); cnt = 6000000U; goto ldv_66274; ldv_66273: __const_udelay(21475UL); data = readw((void const volatile *)(& reg->ctrl_status)); __asm__ volatile ("": : : "memory"); cnt = 
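/* qla25xx_manipulate_risc_semaphore() above repeatedly tries to take the RISC
   semaphore through the iobase window registers, forcing ownership if it is
   not granted within the timeout; qla24xx_reset_chip() disables interrupts and
   then calls qla24xx_reset_risc().  qla2x00_chip_diag(), entered here, pulses
   the legacy reset bit in ctrl_status and polls until the bit clears before
   examining the chip. */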
cnt - 1U; ldv_66274: ; if (cnt != 0U && (int )data & 1) { goto ldv_66273; } else { } if (cnt == 0U) { goto chip_diag_failed; } else { } ql_dbg(1073741824U, vha, 124, "Reset register cleared by chip reset.\n"); writew(4096, (void volatile *)(& reg->hccr)); writew(12288, (void volatile *)(& reg->hccr)); if (((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) || (ha->device_type & 4U) != 0U) { data = qla2x00_debounce_register((uint16_t volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); cnt = 6000000U; goto ldv_66278; ldv_66277: __const_udelay(21475UL); data = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); __asm__ volatile ("": : : "memory"); cnt = cnt - 1U; ldv_66278: ; if (cnt != 0U && (unsigned int )data == 4U) { goto ldv_66277; } else { } } else { __const_udelay(42950UL); } if (cnt == 0U) { goto chip_diag_failed; } else { } ql_dbg(1073741824U, vha, 125, "Checking product Id of chip.\n"); mb[1] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 1U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 1U); mb[2] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 2U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 2U); mb[3] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 3U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 3U); mb[4] = qla2x00_debounce_register((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (uint16_t volatile *)(& reg->u.isp2100.mailbox0) + 4U : (uint16_t volatile *)(& reg->u.isp2300.mailbox0) + 4U); if (((unsigned int )mb[1] != 18771U || ((unsigned int )mb[2] != 0U && (unsigned int )mb[2] != 20512U)) || (unsigned int )mb[3] != 8224U) { ql_log(1U, vha, 98, "Wrong product ID = 0x%x,0x%x,0x%x.\n", (int )mb[1], (int )mb[2], (int )mb[3]); goto chip_diag_failed; } else { } ha->product_id[0] = mb[1]; ha->product_id[1] = mb[2]; ha->product_id[2] = mb[3]; ha->product_id[3] = mb[4]; if ((unsigned int )req->length > 1024U) { ha->fw_transfer_size = 65536U; } else { ha->fw_transfer_size = (uint32_t )req->length * 64U; } if ((ha->device_type & 2U) != 0U) { tmp___0 = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
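/* Chip diagnostic continues: mailbox registers 1-4 are read back and checked
   against the expected product-ID words (a mismatch logs "Wrong product ID"
   and fails the diagnostic), fw_transfer_size is derived from the request
   queue length, and for ISP2200 parts mailbox 7 identifies the 2200A variant,
   which is limited to 128-byte firmware transfers. */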
(void const volatile *)(& reg->u.isp2100.mailbox0) + 7U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 7U); if ((unsigned int )tmp___0 == 4U) { ql_dbg(1073741824U, vha, 126, "Found QLA2200A Chip.\n"); ha->device_type = ha->device_type | 1073741824U; ha->fw_transfer_size = 128U; } else { } } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); ql_dbg(1073741824U, vha, 127, "Checking mailboxes.\n"); rval = qla2x00_mbx_reg_test(vha); if (rval != 0) { ql_log(1U, vha, 128, "Failed mailbox send register test.\n"); } else { rval = 0; } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); chip_diag_failed: ; if (rval != 0) { ql_log(2U, vha, 129, "Chip diagnostics **** FAILED ****.\n"); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (rval); } } int qla24xx_chip_diag(struct scsi_qla_host *vha ) { int rval ; struct qla_hw_data *ha ; struct req_que *req ; { ha = vha->hw; req = *(ha->req_q_map); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return (0); } else { } ha->fw_transfer_size = (uint32_t )req->length * 64U; rval = qla2x00_mbx_reg_test(vha); if (rval != 0) { ql_log(1U, vha, 130, "Failed mailbox send register test.\n"); } else { rval = 0; } return (rval); } } void qla2x00_alloc_fw_dump(scsi_qla_host_t *vha ) { int rval ; uint32_t dump_size ; uint32_t fixed_size ; uint32_t mem_size ; uint32_t req_q_size ; uint32_t rsp_q_size ; uint32_t eft_size ; uint32_t fce_size ; uint32_t mq_size ; dma_addr_t tc_dma ; void *tc ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; ulong tmp ; void *tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; __u32 tmp___5 ; __u32 tmp___6 ; __u32 tmp___7 ; { ha = vha->hw; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); if ((unsigned long )ha->fw_dump != (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_dbg(1073741824U, vha, 189, "Firmware dump already allocated.\n"); return; } else { } ha->fw_dumped = 0; ha->fw_dump_cap_flags = 0UL; mq_size = 0U; fce_size = mq_size; eft_size = fce_size; mem_size = eft_size; fixed_size = mem_size; dump_size = fixed_size; rsp_q_size = 0U; req_q_size = rsp_q_size; if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { goto try_fce; } else { } if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { fixed_size = 123634U; } else if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { fixed_size = 136098U; mem_size = (ha->fw_memory_size + 2147414017U) * 2U; } else if ((ha->device_type & 134217728U) != 0U) { if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { fixed_size = 51192U; } else if ((ha->device_type & 8192U) != 0U) { fixed_size = 39416U; } else if ((ha->device_type & 2048U) != 0U) { fixed_size = 39224U; } else { fixed_size = 37972U; } mem_size = (ha->fw_memory_size + 1072693249U) * 4U; if ((unsigned int )ha->mqenable != 0U) { if ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) { mq_size = 524U; } else { } mq_size = ((uint32_t )ha->max_req_queues * (uint32_t )req->length) * 64U + mq_size; mq_size = ((uint32_t )ha->max_rsp_queues * (uint32_t )rsp->length) * 64U + mq_size; } else { } if ((unsigned long )ha->tgt.atio_ring != (unsigned long )((struct atio *)0)) { mq_size = (uint32_t )ha->tgt.atio_q_length * 64U + mq_size; } else { } if ((((ha->device_type & 2048U) == 0U && (ha->device_type & 8192U) == 0U) && 
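/* qla24xx_chip_diag() above only runs the mailbox register test (two
   device-type families skip it entirely).  qla2x00_alloc_fw_dump() computes
   the firmware dump layout: fixed_size and mem_size are chosen per ISP family
   from fw_memory_size, mq_size adds the multiqueue request/response rings and
   the ATIO ring when present, and 64 KB DMA-coherent FCE/EFT trace buffers are
   allocated below if the enable-trace mailbox commands succeed. */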
((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) && ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U)) { goto try_eft; } else { } try_fce: ; if ((unsigned long )ha->fce != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 65536UL, ha->fce, ha->fce_dma, (struct dma_attrs *)0); } else { } tc = dma_zalloc_coherent(& (ha->pdev)->dev, 65536UL, & tc_dma, 208U); if ((unsigned long )tc == (unsigned long )((void *)0)) { ql_log(1U, vha, 190, "Unable to allocate (%d KB) for FCE.\n", 64); goto try_eft; } else { } rval = qla2x00_enable_fce_trace(vha, tc_dma, 64, (uint16_t *)(& ha->fce_mb), & ha->fce_bufs); if (rval != 0) { ql_log(1U, vha, 191, "Unable to initialize FCE (%d).\n", rval); dma_free_attrs(& (ha->pdev)->dev, 65536UL, tc, tc_dma, (struct dma_attrs *)0); ha->flags.fce_enabled = 0U; goto try_eft; } else { } ql_dbg(1073741824U, vha, 192, "Allocate (%d KB) for FCE...\n", 64); fce_size = 65588U; ha->flags.fce_enabled = 1U; ha->fce_dma = tc_dma; ha->fce = tc; try_eft: ; if ((unsigned long )ha->eft != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 65536UL, ha->eft, ha->eft_dma, (struct dma_attrs *)0); } else { } tc = dma_zalloc_coherent(& (ha->pdev)->dev, 65536UL, & tc_dma, 208U); if ((unsigned long )tc == (unsigned long )((void *)0)) { ql_log(1U, vha, 193, "Unable to allocate (%d KB) for EFT.\n", 64); goto cont_alloc; } else { } rval = qla2x00_enable_eft_trace(vha, tc_dma, 4); if (rval != 0) { ql_log(1U, vha, 194, "Unable to initialize EFT (%d).\n", rval); dma_free_attrs(& (ha->pdev)->dev, 65536UL, tc, tc_dma, (struct dma_attrs *)0); goto cont_alloc; } else { } ql_dbg(1073741824U, vha, 195, "Allocated (%d KB) EFT ...\n", 64); eft_size = 65536U; ha->eft_dma = tc_dma; ha->eft = tc; } else { } cont_alloc: ; if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { if ((unsigned long )ha->fw_dump_template == (unsigned long )((void *)0)) { ql_log(1U, vha, 186, "Failed missing fwdump template\n"); return; } else { } tmp = qla27xx_fwdt_calculate_dump_size(vha); dump_size = (uint32_t )tmp; ql_dbg(1073741824U, vha, 250, "-> allocating fwdump (%x bytes)...\n", dump_size); goto allocate; } else { } req_q_size = (uint32_t )req->length * 64U; rsp_q_size = (uint32_t )rsp->length * 64U; dump_size = 72U; dump_size = ((((fixed_size + mem_size) + req_q_size) + rsp_q_size) + eft_size) + dump_size; ha->chain_offset = dump_size; dump_size = (mq_size + fce_size) + dump_size; allocate: tmp___0 = vmalloc((unsigned long )dump_size); ha->fw_dump = (struct qla2xxx_fw_dump *)tmp___0; if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 196, "Unable to allocate (%d KB) for firmware dump.\n", dump_size / 1024U); if ((unsigned long )ha->fce != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 65536UL, ha->fce, ha->fce_dma, (struct dma_attrs *)0); ha->fce = (void *)0; ha->fce_dma = 0ULL; } else { } if ((unsigned long )ha->eft != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )eft_size, ha->eft, ha->eft_dma, (struct dma_attrs *)0); ha->eft = (void *)0; ha->eft_dma = 0ULL; } else { } return; } else { } ha->fw_dump_len = dump_size; ql_dbg(1073741824U, vha, 197, "Allocated (%d KB) for firmware dump.\n", dump_size / 1024U); if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { return; } else { } (ha->fw_dump)->signature[0] = 81U; (ha->fw_dump)->signature[1] = 76U; (ha->fw_dump)->signature[2] = 71U; 
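/* The dump header is filled in with byte-swapped (big-endian) section sizes
   and the split EFT DMA address.  Parts that use the qla27xx firmware-dump
   template sized their dump earlier via qla27xx_fwdt_calculate_dump_size() and
   returned before reaching this header. */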
(ha->fw_dump)->signature[3] = 67U; (ha->fw_dump)->version = 16777216U; tmp___1 = __fswab32(fixed_size); (ha->fw_dump)->fixed_size = tmp___1; tmp___2 = __fswab32(mem_size); (ha->fw_dump)->mem_size = tmp___2; tmp___3 = __fswab32(req_q_size); (ha->fw_dump)->req_q_size = tmp___3; tmp___4 = __fswab32(rsp_q_size); (ha->fw_dump)->rsp_q_size = tmp___4; tmp___5 = __fswab32(eft_size); (ha->fw_dump)->eft_size = tmp___5; tmp___6 = __fswab32((unsigned int )ha->eft_dma); (ha->fw_dump)->eft_addr_l = tmp___6; tmp___7 = __fswab32((unsigned int )(ha->eft_dma >> 32ULL)); (ha->fw_dump)->eft_addr_h = tmp___7; (ha->fw_dump)->header_size = 1207959552U; return; } } static int qla81xx_mpi_sync(scsi_qla_host_t *vha ) { int rval ; uint16_t dc ; uint32_t dw ; { if (((vha->hw)->device_type & 8192U) == 0U) { return (0); } else { } rval = qla2x00_write_ram_word(vha, 31744U, 1U); if (rval != 0) { ql_log(1U, vha, 261, "Unable to acquire semaphore.\n"); goto done; } else { } pci_read_config_word((struct pci_dev const *)(vha->hw)->pdev, 84, & dc); rval = qla2x00_read_ram_word(vha, 31253U, & dw); if (rval != 0) { ql_log(1U, vha, 103, "Unable to read sync.\n"); goto done_release; } else { } dc = (unsigned int )dc & 224U; if ((uint32_t )dc == (dw & 224U)) { goto done_release; } else { } dw = dw & 4294967071U; dw = (uint32_t )dc | dw; rval = qla2x00_write_ram_word(vha, 31253U, dw); if (rval != 0) { ql_log(1U, vha, 276, "Unable to gain sync.\n"); } else { } done_release: rval = qla2x00_write_ram_word(vha, 31744U, 0U); if (rval != 0) { ql_log(1U, vha, 109, "Unable to release semaphore.\n"); } else { } done: ; return (rval); } } int qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha , struct req_que *req ) { void *tmp ; void *tmp___0 ; { if ((unsigned long )req->outstanding_cmds != (unsigned long )((srb_t **)0)) { return (0); } else { } if ((ha->device_type & 134217728U) == 0U || ((unsigned long )ha->mqiobase != (unsigned long )((device_reg_t *)0) && (ql2xmultique_tag != 0 || ql2xmaxqueues > 1))) { req->num_outstanding_cmds = 1024U; } else if ((int )ha->fw_xcb_count <= (int )ha->fw_iocb_count) { req->num_outstanding_cmds = ha->fw_xcb_count; } else { req->num_outstanding_cmds = ha->fw_iocb_count; } tmp = kzalloc((unsigned long )req->num_outstanding_cmds * 8UL, 208U); req->outstanding_cmds = (srb_t **)tmp; if ((unsigned long )req->outstanding_cmds == (unsigned long )((srb_t **)0)) { req->num_outstanding_cmds = 128U; tmp___0 = kzalloc((unsigned long )req->num_outstanding_cmds * 8UL, 208U); req->outstanding_cmds = (srb_t **)tmp___0; if ((unsigned long )req->outstanding_cmds == (unsigned long )((srb_t **)0)) { ql_log(0U, (scsi_qla_host_t *)0, 294, "Failed to allocate memory for outstanding_cmds for req_que %p.\n", req); req->num_outstanding_cmds = 0U; return (258); } else { } } else { } return (0); } } static int qla2x00_setup_chip(scsi_qla_host_t *vha ) { int rval ; uint32_t srisc_address ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; unsigned long flags ; uint16_t fw_major_version ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; uint32_t size ; { srisc_address = 0U; ha = vha->hw; reg = & (ha->iobase)->isp; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { rval = (*((ha->isp_ops)->load_risc))(vha, & srisc_address); if (rval == 0) { qla2x00_stop_firmware(vha); goto enable_82xx_npiv; } else { goto failed; } } else { } if (((ha->device_type & 134217728U) == 0U && (ha->device_type & 1U) == 0U) && (ha->device_type & 2U) == 0U) { tmp = spinlock_check(& ha->hardware_lock); flags = 
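/* qla81xx_mpi_sync() above reconciles a PCI config-space field with a RISC RAM
   word under a semaphore, and qla2x00_alloc_outstanding_cmds() sizes the
   per-queue outstanding-command array (falling back to 128 entries if the
   large allocation fails).  qla2x00_setup_chip(), continuing here, loads the
   RISC firmware through isp_ops->load_risc, verifies its checksum, starts it,
   then queries the firmware version, NPIV limits and resource counts, and
   allocates the firmware dump on the first successful start. */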
_raw_spin_lock_irqsave(tmp); writew(40960, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } qla81xx_mpi_sync(vha); rval = (*((ha->isp_ops)->load_risc))(vha, & srisc_address); if (rval == 0) { ql_dbg(1073741824U, vha, 201, "Verifying Checksum of loaded RISC code.\n"); rval = qla2x00_verify_checksum(vha, srisc_address); if (rval == 0) { ql_dbg(1073741824U, vha, 202, "Starting firmware.\n"); rval = qla2x00_execute_fw(vha, srisc_address); if (rval == 0) { enable_82xx_npiv: fw_major_version = ha->fw_major_version; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { qla82xx_check_md_needed(vha); } else { rval = qla2x00_get_fw_version(vha); } if (rval != 0) { goto failed; } else { } ha->flags.npiv_supported = 0U; if ((((((((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) || (ha->device_type & 8192U) != 0U) || (ha->device_type & 16384U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || (ha->device_type & 262144U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) && ((int )ha->fw_attributes & 4) != 0) { ha->flags.npiv_supported = 1U; if ((unsigned int )ha->max_npiv_vports == 0U || ((unsigned int )((int )ha->max_npiv_vports + 1) & 63U) != 0U) { ha->max_npiv_vports = 63U; } else { } } else { } qla2x00_get_resource_cnts(vha, (uint16_t *)0U, & ha->fw_xcb_count, (uint16_t *)0U, & ha->fw_iocb_count, & ha->max_npiv_vports, (uint16_t *)0U); rval = qla2x00_alloc_outstanding_cmds(ha, vha->req); if (rval != 0) { goto failed; } else { } if (((unsigned int )fw_major_version == 0U && ql2xallocfwdump != 0) && ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U)) { qla2x00_alloc_fw_dump(vha); } else { } } else { goto failed; } } else { ql_log(0U, vha, 205, "ISP Firmware failed checksum.\n"); goto failed; } } else { goto failed; } if (((ha->device_type & 134217728U) == 0U && (ha->device_type & 1U) == 0U) && (ha->device_type & 2U) == 0U) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); if ((ha->device_type & 4U) != 0U) { writew(40961, (void volatile *)(& reg->hccr)); } else { writew(40967, (void volatile *)(& reg->hccr)); } readw((void const volatile *)(& reg->hccr)); spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { ha->flags.fac_supported = 1U; } else if (rval == 0 && (((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U))) { rval = qla81xx_fac_get_sector_size(vha, & size); if (rval == 0) { ha->flags.fac_supported = 1U; ha->fdt_block_size = size << 2; } else { ql_log(1U, vha, 206, "Unsupported FAC firmware (%d.%02d.%02d).\n", (int )ha->fw_major_version, (int )ha->fw_minor_version, (int )ha->fw_subminor_version); if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { ha->flags.fac_supported = 0U; rval = 0; } else { } } } else { } failed: ; if (rval != 0) { ql_log(0U, vha, 207, "Setup chip ****FAILED****.\n"); } else { } return (rval); } } void qla2x00_init_response_q_entries(struct rsp_que *rsp ) { uint16_t cnt ; response_t *pkt ; { rsp->ring_ptr = rsp->ring; 
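/* qla2x00_init_response_q_entries(): resets the response ring pointer/index
   and stamps every ring entry with the signature value 0xDEADDEAD (the
   driver's "response processed" marker) so stale entries are never mistaken
   for new completions. */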
rsp->ring_index = 0U; rsp->status_srb = (srb_t *)0; pkt = rsp->ring_ptr; cnt = 0U; goto ldv_66346; ldv_66345: pkt->signature = 3735936685U; pkt = pkt + 1; cnt = (uint16_t )((int )cnt + 1); ldv_66346: ; if ((int )rsp->length > (int )cnt) { goto ldv_66345; } else { } return; } } void qla2x00_update_fw_options(struct scsi_qla_host *vha ) { uint16_t swing ; uint16_t emphasis ; uint16_t tx_sens ; uint16_t rx_sens ; struct qla_hw_data *ha ; { ha = vha->hw; memset((void *)(& ha->fw_options), 0, 32UL); qla2x00_get_fw_options(vha, (uint16_t *)(& ha->fw_options)); if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { return; } else { } ql_dbg(1073872896U, vha, 277, "Serial link options.\n"); ql_dump_buffer(1073872896U, vha, 265, (uint8_t *)(& ha->fw_seriallink_options), 4U); ha->fw_options[1] = (unsigned int )ha->fw_options[1] & 65279U; if (((int )ha->fw_seriallink_options[3] & 4) != 0) { ha->fw_options[1] = (uint16_t )((unsigned int )ha->fw_options[1] | 256U); swing = (unsigned int )((uint16_t )ha->fw_seriallink_options[2]) & 7U; emphasis = (uint16_t )(((int )ha->fw_seriallink_options[2] & 24) >> 3); tx_sens = (unsigned int )((uint16_t )ha->fw_seriallink_options[0]) & 15U; rx_sens = (uint16_t )((int )ha->fw_seriallink_options[0] >> 4); ha->fw_options[10] = (uint16_t )((int )((short )((int )emphasis << 14)) | (int )((short )((int )swing << 8))); if (((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 32U) != 0U) { if ((unsigned int )rx_sens == 0U) { rx_sens = 3U; } else { } ha->fw_options[10] = (uint16_t )((int )((short )ha->fw_options[10]) | ((int )((short )((int )tx_sens << 4)) | (int )((short )rx_sens))); } else if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { ha->fw_options[10] = (uint16_t )((int )((short )ha->fw_options[10]) | (((int )((short )(((int )rx_sens & 3) << 2)) | 32) | ((int )((short )tx_sens) & 3))); } else { } swing = (uint16_t )((int )ha->fw_seriallink_options[2] >> 5); emphasis = (unsigned int )((uint16_t )ha->fw_seriallink_options[3]) & 3U; tx_sens = (unsigned int )((uint16_t )ha->fw_seriallink_options[1]) & 15U; rx_sens = (uint16_t )((int )ha->fw_seriallink_options[1] >> 4); ha->fw_options[11] = (uint16_t )((int )((short )((int )emphasis << 14)) | (int )((short )((int )swing << 8))); if (((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 32U) != 0U) { if ((unsigned int )rx_sens == 0U) { rx_sens = 3U; } else { } ha->fw_options[11] = (uint16_t )((int )((short )ha->fw_options[11]) | ((int )((short )((int )tx_sens << 4)) | (int )((short )rx_sens))); } else if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { ha->fw_options[11] = (uint16_t )((int )((short )ha->fw_options[11]) | (((int )((short )(((int )rx_sens & 3) << 2)) | 32) | ((int )((short )tx_sens) & 3))); } else { } } else { } ha->fw_options[3] = (uint16_t )((unsigned int )ha->fw_options[3] | 8192U); if (*((unsigned long *)ha + 2UL) != 0UL) { ha->fw_options[2] = (uint16_t )((unsigned int )ha->fw_options[2] | 4096U); } else { } if ((ha->device_type & 32U) != 0U) { ha->fw_options[2] = (uint16_t )((unsigned int )ha->fw_options[2] | 8192U); } else { } qla2x00_set_fw_options(vha, (uint16_t *)(& ha->fw_options)); return; } } void qla24xx_update_fw_options(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; { ha = vha->hw; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return; } else { } if (((int )ha->fw_seriallink_options24[0] & 1) == 0) { return; } else { } rval = 
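/* qla2x00_update_fw_options() above rebuilds fw_options[] from the NVRAM
   serial-link settings, packing transmitter swing/emphasis and receiver
   sensitivity into option words 10 and 11.  qla24xx_update_fw_options(),
   continuing here, simply pushes the 24xx serdes parameters via
   qla2x00_set_serdes_params() when the NVRAM marks them valid. */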
qla2x00_set_serdes_params(vha, (int )ha->fw_seriallink_options24[1], (int )ha->fw_seriallink_options24[2], (int )ha->fw_seriallink_options24[3]); if (rval != 0) { ql_log(1U, vha, 260, "Unable to update Serial Link options (%x).\n", rval); } else { } return; } } void qla2x00_config_rings(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; struct req_que *req ; struct rsp_que *rsp ; { ha = vha->hw; reg = & (ha->iobase)->isp; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); (ha->init_cb)->request_q_outpointer = 0U; (ha->init_cb)->response_q_inpointer = 0U; (ha->init_cb)->request_q_length = req->length; (ha->init_cb)->response_q_length = rsp->length; (ha->init_cb)->request_q_address[0] = (unsigned int )req->dma; (ha->init_cb)->request_q_address[1] = (unsigned int )(req->dma >> 32ULL); (ha->init_cb)->response_q_address[0] = (unsigned int )rsp->dma; (ha->init_cb)->response_q_address[1] = (unsigned int )(rsp->dma >> 32ULL); writew(0, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox4 : & reg->u.isp2300.req_q_in)); writew(0, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox4 : & reg->u.isp2300.req_q_out)); writew(0, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox5 : & reg->u.isp2300.rsp_q_in)); writew(0, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox5 : & reg->u.isp2300.rsp_q_out)); readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox5 : & reg->u.isp2300.rsp_q_out)); return; } } void qla24xx_config_rings(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; device_reg_t *reg ; struct device_reg_2xxx *ioreg ; struct qla_msix_entry *msix ; struct init_cb_24xx *icb ; uint16_t rid ; struct req_que *req ; struct rsp_que *rsp ; { ha = vha->hw; reg = ((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) ? 
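/* qla2x00_config_rings() above programs the legacy init control block with the
   request/response ring lengths and DMA addresses and zeroes the in/out
   pointer registers.  qla24xx_config_rings() does the same for ISP24xx+ using
   init_cb_24xx, additionally describing the ATIO ring and, when multiqueue is
   enabled, programming the MQ register block, the MSI-X vector and the QoS/RID
   fields. */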
ha->mqiobase : ha->iobase; ioreg = & (ha->iobase)->isp; rid = 0U; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); icb = (struct init_cb_24xx *)ha->init_cb; icb->request_q_outpointer = 0U; icb->response_q_inpointer = 0U; icb->request_q_length = req->length; icb->response_q_length = rsp->length; icb->request_q_address[0] = (unsigned int )req->dma; icb->request_q_address[1] = (unsigned int )(req->dma >> 32ULL); icb->response_q_address[0] = (unsigned int )rsp->dma; icb->response_q_address[1] = (unsigned int )(rsp->dma >> 32ULL); icb->atio_q_inpointer = 0U; icb->atio_q_length = ha->tgt.atio_q_length; icb->atio_q_address[0] = (unsigned int )ha->tgt.atio_dma; icb->atio_q_address[1] = (unsigned int )(ha->tgt.atio_dma >> 32ULL); if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { icb->firmware_options_2 = icb->firmware_options_2 | 1610612736U; } else { } if (((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { icb->qos = 5U; icb->rid = rid; if (*((unsigned long *)ha + 2UL) != 0UL) { msix = ha->msix_entries + 1UL; ql_dbg(1073741824U, vha, 253, "Registering vector 0x%x for base que.\n", (int )msix->entry); icb->msix = msix->entry; } else { } if ((unsigned int )((unsigned char )((int )rid >> 8)) != 0U) { icb->firmware_options_2 = icb->firmware_options_2 | 524288U; } else { } if ((unsigned int )((unsigned char )rid) != 0U) { icb->firmware_options_2 = icb->firmware_options_2 | 262144U; } else { } if ((((int )ha->fw_attributes & 64) != 0 && (((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U))) && *((unsigned long *)ha + 2UL) != 0UL) { icb->firmware_options_2 = icb->firmware_options_2 & 4290772991U; ha->flags.disable_msix_handshake = 1U; ql_dbg(1073741824U, vha, 254, "MSIX Handshake Disable Mode turned on.\n"); } else { icb->firmware_options_2 = icb->firmware_options_2 | 4194304U; } icb->firmware_options_2 = icb->firmware_options_2 | 8388608U; writel(0U, (void volatile *)(& reg->isp25mq.req_q_in)); writel(0U, (void volatile *)(& reg->isp25mq.req_q_out)); writel(0U, (void volatile *)(& reg->isp25mq.rsp_q_in)); writel(0U, (void volatile *)(& reg->isp25mq.rsp_q_out)); } else { writel(0U, (void volatile *)(& reg->isp24.req_q_in)); writel(0U, (void volatile *)(& reg->isp24.req_q_out)); writel(0U, (void volatile *)(& reg->isp24.rsp_q_in)); writel(0U, (void volatile *)(& reg->isp24.rsp_q_out)); } qlt_24xx_config_rings(vha); readl((void const volatile *)(& ioreg->hccr)); return; } } int qla2x00_init_rings(scsi_qla_host_t *vha ) { int rval ; unsigned long flags ; int cnt ; int que ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; struct mid_init_cb_24xx *mid_init_cb ; raw_spinlock_t *tmp ; { flags = 0UL; ha = vha->hw; mid_init_cb = (struct mid_init_cb_24xx *)ha->init_cb; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); que = 0; goto ldv_66398; ldv_66397: req = *(ha->req_q_map + (unsigned long )que); if ((unsigned long )req == (unsigned long )((struct req_que *)0)) { goto ldv_66393; } else { } req->out_ptr = (uint16_t *)req->ring + (unsigned long )req->length; *(req->out_ptr) = 0U; cnt = 1; goto ldv_66395; ldv_66394: *(req->outstanding_cmds + (unsigned long )cnt) = (srb_t *)0; cnt = cnt + 1; ldv_66395: ; if ((int )req->num_outstanding_cmds > cnt) { goto ldv_66394; } else 
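/* qla2x00_init_rings(): under hardware_lock every request queue has its
   outstanding-command slots cleared and ring indices reset, every response
   queue has its in-pointer reset and its entries re-initialized (the qlafx00
   variant where applicable), the ATIO ring is reset, and isp_ops->config_rings
   programs the hardware before the init-firmware mailbox command is issued. */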
{ } req->current_outstanding_cmd = 1U; req->ring_ptr = req->ring; req->ring_index = 0U; req->cnt = req->length; ldv_66393: que = que + 1; ldv_66398: ; if ((int )ha->max_req_queues > que) { goto ldv_66397; } else { } que = 0; goto ldv_66402; ldv_66401: rsp = *(ha->rsp_q_map + (unsigned long )que); if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { goto ldv_66400; } else { } rsp->in_ptr = (uint16_t *)rsp->ring + (unsigned long )rsp->length; *(rsp->in_ptr) = 0U; if ((ha->device_type & 131072U) != 0U) { qlafx00_init_response_q_entries(rsp); } else { qla2x00_init_response_q_entries(rsp); } ldv_66400: que = que + 1; ldv_66402: ; if ((int )ha->max_rsp_queues > que) { goto ldv_66401; } else { } ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; ha->tgt.atio_ring_index = 0U; qlt_init_atio_q_entries(vha); (*((ha->isp_ops)->config_rings))(vha); spin_unlock_irqrestore(& ha->hardware_lock, flags); ql_dbg(1073741824U, vha, 209, "Issue init firmware.\n"); if ((ha->device_type & 131072U) != 0U) { rval = qlafx00_init_firmware(vha, (int )((uint16_t )ha->init_cb_size)); goto next_check; } else { } (*((ha->isp_ops)->update_fw_options))(vha); if (*((unsigned long *)ha + 2UL) != 0UL) { if ((unsigned int )ha->operating_mode == 0U && ((((ha->device_type & 8192U) == 0U && (ha->device_type & 16384U) == 0U) && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U)) { ha->max_npiv_vports = 63U; } else { } mid_init_cb->count = ha->max_npiv_vports; } else { } if ((ha->device_type & 134217728U) != 0U) { mid_init_cb->options = 2U; mid_init_cb->init_cb.execution_throttle = ha->fw_xcb_count; if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { mid_init_cb->init_cb.firmware_options_1 = mid_init_cb->init_cb.firmware_options_1 | 128U; } else { } ha->flags.fawwpn_enabled = (mid_init_cb->init_cb.firmware_options_1 & 64U) != 0U; ql_dbg(1073741824U, vha, 321, "FA-WWPN Support: %s.\n", *((unsigned long *)ha + 2UL) != 0UL ? 
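/* For parts using the 24xx-style extended (mid) init control block, it carries
   the maximum NPIV vport count and the execution throttle; bit 6 of
   firmware_options_1 is read back into ha->flags.fawwpn_enabled and reported
   in the FA-WWPN support message that follows. */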
(char *)"enabled" : (char *)"disabled"); } else { } rval = qla2x00_init_firmware(vha, (int )((uint16_t )ha->init_cb_size)); next_check: ; if (rval != 0) { ql_log(0U, vha, 210, "Init Firmware **** FAILED ****.\n"); } else { ql_dbg(1073741824U, vha, 211, "Init Firmware -- success.\n"); } return (rval); } } static int qla2x00_fw_ready(scsi_qla_host_t *vha ) { int rval ; unsigned long wtime ; unsigned long mtime ; unsigned long cs84xx_time ; uint16_t min_wait ; uint16_t wait_time ; uint16_t state[6U] ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { ha = vha->hw; if (((vha->hw)->device_type & 131072U) != 0U) { tmp = qlafx00_fw_ready(vha); return (tmp); } else { } rval = 0; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { min_wait = 30U; } else { min_wait = 20U; } wait_time = (unsigned int )((int )((uint16_t )ha->retry_count) * (int )((uint16_t )ha->login_timeout)) + 5U; if ((int )wait_time < (int )min_wait) { wait_time = min_wait; } else { } mtime = (unsigned long )((int )min_wait * 250) + (unsigned long )jiffies; wtime = (unsigned long )((int )wait_time * 250) + (unsigned long )jiffies; if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(2U, vha, 32798, "Waiting for LIP to complete.\n"); } else { } ldv_66435: memset((void *)(& state), -1, 12UL); rval = qla2x00_get_firmware_state(vha, (uint16_t *)(& state)); if (rval == 0) { if ((unsigned int )state[0] <= 3U) { vha->device_flags = vha->device_flags & 4294967293U; } else { } if ((ha->device_type & 4096U) != 0U && (unsigned int )state[0] != 3U) { ql_dbg(4194304U, vha, 32799, "fw_state=%x 84xx=%x.\n", (int )state[0], (int )state[2]); if (((int )state[2] & 8) != 0 && ((int )state[2] & 16) != 0) { ql_dbg(4194304U, vha, 32808, "Sending verify iocb.\n"); cs84xx_time = jiffies; rval = qla84xx_init_chip(vha); if (rval != 0) { ql_log(1U, vha, 32775, "Init chip failed.\n"); goto ldv_66416; } else { } cs84xx_time = (unsigned long )jiffies - cs84xx_time; wtime = wtime + cs84xx_time; mtime = mtime + cs84xx_time; ql_dbg(4194304U, vha, 32776, "Increasing wait time by %ld. 
New time %ld.\n", cs84xx_time, wtime); } else { } } else if ((unsigned int )state[0] == 3U) { ql_dbg(4194304U, vha, 32823, "F/W Ready - OK.\n"); qla2x00_get_retry_cnt(vha, & ha->retry_count, & ha->login_timeout, & ha->r_a_tov); rval = 0; goto ldv_66416; } else { } rval = 258; tmp___0 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___0 != 0 && (unsigned int )state[0] != 3U) { if ((long )((unsigned long )jiffies - mtime) >= 0L) { ql_log(2U, vha, 32824, "Cable is unplugged...\n"); vha->device_flags = vha->device_flags | 2U; goto ldv_66416; } else { } } else { } } else if ((long )((unsigned long )jiffies - mtime) >= 0L || *((unsigned long *)ha + 2UL) != 0UL) { goto ldv_66416; } else { } if ((long )((unsigned long )jiffies - wtime) >= 0L) { goto ldv_66416; } else { } msleep(500U); goto ldv_66435; ldv_66416: ql_dbg(4194304U, vha, 32826, "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", (int )state[0], (int )state[1], (int )state[2], (int )state[3], (int )state[4], (int )state[5], jiffies); if (rval != 0 && (vha->device_flags & 2U) == 0U) { ql_log(1U, vha, 32827, "Firmware ready **** FAILED ****.\n"); } else { } return (rval); } } static int qla2x00_configure_hba(scsi_qla_host_t *vha ) { int rval ; uint16_t loop_id ; uint16_t topo ; uint16_t sw_cap ; uint8_t al_pa ; uint8_t area ; uint8_t domain ; char connect_type[22U] ; struct qla_hw_data *ha ; unsigned long flags ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; raw_spinlock_t *tmp___5 ; { ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; rval = qla2x00_get_adapter_id(vha, & loop_id, & al_pa, & area, & domain, & topo, & sw_cap); if (rval != 0) { tmp___1 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { ql_dbg(268435456U, vha, 8200, "Loop is in a transition state.\n"); } else { tmp___2 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___2 != 0) { ql_dbg(268435456U, vha, 8200, "Loop is in a transition state.\n"); } else { tmp___3 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___3 == 2) { ql_dbg(268435456U, vha, 8200, "Loop is in a transition state.\n"); } else { tmp___4 = atomic_read((atomic_t const *)(& ha->loop_down_timer)); if (tmp___4 != 0) { ql_dbg(268435456U, vha, 8200, "Loop is in a transition state.\n"); } else if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { ql_dbg(268435456U, vha, 8200, "Loop is in a transition state.\n"); } else if (rval == 5 && (unsigned int )loop_id == 7U) { ql_dbg(268435456U, vha, 8200, "Loop is in a transition state.\n"); } else { ql_log(1U, vha, 8201, "Unable to get host loop ID.\n"); if (((ha->device_type & 134217728U) != 0U && (unsigned long )vha == (unsigned long )base_vha) && (rval == 5 && (unsigned int )loop_id == 27U)) { ql_log(1U, vha, 4433, "Doing link init.\n"); tmp___0 = qla24xx_link_initialize(vha); if (tmp___0 == 0) { return (rval); } else { } } else { } set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } } } } return (rval); } else { } if ((unsigned int )topo == 4U) { ql_log(2U, vha, 8202, "Cannot get topology - retrying.\n"); return (258); } else { } vha->loop_id = loop_id; ha->min_external_loopid = 129U; ha->operating_mode = 0U; ha->switch_cap = 0U; switch ((int )topo) { case 0: ql_dbg(268435456U, vha, 8203, "HBA in NL topology.\n"); ha->current_topology = 1U; strcpy((char *)(& 
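/* qla2x00_fw_ready() above polls the firmware state word until it reports
   ready (3), with extra handling for CS84xx parts that need a verify IOCB, and
   gives up when the login-timeout-derived deadline passes or the cable appears
   unplugged.  qla2x00_configure_hba(), continuing here, reads the adapter ID
   (loop id, AL_PA, area, domain, topology, switch capabilities), treats
   loop-transition states as transient, and selects the operating mode and
   connect_type string from the reported topology. */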
connect_type), "(Loop)"); goto ldv_66451; case 1: ql_dbg(268435456U, vha, 8204, "HBA in FL topology.\n"); ha->switch_cap = sw_cap; ha->current_topology = 4U; strcpy((char *)(& connect_type), "(FL_Port)"); goto ldv_66451; case 2: ql_dbg(268435456U, vha, 8205, "HBA in N P2P topology.\n"); ha->operating_mode = 1U; ha->current_topology = 2U; strcpy((char *)(& connect_type), "(N_Port-to-N_Port)"); goto ldv_66451; case 3: ql_dbg(268435456U, vha, 8206, "HBA in F P2P topology.\n"); ha->switch_cap = sw_cap; ha->operating_mode = 1U; ha->current_topology = 8U; strcpy((char *)(& connect_type), "(F_Port)"); goto ldv_66451; default: ql_dbg(268435456U, vha, 8207, "HBA in unknown topology %x, using NL.\n", (int )topo); ha->current_topology = 1U; strcpy((char *)(& connect_type), "(Loop)"); goto ldv_66451; } ldv_66451: vha->d_id.b.domain = domain; vha->d_id.b.area = area; vha->d_id.b.al_pa = al_pa; tmp___5 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___5); qlt_update_vp_map(vha, 2); spin_unlock_irqrestore(& ha->vport_slock, flags); if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(2U, vha, 8208, "Topology - %s, Host Loop address 0x%x.\n", (char *)(& connect_type), (int )vha->loop_id); } else { } return (rval); } } __inline void qla2x00_set_model_info(scsi_qla_host_t *vha , uint8_t *model , size_t len , char *def ) { char *st ; char *en ; uint16_t index ; struct qla_hw_data *ha ; int use_tbl ; char *tmp ; int tmp___0 ; { ha = vha->hw; use_tbl = ((((((ha->device_type & 128U) == 0U && (ha->device_type & 256U) == 0U) && ((ha->device_type & 512U) == 0U && (ha->device_type & 1024U) == 0U)) && (ha->device_type & 4096U) == 0U) && (ha->device_type & 2048U) == 0U) && ((((ha->device_type & 8192U) == 0U && (ha->device_type & 16384U) == 0U) && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U)) && (ha->device_type & 32768U) == 0U; tmp___0 = memcmp((void const *)model, (void const *)"", len); if (tmp___0 != 0) { strncpy((char *)(& ha->model_number), (char const *)model, len); en = (char *)(& ha->model_number); st = en; en = en + (len + 0xffffffffffffffffUL); goto ldv_66472; ldv_66471: ; if ((int )((signed char )*en) != 32 && (int )((signed char )*en) != 0) { goto ldv_66470; } else { } tmp = en; en = en - 1; *tmp = 0; ldv_66472: ; if ((unsigned long )en > (unsigned long )st) { goto ldv_66471; } else { } ldv_66470: index = (unsigned int )(ha->pdev)->subsystem_device & 255U; if ((use_tbl != 0 && (unsigned int )(ha->pdev)->subsystem_vendor == 4215U) && (unsigned int )index <= 91U) { strncpy((char *)(& ha->model_desc), (char const *)qla2x00_model_name[(int )index * 2 + 1], 79UL); } else { } } else { index = (unsigned int )(ha->pdev)->subsystem_device & 255U; if ((use_tbl != 0 && (unsigned int )(ha->pdev)->subsystem_vendor == 4215U) && (unsigned int )index <= 91U) { strcpy((char *)(& ha->model_number), (char const *)qla2x00_model_name[(int )index * 2]); strncpy((char *)(& ha->model_desc), (char const *)qla2x00_model_name[(int )index * 2 + 1], 79UL); } else { strcpy((char *)(& ha->model_number), (char const *)def); } } if ((ha->device_type & 134217728U) != 0U) { qla2xxx_get_vpd_field(vha, (char *)"\202", (char *)(& ha->model_desc), 80UL); } else { } return; } } static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha , nvram_t *nv ) { { return; } } int qla2x00_nvram_config(struct scsi_qla_host *vha ) { int rval ; uint8_t chksum ; uint16_t cnt ; uint8_t *dptr1 ; uint8_t *dptr2 ; struct qla_hw_data *ha ; init_cb_t *icb ; nvram_t *nv ; uint8_t *ptr ; struct device_reg_2xxx *reg 
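/* qla2x00_set_model_info() above trims the NVRAM model string and, for QLogic
   (0x1077) subsystem IDs found in the qla2x00_model_name table, fills in the
   model description; qla2xxx_nvram_wwn_from_ofw() is an empty stub in this
   build.  qla2x00_nvram_config(), whose locals begin here, reads and validates
   the legacy ISP2xxx NVRAM image. */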
; unsigned short tmp ; uint8_t *tmp___0 ; uint8_t *tmp___1 ; uint8_t *tmp___2 ; uint16_t tmp___3 ; uint8_t *tmp___4 ; uint8_t *tmp___5 ; uint16_t tmp___6 ; { chksum = 0U; ha = vha->hw; icb = ha->init_cb; nv = (nvram_t *)ha->nvram; ptr = (uint8_t *)ha->nvram; reg = & (ha->iobase)->isp; rval = 0; ha->nvram_size = 256U; ha->nvram_base = 0U; if (((ha->device_type & 1U) == 0U && (ha->device_type & 2U) == 0U) && (ha->device_type & 4U) == 0U) { tmp = readw((void const volatile *)(& reg->ctrl_status)); if ((unsigned int )((int )tmp >> 14) == 1U) { ha->nvram_base = 128U; } else { } } else { } (*((ha->isp_ops)->read_nvram))(vha, ptr, (uint32_t )ha->nvram_base, (uint32_t )ha->nvram_size); cnt = 0U; chksum = 0U; goto ldv_66491; ldv_66490: tmp___0 = ptr; ptr = ptr + 1; chksum = (int )*tmp___0 + (int )chksum; cnt = (uint16_t )((int )cnt + 1); ldv_66491: ; if ((int )ha->nvram_size > (int )cnt) { goto ldv_66490; } else { } ql_dbg(1073872896U, vha, 271, "Contents of NVRAM.\n"); ql_dump_buffer(1073872896U, vha, 272, (uint8_t *)nv, (uint32_t )ha->nvram_size); if ((((((unsigned int )chksum != 0U || (unsigned int )nv->id[0] != 73U) || (unsigned int )nv->id[1] != 83U) || (unsigned int )nv->id[2] != 80U) || (unsigned int )nv->id[3] != 32U) || (unsigned int )nv->nvram_version == 0U) { ql_log(1U, vha, 100, "Inconsistent NVRAM detected: checksum=0x%x id=%c version=0x%x.\n", (int )chksum, (int )nv->id[0], (int )nv->nvram_version); ql_log(1U, vha, 101, "Falling back to functioning (yet invalid -- WWPN) defaults.\n"); memset((void *)nv, 0, (size_t )ha->nvram_size); nv->parameter_block_version = 1U; if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { nv->firmware_options[0] = 6U; nv->firmware_options[1] = 160U; nv->add_firmware_options[0] = 32U; nv->add_firmware_options[1] = 48U; nv->frame_payload_size = 2048U; nv->special_options[1] = 128U; } else if ((ha->device_type & 2U) != 0U) { nv->firmware_options[0] = 6U; nv->firmware_options[1] = 160U; nv->add_firmware_options[0] = 32U; nv->add_firmware_options[1] = 48U; nv->frame_payload_size = 1024U; } else if ((int )ha->device_type & 1) { nv->firmware_options[0] = 10U; nv->firmware_options[1] = 32U; nv->frame_payload_size = 1024U; } else { } nv->max_iocb_allocation = 256U; nv->execution_throttle = 16U; nv->retry_count = 8U; nv->retry_delay = 1U; nv->port_name[0] = 33U; nv->port_name[3] = 224U; nv->port_name[4] = 139U; qla2xxx_nvram_wwn_from_ofw(vha, nv); nv->login_timeout = 4U; nv->host_p[1] = 4U; nv->reset_delay = 5U; nv->port_down_retry_count = 8U; nv->max_luns_per_target = 8U; nv->link_down_timeout = 60U; rval = 1; } else { } memset((void *)icb, 0, (size_t )ha->init_cb_size); nv->firmware_options[0] = (uint8_t )((unsigned int )nv->firmware_options[0] | 66U); nv->firmware_options[0] = (unsigned int )nv->firmware_options[0] & 207U; nv->firmware_options[1] = (uint8_t )((unsigned int )nv->firmware_options[1] | 33U); nv->firmware_options[1] = (unsigned int )nv->firmware_options[1] & 239U; if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { nv->firmware_options[0] = (uint8_t )((unsigned int )nv->firmware_options[0] | 4U); nv->firmware_options[0] = (unsigned int )nv->firmware_options[0] & 247U; nv->special_options[0] = (unsigned int )nv->special_options[0] & 191U; nv->add_firmware_options[1] = (uint8_t )((unsigned int 
)nv->add_firmware_options[1] | 48U); if ((ha->device_type & 4U) != 0U) { if ((unsigned int )ha->fb_rev == 7U) { strcpy((char *)(& ha->model_number), "QLA2310"); } else { strcpy((char *)(& ha->model_number), "QLA2300"); } } else { qla2x00_set_model_info(vha, (uint8_t *)(& nv->model_number), 16UL, (char *)"QLA23xx"); } } else if ((ha->device_type & 2U) != 0U) { nv->firmware_options[0] = (uint8_t )((unsigned int )nv->firmware_options[0] | 4U); if (((int )nv->add_firmware_options[0] & 112) == 48) { nv->add_firmware_options[0] = (unsigned int )nv->add_firmware_options[0] & 143U; nv->add_firmware_options[0] = (uint8_t )((unsigned int )nv->add_firmware_options[0] | 32U); } else { } strcpy((char *)(& ha->model_number), "QLA22xx"); } else { strcpy((char *)(& ha->model_number), "QLA2100"); } dptr1 = (uint8_t *)icb; dptr2 = & nv->parameter_block_version; cnt = 32U; goto ldv_66494; ldv_66493: tmp___1 = dptr1; dptr1 = dptr1 + 1; tmp___2 = dptr2; dptr2 = dptr2 + 1; *tmp___1 = *tmp___2; ldv_66494: tmp___3 = cnt; cnt = (uint16_t )((int )cnt - 1); if ((unsigned int )tmp___3 != 0U) { goto ldv_66493; } else { } dptr1 = (uint8_t *)(& icb->add_firmware_options); cnt = 6U; goto ldv_66497; ldv_66496: tmp___4 = dptr1; dptr1 = dptr1 + 1; tmp___5 = dptr2; dptr2 = dptr2 + 1; *tmp___4 = *tmp___5; ldv_66497: tmp___6 = cnt; cnt = (uint16_t )((int )cnt - 1); if ((unsigned int )tmp___6 != 0U) { goto ldv_66496; } else { } if ((int )((signed char )nv->host_p[1]) < 0) { memcpy((void *)(& icb->node_name), (void const *)(& nv->alternate_node_name), 8UL); memcpy((void *)(& icb->port_name), (void const *)(& nv->alternate_port_name), 8UL); } else { } if (((int )icb->firmware_options[1] & 64) == 0) { memcpy((void *)(& icb->node_name), (void const *)(& icb->port_name), 8UL); icb->node_name[0] = (unsigned int )icb->node_name[0] & 240U; } else { } if ((int )((signed char )nv->host_p[0]) < 0) { ql2xextended_error_logging = 507510784; } else { } ha->flags.disable_risc_code_load = ((int )nv->host_p[0] & 16) != 0; if ((ha->device_type & 1U) == 0U && (ha->device_type & 2U) == 0U) { ha->flags.disable_risc_code_load = 0U; } else { } ha->flags.enable_lip_reset = ((int )nv->host_p[1] & 2) != 0; ha->flags.enable_lip_full_login = ((int )nv->host_p[1] & 4) != 0; ha->flags.enable_target_reset = ((int )nv->host_p[1] & 8) != 0; ha->flags.enable_led_scheme = ((int )nv->special_options[1] & 16) != 0; ha->flags.disable_serdes = 0U; ha->operating_mode = (uint8_t )(((int )icb->add_firmware_options[0] & 112) >> 4); memcpy((void *)(& ha->fw_seriallink_options), (void const *)(& nv->seriallink_options), 4UL); ha->serial0 = icb->port_name[5]; ha->serial1 = icb->port_name[6]; ha->serial2 = icb->port_name[7]; memcpy((void *)(& vha->node_name), (void const *)(& icb->node_name), 8UL); memcpy((void *)(& vha->port_name), (void const *)(& icb->port_name), 8UL); icb->execution_throttle = 65535U; ha->retry_count = nv->retry_count; if ((int )nv->login_timeout != ql2xlogintimeout) { nv->login_timeout = (uint8_t )ql2xlogintimeout; } else { } if ((unsigned int )nv->login_timeout <= 3U) { nv->login_timeout = 4U; } else { } ha->login_timeout = nv->login_timeout; icb->login_timeout = nv->login_timeout; ha->r_a_tov = 100U; ha->loop_reset_delay = (uint16_t )nv->reset_delay; if ((unsigned int )nv->link_down_timeout == 0U) { ha->loop_down_abort_time = 195U; } else { ha->link_down_timeout = nv->link_down_timeout; ha->loop_down_abort_time = ~ ((int )ha->link_down_timeout); } ha->port_down_retry_count = (int )nv->port_down_retry_count; if (qlport_down_retry != 0) { 
ha->port_down_retry_count = qlport_down_retry; } else { } ha->login_retry_count = (uint32_t )nv->retry_count; if (ha->port_down_retry_count == (int )nv->port_down_retry_count && ha->port_down_retry_count > 3) { ha->login_retry_count = (uint32_t )ha->port_down_retry_count; } else if (ha->port_down_retry_count > (int )ha->login_retry_count) { ha->login_retry_count = (uint32_t )ha->port_down_retry_count; } else { } if (ql2xloginretrycount != 0) { ha->login_retry_count = (uint32_t )ql2xloginretrycount; } else { } icb->lun_enables = 0U; icb->command_resource_count = 0U; icb->immediate_notify_resource_count = 0U; icb->timeout = 0U; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { icb->firmware_options[0] = (unsigned int )icb->firmware_options[0] & 247U; icb->add_firmware_options[0] = (unsigned int )icb->add_firmware_options[0] & 240U; icb->add_firmware_options[0] = (uint8_t )((unsigned int )icb->add_firmware_options[0] | 4U); icb->response_accumulation_timer = 3U; icb->interrupt_delay_timer = 5U; vha->flags.process_response_queue = 1U; } else { if (*((unsigned long *)vha + 19UL) == 0UL) { ha->zio_mode = (unsigned int )((uint16_t )icb->add_firmware_options[0]) & 15U; ha->zio_timer = (unsigned int )icb->interrupt_delay_timer != 0U ? (uint16_t )icb->interrupt_delay_timer : 2U; } else { } icb->add_firmware_options[0] = (unsigned int )icb->add_firmware_options[0] & 240U; vha->flags.process_response_queue = 0U; if ((unsigned int )ha->zio_mode != 0U) { ha->zio_mode = 6U; ql_log(2U, vha, 104, "ZIO mode %d enabled; timer delay (%d us).\n", (int )ha->zio_mode, (int )ha->zio_timer * 100); icb->add_firmware_options[0] = (uint8_t )((int )icb->add_firmware_options[0] | (int )((unsigned char )ha->zio_mode)); icb->interrupt_delay_timer = (unsigned char )ha->zio_timer; vha->flags.process_response_queue = 1U; } else { } } if (rval != 0) { ql_log(1U, vha, 105, "NVRAM configuration failed.\n"); } else { } return (rval); } } static void qla2x00_rport_del(void *data ) { fc_port_t *fcport ; struct fc_rport *rport ; scsi_qla_host_t *vha ; unsigned long flags ; raw_spinlock_t *tmp ; { fcport = (fc_port_t *)data; vha = fcport->vha; tmp = spinlock_check(((fcport->vha)->host)->host_lock); flags = _raw_spin_lock_irqsave(tmp); rport = (unsigned long )fcport->drport != (unsigned long )((struct fc_rport *)0) ? 
fcport->drport : fcport->rport; fcport->drport = (struct fc_rport *)0; spin_unlock_irqrestore(((fcport->vha)->host)->host_lock, flags); if ((unsigned long )rport != (unsigned long )((struct fc_rport *)0)) { fc_remote_port_delete(rport); qlt_fc_port_deleted(vha, fcport); } else { } return; } } fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *vha , gfp_t flags ) { fc_port_t *fcport ; void *tmp ; { tmp = kzalloc(136UL, flags); fcport = (fc_port_t *)tmp; if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { return ((fc_port_t *)0); } else { } fcport->vha = vha; fcport->port_type = 0; fcport->loop_id = 4096U; qla2x00_set_fcport_state___0(fcport, 1); fcport->supported_classes = 0U; return (fcport); } } static int qla2x00_configure_loop(scsi_qla_host_t *vha ) { int rval ; unsigned long flags ; unsigned long save_flags ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; { ha = vha->hw; rval = 0; tmp = constant_test_bit(6L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp != 0) { rval = qla2x00_configure_hba(vha); if (rval != 0) { ql_dbg(268435456U, vha, 8211, "Unable to configure HBA.\n"); return (rval); } else { } } else { } flags = vha->dpc_flags; save_flags = flags; ql_dbg(268435456U, vha, 8212, "Configure loop -- dpc flags = 0x%lx.\n", flags); clear_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); clear_bit(7L, (unsigned long volatile *)(& vha->dpc_flags)); qla2x00_get_data_rate(vha); if ((unsigned int )ha->current_topology == 4U) { tmp___2 = constant_test_bit(6L, (unsigned long const volatile *)(& flags)); if (tmp___2 != 0) { set_bit(7L, (unsigned long volatile *)(& flags)); } else { goto _L___0; } } else _L___0: /* CIL Label */ if ((unsigned int )ha->current_topology == 8U) { tmp___1 = constant_test_bit(6L, (unsigned long const volatile *)(& flags)); if (tmp___1 != 0) { set_bit(7L, (unsigned long volatile *)(& flags)); clear_bit(6L, (unsigned long volatile *)(& flags)); } else { goto _L; } } else _L: /* CIL Label */ if ((unsigned int )ha->current_topology == 2U) { clear_bit(7L, (unsigned long volatile *)(& flags)); } else if (*((unsigned long *)vha + 19UL) == 0UL) { set_bit(7L, (unsigned long volatile *)(& flags)); set_bit(6L, (unsigned long volatile *)(& flags)); } else { tmp___0 = constant_test_bit(3L, (unsigned long const volatile *)(& flags)); if (tmp___0 != 0) { set_bit(7L, (unsigned long volatile *)(& flags)); set_bit(6L, (unsigned long volatile *)(& flags)); } else { } } tmp___4 = constant_test_bit(6L, (unsigned long const volatile *)(& flags)); if (tmp___4 != 0) { tmp___3 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___3 != 0) { ql_dbg(268435456U, vha, 8213, "Loop resync needed, failing.\n"); rval = 258; } else { rval = qla2x00_configure_local_loop(vha); } } else { } if (rval == 0) { tmp___8 = constant_test_bit(7L, (unsigned long const volatile *)(& flags)); if (tmp___8 != 0) { tmp___5 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___5 != 0) { ql_dbg(268435456U, vha, 8222, "Needs RSCN update and loop transition.\n"); rval = 258; } else { tmp___6 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___6 != 0) { ql_dbg(268435456U, vha, 8222, "Needs RSCN update and loop transition.\n"); rval = 258; } else { tmp___7 = atomic_read((atomic_t const *)(& vha->loop_state)); 
if (tmp___7 == 2) { ql_dbg(268435456U, vha, 8222, "Needs RSCN update and loop transition.\n"); rval = 258; } else { rval = qla2x00_configure_fabric(vha); } } } } else { } } else { } if (rval == 0) { tmp___9 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___9 != 0) { rval = 258; } else { tmp___10 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___10 != 0) { rval = 258; } else { atomic_set(& vha->loop_state, 5); ql_dbg(268435456U, vha, 8297, "LOOP READY.\n"); } } } else { } if (rval != 0) { ql_dbg(268435456U, vha, 8298, "%s *** FAILED ***.\n", "qla2x00_configure_loop"); } else { ql_dbg(268435456U, vha, 8299, "%s: exiting normally.\n", "qla2x00_configure_loop"); } tmp___13 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___13 != 0) { tmp___11 = constant_test_bit(6L, (unsigned long const volatile *)(& save_flags)); if (tmp___11 != 0) { set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } tmp___12 = constant_test_bit(7L, (unsigned long const volatile *)(& save_flags)); if (tmp___12 != 0) { set_bit(7L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } } else { } return (rval); } } static int qla2x00_configure_local_loop(scsi_qla_host_t *vha ) { int rval ; int rval2 ; int found_devs ; int found ; fc_port_t *fcport ; fc_port_t *new_fcport ; uint16_t index ; uint16_t entries ; char *id_iter ; uint16_t loop_id ; uint8_t domain ; uint8_t area ; uint8_t al_pa ; struct qla_hw_data *ha ; int tmp ; struct list_head const *__mptr ; int tmp___0 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int tmp___1 ; struct list_head const *__mptr___2 ; { ha = vha->hw; found_devs = 0; new_fcport = (fc_port_t *)0; entries = 128U; tmp = qla2x00_gid_list_size(ha); memset((void *)ha->gid_list, 0, (size_t )tmp); rval = qla2x00_get_id_list(vha, (void *)ha->gid_list, ha->gid_list_dma, & entries); if (rval != 0) { goto cleanup_allocation; } else { } ql_dbg(268435456U, vha, 8215, "Entries in ID list (%d).\n", (int )entries); ql_dump_buffer(268566528U, vha, 8309, (uint8_t *)ha->gid_list, (uint32_t )entries * 8U); new_fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )new_fcport == (unsigned long )((fc_port_t *)0)) { ql_log(1U, vha, 8216, "Memory allocation failed for fcport.\n"); rval = 259; goto cleanup_allocation; } else { } new_fcport->flags = new_fcport->flags & 4294967294U; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_66545; ldv_66544: tmp___0 = atomic_read((atomic_t const *)(& fcport->state)); if ((tmp___0 == 4 && (unsigned int )fcport->port_type != 3U) && (fcport->flags & 1U) == 0U) { ql_dbg(268435456U, vha, 8217, "Marking port lost loop_id=0x%04x.\n", (int )fcport->loop_id); qla2x00_set_fcport_state___0(fcport, 3); } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_66545: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66544; } else { } id_iter = (char *)ha->gid_list; index = 0U; goto ldv_66557; ldv_66556: domain = ((struct gid_list_info *)id_iter)->domain; area = ((struct gid_list_info *)id_iter)->area; al_pa = ((struct gid_list_info *)id_iter)->al_pa; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { loop_id = (unsigned short )((struct gid_list_info *)id_iter)->loop_id_2100; } else { loop_id = ((struct gid_list_info *)id_iter)->loop_id; } id_iter = id_iter + (unsigned long )ha->gid_list_info_size; if 
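/* GID-list entry filters: reserved domains (0xF0-0xFF), entries whose domain/area differ
   from this port's d_id, and loop IDs above 125 (assumed LAST_LOCAL_LOOP_ID) are all
   skipped via goto ldv_66547. */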
(((int )domain & 240) == 240) { goto ldv_66547; } else { } if (((unsigned int )area != 0U && (unsigned int )domain != 0U) && ((int )vha->d_id.b.area != (int )area || (int )vha->d_id.b.domain != (int )domain)) { goto ldv_66547; } else { } if ((unsigned int )loop_id > 125U) { goto ldv_66547; } else { } memset((void *)new_fcport, 0, 136UL); new_fcport->d_id.b.domain = domain; new_fcport->d_id.b.area = area; new_fcport->d_id.b.al_pa = al_pa; new_fcport->loop_id = loop_id; rval2 = qla2x00_get_port_database(vha, new_fcport, 0); if (rval2 != 0) { ql_dbg(268435456U, vha, 8218, "Failed to retrieve fcport information -- get_port_database=%x, loop_id=0x%04x.\n", rval2, (int )new_fcport->loop_id); ql_dbg(268435456U, vha, 8219, "Scheduling resync.\n"); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_66547; } else { } found = 0; fcport = (fc_port_t *)0; __mptr___1 = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr___1; goto ldv_66555; ldv_66554: tmp___1 = memcmp((void const *)(& new_fcport->port_name), (void const *)(& fcport->port_name), 8UL); if (tmp___1 != 0) { goto ldv_66552; } else { } fcport->flags = fcport->flags & 4294967294U; fcport->loop_id = new_fcport->loop_id; fcport->port_type = new_fcport->port_type; fcport->d_id.b24 = new_fcport->d_id.b24; memcpy((void *)(& fcport->node_name), (void const *)(& new_fcport->node_name), 8UL); found = found + 1; goto ldv_66553; ldv_66552: __mptr___2 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___2; ldv_66555: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66554; } else { } ldv_66553: ; if (found == 0) { list_add_tail(& new_fcport->list, & vha->vp_fcports); fcport = new_fcport; new_fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )new_fcport == (unsigned long )((fc_port_t *)0)) { ql_log(1U, vha, 8220, "Failed to allocate memory for fcport.\n"); rval = 259; goto cleanup_allocation; } else { } new_fcport->flags = new_fcport->flags & 4294967294U; } else { } fcport->fp_speed = ha->link_data_rate; qla2x00_update_fcport(vha, fcport); found_devs = found_devs + 1; ldv_66547: index = (uint16_t )((int )index + 1); ldv_66557: ; if ((int )index < (int )entries) { goto ldv_66556; } else { } cleanup_allocation: kfree((void const *)new_fcport); if (rval != 0) { ql_dbg(268435456U, vha, 8221, "Configure local loop error exit: rval=%x.\n", rval); } else { } return (rval); } } static void qla2x00_iidma_fcport(scsi_qla_host_t *vha , fc_port_t *fcport ) { int rval ; uint16_t mb[32U] ; struct qla_hw_data *ha ; int tmp ; char const *tmp___0 ; { ha = vha->hw; if ((ha->device_type & 67108864U) == 0U) { return; } else { } tmp = atomic_read((atomic_t const *)(& fcport->state)); if (tmp != 4) { return; } else { } if ((unsigned int )fcport->fp_speed == 65535U || (int )fcport->fp_speed > (int )ha->link_data_rate) { return; } else { } rval = qla2x00_set_idma_speed(vha, (int )fcport->loop_id, (int )fcport->fp_speed, (uint16_t *)(& mb)); if (rval != 0) { ql_dbg(268435456U, vha, 8196, "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n", (uint8_t *)(& fcport->port_name), rval, (int )fcport->fp_speed, (int )mb[0], (int )mb[1]); } else { tmp___0 = qla2x00_get_link_speed_str(ha, (int )fcport->fp_speed); ql_dbg(268435456U, vha, 8197, "iIDMA adjusted to %s GB/s on %8phN.\n", tmp___0, (uint8_t *)(& fcport->port_name)); } return; } } static void qla2x00_reg_remote_port(scsi_qla_host_t *vha , fc_port_t *fcport ) { struct fc_rport_identifiers rport_ids ; struct 
fc_rport *rport ; unsigned long flags ; raw_spinlock_t *tmp ; { rport_ids.node_name = wwn_to_u64((u8 *)(& fcport->node_name)); rport_ids.port_name = wwn_to_u64((u8 *)(& fcport->port_name)); rport_ids.port_id = (u32 )((((int )fcport->d_id.b.domain << 16) | ((int )fcport->d_id.b.area << 8)) | (int )fcport->d_id.b.al_pa); rport_ids.roles = 0U; rport = fc_remote_port_add(vha->host, 0, & rport_ids); fcport->rport = rport; if ((unsigned long )rport == (unsigned long )((struct fc_rport *)0)) { ql_log(1U, vha, 8198, "Unable to allocate fc remote port.\n"); return; } else { } qlt_fc_port_added(vha, fcport); tmp = spinlock_check(((fcport->vha)->host)->host_lock); flags = _raw_spin_lock_irqsave(tmp); *((fc_port_t **)rport->dd_data) = fcport; spin_unlock_irqrestore(((fcport->vha)->host)->host_lock, flags); rport->supported_classes = fcport->supported_classes; rport_ids.roles = 0U; if ((unsigned int )fcport->port_type == 4U) { rport_ids.roles = rport_ids.roles | 2U; } else { } if ((unsigned int )fcport->port_type == 5U) { rport_ids.roles = rport_ids.roles | 1U; } else { } fc_remote_port_rolechg(rport, rport_ids.roles); return; } } void qla2x00_update_fcport(scsi_qla_host_t *vha , fc_port_t *fcport ) { { fcport->vha = vha; if (((vha->hw)->device_type & 131072U) != 0U) { qla2x00_set_fcport_state___0(fcport, 4); qla2x00_reg_remote_port(vha, fcport); return; } else { } fcport->login_retry = 0; fcport->flags = fcport->flags & 4294967285U; qla2x00_set_fcport_state___0(fcport, 4); qla2x00_iidma_fcport(vha, fcport); qla24xx_update_fcport_fcp_prio(vha, fcport); qla2x00_reg_remote_port(vha, fcport); return; } } static int qla2x00_configure_fabric(scsi_qla_host_t *vha ) { int rval ; fc_port_t *fcport ; fc_port_t *fcptemp ; uint16_t next_loopid ; uint16_t mb[32U] ; uint16_t loop_id ; struct list_head new_fcports ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int tmp___6 ; int tmp___7 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; int tmp___8 ; int tmp___9 ; struct list_head const *__mptr___4 ; struct list_head const *__mptr___5 ; struct list_head const *__mptr___6 ; int tmp___10 ; int tmp___11 ; struct list_head const *__mptr___7 ; struct list_head const *__mptr___8 ; struct list_head const *__mptr___9 ; struct list_head const *__mptr___10 ; { new_fcports.next = & new_fcports; new_fcports.prev = & new_fcports; ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; if ((ha->device_type & 134217728U) != 0U) { loop_id = 2046U; } else { loop_id = 126U; } rval = qla2x00_get_port_name(vha, (int )loop_id, (uint8_t *)(& vha->fabric_node_name), 1); if (rval != 0) { ql_dbg(268435456U, vha, 8223, "MBX_GET_PORT_NAME failed, No FL Port.\n"); vha->device_flags = vha->device_flags & 4294967294U; return (0); } else { } vha->device_flags = vha->device_flags | 1U; if (ql2xfdmienable != 0) { tmp___0 = test_and_clear_bit(12L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { qla2x00_fdmi_register(vha); } else { } } else { } if ((ha->device_type & 134217728U) != 0U) { loop_id = 2044U; } else { loop_id = 128U; } rval = (*((ha->isp_ops)->fabric_login))(vha, (int )loop_id, 255, 255, 252, (uint16_t *)(& mb), 3); if (rval != 0) { set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); return (rval); } else { } if ((unsigned int )mb[0] != 16384U) { 
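/* mb[0] != 0x4000 (assumed MBS_COMMAND_COMPLETE): the SNS/management-server login failed,
   so the failure is logged and fabric configuration is skipped by returning 0. */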
ql_dbg(268435456U, vha, 8258, "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", (int )loop_id, (int )mb[0], (int )mb[1], (int )mb[2], (int )mb[6], (int )mb[7]); return (0); } else { } tmp___5 = test_and_clear_bit(9L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___5 != 0) { tmp___1 = qla2x00_rft_id(vha); if (tmp___1 != 0) { ql_dbg(268435456U, vha, 8261, "Register FC-4 TYPE failed.\n"); } else { } tmp___2 = qla2x00_rff_id(vha); if (tmp___2 != 0) { ql_dbg(268435456U, vha, 8265, "Register FC-4 Features failed.\n"); } else { } tmp___4 = qla2x00_rnn_id(vha); if (tmp___4 != 0) { ql_dbg(268435456U, vha, 8271, "Register Node Name failed.\n"); } else { tmp___3 = qla2x00_rsnn_nn(vha); if (tmp___3 != 0) { ql_dbg(268435456U, vha, 8275, "Register Symobilic Node Name failed.\n"); } else { } } } else { } __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_66597; ldv_66596: fcport->scan_state = 1U; __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_66597: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66596; } else { } rval = qla2x00_find_all_fabric_devs(vha, & new_fcports); if (rval != 0) { goto ldv_66599; } else { } __mptr___1 = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr___1; goto ldv_66607; ldv_66606: tmp___6 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___6 != 0) { goto ldv_66604; } else { } if ((fcport->flags & 1U) == 0U) { goto ldv_66605; } else { } if ((unsigned int )fcport->scan_state == 1U) { tmp___7 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___7 == 4) { qla2x00_mark_device_lost(vha, fcport, ql2xplogiabsentdevice, 0); if ((((unsigned int )fcport->loop_id != 4096U && (fcport->flags & 4U) == 0U) && (unsigned int )fcport->port_type != 4U) && (unsigned int )fcport->port_type != 3U) { (*((ha->isp_ops)->fabric_logout))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); qla2x00_clear_loop_id(fcport); } else { } } else { } } else { } ldv_66605: __mptr___2 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___2; ldv_66607: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66606; } else { } ldv_66604: next_loopid = ha->min_external_loopid; __mptr___3 = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr___3; goto ldv_66615; ldv_66614: tmp___8 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___8 != 0) { goto ldv_66612; } else { tmp___9 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___9 != 0) { goto ldv_66612; } else { } } if ((fcport->flags & 1U) == 0U || (fcport->flags & 2U) == 0U) { goto ldv_66613; } else { } if ((unsigned int )fcport->loop_id == 4096U) { fcport->loop_id = next_loopid; rval = qla2x00_find_new_loop_id(base_vha, fcport); if (rval != 0) { goto ldv_66612; } else { } } else { } qla2x00_fabric_dev_login(vha, fcport, & next_loopid); ldv_66613: __mptr___4 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___4; ldv_66615: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66614; } else { } ldv_66612: ; if (rval != 0) { goto ldv_66599; } else { } __mptr___5 = (struct list_head const *)new_fcports.next; fcport = (fc_port_t *)__mptr___5; __mptr___6 = (struct list_head const 
*)fcport->list.next; fcptemp = (fc_port_t *)__mptr___6; goto ldv_66624; ldv_66623: tmp___10 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___10 != 0) { goto ldv_66622; } else { tmp___11 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___11 != 0) { goto ldv_66622; } else { } } fcport->loop_id = next_loopid; rval = qla2x00_find_new_loop_id(base_vha, fcport); if (rval != 0) { goto ldv_66622; } else { } qla2x00_fabric_dev_login(vha, fcport, & next_loopid); list_move_tail(& fcport->list, & vha->vp_fcports); fcport = fcptemp; __mptr___7 = (struct list_head const *)fcptemp->list.next; fcptemp = (fc_port_t *)__mptr___7; ldv_66624: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& new_fcports)) { goto ldv_66623; } else { } ldv_66622: ; ldv_66599: __mptr___8 = (struct list_head const *)new_fcports.next; fcport = (fc_port_t *)__mptr___8; __mptr___9 = (struct list_head const *)fcport->list.next; fcptemp = (fc_port_t *)__mptr___9; goto ldv_66632; ldv_66631: list_del(& fcport->list); kfree((void const *)fcport); fcport = fcptemp; __mptr___10 = (struct list_head const *)fcptemp->list.next; fcptemp = (fc_port_t *)__mptr___10; ldv_66632: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& new_fcports)) { goto ldv_66631; } else { } if (rval != 0) { ql_dbg(268435456U, vha, 8296, "Configure fabric error exit rval=%d.\n", rval); } else { } return (rval); } } static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha , struct list_head *new_fcports ) { int rval ; uint16_t loop_id ; fc_port_t *fcport ; fc_port_t *new_fcport ; fc_port_t *fcptemp ; int found ; sw_info_t *swl ; int swl_idx ; int first_dev ; int last_dev ; port_id_t wrap ; port_id_t nxt_d_id ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int tmp___9 ; struct list_head const *__mptr___2 ; int tmp___10 ; int tmp___11 ; struct list_head const *__mptr___3 ; { wrap.b24 = 0U; ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; rval = 0; if ((unsigned long )ha->swl == (unsigned long )((void *)0)) { ha->swl = kcalloc((size_t )ha->max_fibre_devices, 32UL, 208U); } else { } swl = (sw_info_t *)ha->swl; if ((unsigned long )swl == (unsigned long )((sw_info_t *)0)) { ql_dbg(268435456U, vha, 8276, "GID_PT allocations failed, fallback on GA_NXT.\n"); } else { memset((void *)swl, 0, (unsigned long )ha->max_fibre_devices * 32UL); tmp___3 = qla2x00_gid_pt(vha, swl); if (tmp___3 != 0) { swl = (sw_info_t *)0; } else { tmp___2 = qla2x00_gpn_id(vha, swl); if (tmp___2 != 0) { swl = (sw_info_t *)0; } else { tmp___1 = qla2x00_gnn_id(vha, swl); if (tmp___1 != 0) { swl = (sw_info_t *)0; } else if (ql2xiidmaenable != 0) { tmp___0 = qla2x00_gfpn_id(vha, swl); if (tmp___0 == 0) { qla2x00_gpsc(vha, swl); } else { } } else { } } } if ((unsigned long )swl != (unsigned long )((sw_info_t *)0)) { qla2x00_gff_id(vha, swl); } else { } } swl_idx = 0; new_fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )new_fcport == (unsigned long )((fc_port_t *)0)) { ql_log(1U, vha, 8286, "Failed to allocate memory for fcport.\n"); return (259); } else { } new_fcport->flags = new_fcport->flags | 3U; first_dev = 1; last_dev = 0; loop_id = ha->min_external_loopid; goto ldv_66672; ldv_66671: tmp___4 = qla2x00_is_reserved_id(vha, 
(int )loop_id); if (tmp___4 != 0) { goto ldv_66652; } else { } if ((unsigned int )ha->current_topology == 4U) { tmp___5 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___5 != 0) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_66653; } else { tmp___6 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___6 != 0) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_66653; } else { tmp___7 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___7 != 0) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_66653; } else { tmp___8 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___8 == 2) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_66653; } else { } } } } } else { } if ((unsigned long )swl != (unsigned long )((sw_info_t *)0)) { if (last_dev != 0) { wrap.b24 = new_fcport->d_id.b24; } else { new_fcport->d_id.b24 = (swl + (unsigned long )swl_idx)->d_id.b24; memcpy((void *)(& new_fcport->node_name), (void const *)(& (swl + (unsigned long )swl_idx)->node_name), 8UL); memcpy((void *)(& new_fcport->port_name), (void const *)(& (swl + (unsigned long )swl_idx)->port_name), 8UL); memcpy((void *)(& new_fcport->fabric_port_name), (void const *)(& (swl + (unsigned long )swl_idx)->fabric_port_name), 8UL); new_fcport->fp_speed = (swl + (unsigned long )swl_idx)->fp_speed; new_fcport->fc4_type = (swl + (unsigned long )swl_idx)->fc4_type; if ((unsigned int )(swl + (unsigned long )swl_idx)->d_id.b.rsvd_1 != 0U) { last_dev = 1; } else { } swl_idx = swl_idx + 1; } } else { rval = qla2x00_ga_nxt(vha, new_fcport); if (rval != 0) { ql_log(1U, vha, 8292, "SNS scan failed -- assuming zero-entry result.\n"); __mptr = (struct list_head const *)new_fcports->next; fcport = (fc_port_t *)__mptr; __mptr___0 = (struct list_head const *)fcport->list.next; fcptemp = (fc_port_t *)__mptr___0; goto ldv_66661; ldv_66660: list_del(& fcport->list); kfree((void const *)fcport); fcport = fcptemp; __mptr___1 = (struct list_head const *)fcptemp->list.next; fcptemp = (fc_port_t *)__mptr___1; ldv_66661: ; if ((unsigned long )(& fcport->list) != (unsigned long )new_fcports) { goto ldv_66660; } else { } rval = 0; goto ldv_66653; } else { } } if (first_dev != 0) { wrap.b24 = new_fcport->d_id.b24; first_dev = 0; } else if (new_fcport->d_id.b24 == wrap.b24) { ql_dbg(268435456U, vha, 8293, "Device wrap (%02x%02x%02x).\n", (int )new_fcport->d_id.b.domain, (int )new_fcport->d_id.b.area, (int )new_fcport->d_id.b.al_pa); goto ldv_66653; } else { } if (new_fcport->d_id.b24 == base_vha->d_id.b24) { goto ldv_66652; } else { } tmp___9 = qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24); if (tmp___9 != 0) { goto ldv_66652; } else { } if ((((int )new_fcport->d_id.b24 ^ (int )vha->d_id.b24) & 16776960) == 0 && (unsigned int )ha->current_topology == 4U) { goto ldv_66652; } else { } if (((int )new_fcport->d_id.b.domain & 240) == 240) { goto ldv_66652; } else { } if (ql2xgffidenable != 0 && ((unsigned int )new_fcport->fc4_type != 8U && (unsigned int )new_fcport->fc4_type != 255U)) { goto ldv_66652; } else { 
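/* The candidate device passed every filter above; the loop that follows compares its WWPN
   against each fcport on vha->vp_fcports. A match refreshes the existing entry (d_id,
   fabric_port_name, fp_speed, login-related flag bits); no match queues new_fcport onto
   the caller-supplied new_fcports list. */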
} found = 0; __mptr___2 = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr___2; goto ldv_66670; ldv_66669: tmp___10 = memcmp((void const *)(& new_fcport->port_name), (void const *)(& fcport->port_name), 8UL); if (tmp___10 != 0) { goto ldv_66667; } else { } fcport->scan_state = 2U; found = found + 1; memcpy((void *)(& fcport->fabric_port_name), (void const *)(& new_fcport->fabric_port_name), 8UL); fcport->fp_speed = new_fcport->fp_speed; if (fcport->d_id.b24 == new_fcport->d_id.b24) { tmp___11 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___11 == 4) { goto ldv_66668; } else { } } else { } if ((fcport->flags & 1U) == 0U) { fcport->d_id.b24 = new_fcport->d_id.b24; qla2x00_clear_loop_id(fcport); fcport->flags = fcport->flags | 3U; goto ldv_66668; } else { } fcport->d_id.b24 = new_fcport->d_id.b24; fcport->flags = fcport->flags | 2U; if (((((unsigned int )fcport->loop_id != 4096U && (fcport->flags & 4U) == 0U) && (fcport->flags & 8U) == 0U) && (unsigned int )fcport->port_type != 4U) && (unsigned int )fcport->port_type != 3U) { (*((ha->isp_ops)->fabric_logout))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); qla2x00_clear_loop_id(fcport); } else { } goto ldv_66668; ldv_66667: __mptr___3 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___3; ldv_66670: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66669; } else { } ldv_66668: ; if (found != 0) { goto ldv_66652; } else { } list_add_tail(& new_fcport->list, new_fcports); nxt_d_id.b24 = new_fcport->d_id.b24; new_fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )new_fcport == (unsigned long )((fc_port_t *)0)) { ql_log(1U, vha, 8294, "Memory allocation failed for fcport.\n"); return (259); } else { } new_fcport->flags = new_fcport->flags | 3U; new_fcport->d_id.b24 = nxt_d_id.b24; ldv_66652: loop_id = (uint16_t )((int )loop_id + 1); ldv_66672: ; if ((int )ha->max_loop_id >= (int )loop_id) { goto ldv_66671; } else { } ldv_66653: kfree((void const *)new_fcport); return (rval); } } int qla2x00_find_new_loop_id(scsi_qla_host_t *vha , fc_port_t *dev ) { int rval ; struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; unsigned long tmp___0 ; int tmp___1 ; { ha = vha->hw; flags = 0UL; rval = 0; tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = find_first_zero_bit((unsigned long const *)ha->loop_id_map, (unsigned long )ha->max_fibre_devices); dev->loop_id = (uint16_t )tmp___0; if ((int )dev->loop_id >= (int )ha->max_fibre_devices) { dev->loop_id = 4096U; rval = 258; } else { tmp___1 = qla2x00_is_reserved_id(vha, (int )dev->loop_id); if (tmp___1 != 0) { dev->loop_id = 4096U; rval = 258; } else { set_bit((long )dev->loop_id, (unsigned long volatile *)ha->loop_id_map); } } spin_unlock_irqrestore(& ha->vport_slock, flags); if (rval == 0) { ql_dbg(268435456U, dev->vha, 8326, "Assigning new loopid=%x, portid=%x.\n", (int )dev->loop_id, (int )dev->d_id.b24); } else { ql_log(1U, dev->vha, 8327, "No loop_id\'s available, portid=%x.\n", (int )dev->d_id.b24); } return (rval); } } static int qla2x00_fabric_dev_login(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t *next_loopid ) { int rval ; int retry ; uint8_t opts ; struct qla_hw_data *ha ; { ha = vha->hw; rval = 0; retry = 0; if ((((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || 
(ha->device_type & 64U) != 0U) || (ha->device_type & 134217728U) != 0U) { if ((fcport->flags & 8U) != 0U) { return (rval); } else { } fcport->flags = fcport->flags | 8U; rval = qla2x00_post_async_login_work(vha, fcport, (uint16_t *)0U); if (rval == 0) { return (rval); } else { } } else { } fcport->flags = fcport->flags & 4294967287U; rval = qla2x00_fabric_login(vha, fcport, next_loopid); if (rval == 0) { opts = 0U; if ((fcport->flags & 4U) != 0U) { opts = (uint8_t )((unsigned int )opts | 2U); } else { } rval = qla2x00_get_port_database(vha, fcport, (int )opts); if (rval != 0) { (*((ha->isp_ops)->fabric_logout))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); qla2x00_mark_device_lost(vha, fcport, 1, 0); } else { qla2x00_update_fcport(vha, fcport); } } else { qla2x00_mark_device_lost(vha, fcport, 1, 0); } return (rval); } } int qla2x00_fabric_login(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t *next_loopid ) { int rval ; int retry ; uint16_t tmp_loopid ; uint16_t mb[32U] ; struct qla_hw_data *ha ; { ha = vha->hw; retry = 0; tmp_loopid = 0U; ldv_66703: ql_dbg(268435456U, vha, 8192, "Trying Fabric Login w/loop id 0x%04x for port %02x%02x%02x.\n", (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); rval = (*((ha->isp_ops)->fabric_login))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (uint16_t *)(& mb), 1); if (rval != 0) { return (rval); } else { } if ((unsigned int )mb[0] == 16391U) { retry = retry + 1; tmp_loopid = fcport->loop_id; fcport->loop_id = mb[1]; ql_dbg(268435456U, vha, 8193, "Fabric Login: port in use - next loop id=0x%04x, port id= %02x%02x%02x.\n", (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else if ((unsigned int )mb[0] == 16384U) { if (retry != 0) { *next_loopid = tmp_loopid; } else { *next_loopid = (unsigned int )fcport->loop_id + 1U; } if ((int )mb[1] & 1) { fcport->port_type = 4; } else { fcport->port_type = 5; if (((int )mb[1] & 2) != 0) { fcport->flags = fcport->flags | 4U; } else { } } if ((int )mb[10] & 1) { fcport->supported_classes = fcport->supported_classes | 4U; } else { } if (((int )mb[10] & 2) != 0) { fcport->supported_classes = fcport->supported_classes | 8U; } else { } if ((ha->device_type & 134217728U) != 0U) { if (((int )mb[10] & 128) != 0) { fcport->flags = fcport->flags | 16U; } else { } } else { } rval = 0; goto ldv_66702; } else if ((unsigned int )mb[0] == 16392U) { fcport->loop_id = (uint16_t )((int )fcport->loop_id + 1); rval = qla2x00_find_new_loop_id(vha, fcport); if (rval != 0) { goto ldv_66702; } else { } } else if ((unsigned int )mb[0] == 16389U) { *next_loopid = fcport->loop_id; (*((ha->isp_ops)->fabric_logout))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); qla2x00_mark_device_lost(vha, fcport, 1, 0); rval = 1; goto ldv_66702; } else { ql_dbg(268435456U, vha, 8194, "Failed=%x port_id=%02x%02x%02x loop_id=%x jiffies=%lx.\n", (int )mb[0], (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int )fcport->loop_id, jiffies); *next_loopid = fcport->loop_id; (*((ha->isp_ops)->fabric_logout))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); qla2x00_clear_loop_id(fcport); fcport->login_retry = 0; rval = 3; goto ldv_66702; } goto ldv_66703; 
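/* CIL/LDV lowers the driver's retry loop into goto form: "goto ldv_66703" above re-enters
   the loop body, while the branches that finish a login attempt jump to the exit label
   ldv_66702 just below. */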
ldv_66702: ; return (rval); } } int qla2x00_local_device_login(scsi_qla_host_t *vha , fc_port_t *fcport ) { int rval ; uint16_t mb[32U] ; { memset((void *)(& mb), 0, 64UL); rval = qla2x00_login_local_device(vha, fcport, (uint16_t *)(& mb), 1); if (rval == 0) { if ((unsigned int )mb[0] == 16389U) { rval = 1; } else if ((unsigned int )mb[0] == 16390U) { rval = 3; } else { } } else { } return (rval); } } int qla2x00_loop_resync(scsi_qla_host_t *vha ) { int rval ; uint32_t wait_time ; struct req_que *req ; struct rsp_que *rsp ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { rval = 0; if (*((unsigned long *)vha->hw + 2UL) != 0UL) { req = *((vha->hw)->req_q_map); } else { req = vha->req; } rsp = req->rsp; clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); if (*((unsigned long *)vha + 19UL) != 0UL) { rval = qla2x00_fw_ready(vha); if (rval == 0) { wait_time = 256U; ldv_66717: ; if (((vha->hw)->device_type & 131072U) == 0U) { qla2x00_marker(vha, req, rsp, 0, 0ULL, 2); vha->marker_needed = 0U; } else { } clear_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); if (((vha->hw)->device_type & 131072U) != 0U) { qlafx00_configure_devices(vha); } else { qla2x00_configure_loop(vha); } wait_time = wait_time - 1U; tmp = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp == 0) { tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 == 0) { if (wait_time != 0U) { tmp___1 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { goto ldv_66717; } else { goto ldv_66718; } } else { goto ldv_66718; } } else { goto ldv_66718; } } else { } ldv_66718: ; } else { } } else { } tmp___2 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___2 != 0) { return (258); } else { } if (rval != 0) { ql_dbg(268435456U, vha, 8300, "%s *** FAILED ***.\n", "qla2x00_loop_resync"); } else { } return (rval); } } int qla2x00_perform_loop_resync(scsi_qla_host_t *ha ) { int32_t rval ; int tmp ; { rval = 0; tmp = test_and_set_bit(5L, (unsigned long volatile *)(& ha->dpc_flags)); if (tmp == 0) { atomic_set(& ha->loop_down_timer, 0); if ((ha->device_flags & 2U) == 0U) { atomic_set(& ha->loop_state, 3); set_bit(6L, (unsigned long volatile *)(& ha->dpc_flags)); set_bit(9L, (unsigned long volatile *)(& ha->dpc_flags)); set_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); rval = qla2x00_loop_resync(ha); } else { atomic_set(& ha->loop_state, 6); } clear_bit(5L, (unsigned long volatile *)(& ha->dpc_flags)); } else { } return (rval); } } void qla2x00_update_fcports(scsi_qla_host_t *base_vha ) { fc_port_t *fcport ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; raw_spinlock_t *tmp___0 ; int tmp___1 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; { ha = base_vha->hw; tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)(base_vha->hw)->vp_list.next; vha = (struct scsi_qla_host *)__mptr; goto ldv_66749; ldv_66748: atomic_inc(& vha->vref_count); __mptr___0 = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr___0; goto ldv_66746; ldv_66745: ; if ((unsigned long )fcport->drport != (unsigned long )((struct fc_rport *)0)) { tmp___1 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___1 != 1) { spin_unlock_irqrestore(& ha->vport_slock, flags); qla2x00_rport_del((void 
*)fcport); tmp___0 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } } else { } __mptr___1 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___1; ldv_66746: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66745; } else { } atomic_dec(& vha->vref_count); __mptr___2 = (struct list_head const *)vha->list.next; vha = (struct scsi_qla_host *)__mptr___2; ldv_66749: ; if ((unsigned long )(& vha->list) != (unsigned long )(& (base_vha->hw)->vp_list)) { goto ldv_66748; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); return; } } void qla83xx_reset_ownership(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t drv_presence ; uint32_t drv_presence_mask ; uint32_t dev_part_info1 ; uint32_t dev_part_info2 ; uint32_t class_type ; uint32_t class_type_mask ; uint16_t fcoe_other_function ; uint16_t i ; int tmp ; int tmp___0 ; int tmp___1 ; { ha = vha->hw; class_type_mask = 3U; fcoe_other_function = 65535U; if ((ha->device_type & 262144U) != 0U) { tmp = qla8044_rd_direct(vha, 3U); drv_presence = (uint32_t )tmp; tmp___0 = qla8044_rd_direct(vha, 7U); dev_part_info1 = (uint32_t )tmp___0; tmp___1 = qla8044_rd_direct(vha, 14308U); dev_part_info2 = (uint32_t )tmp___1; } else { qla83xx_rd_reg(vha, 571483016U, & drv_presence); qla83xx_rd_reg(vha, 571483104U, & dev_part_info1); qla83xx_rd_reg(vha, 571483108U, & dev_part_info2); } i = 0U; goto ldv_66765; ldv_66764: class_type = (dev_part_info1 >> (int )i * 4) & class_type_mask; if (class_type == 2U && (int )ha->portnum != (int )i) { fcoe_other_function = i; goto ldv_66763; } else { } i = (uint16_t )((int )i + 1); ldv_66765: ; if ((unsigned int )i <= 7U) { goto ldv_66764; } else { } ldv_66763: ; if ((unsigned int )fcoe_other_function == 65535U) { i = 0U; goto ldv_66768; ldv_66767: class_type = (dev_part_info2 >> (int )i * 4) & class_type_mask; if (class_type == 2U && (int )i + 8 != (int )ha->portnum) { fcoe_other_function = (unsigned int )i + 8U; goto ldv_66766; } else { } i = (uint16_t )((int )i + 1); ldv_66768: ; if ((unsigned int )i <= 7U) { goto ldv_66767; } else { } ldv_66766: ; } else { } drv_presence_mask = (uint32_t )(~ ((1 << (int )ha->portnum) | ((unsigned int )fcoe_other_function != 65535U ? 
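/* drv_presence_mask clears this function's presence bit and, when another FCoE function
   (class_type == 2) was located in dev_part_info1/2 above, that function's bit as well;
   65535U is the "not found" sentinel for fcoe_other_function. */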
1 << (int )fcoe_other_function : 0))); if ((drv_presence & drv_presence_mask) == 0U && (int )ha->portnum < (int )fcoe_other_function) { ql_dbg(524288U, vha, 45183, "This host is Reset owner.\n"); ha->flags.nic_core_reset_owner = 1U; } else { } return; } } static int __qla83xx_set_drv_ack(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; uint32_t drv_ack ; { rval = 0; ha = vha->hw; rval = qla83xx_rd_reg(vha, 571483020U, & drv_ack); if (rval == 0) { drv_ack = (uint32_t )(1 << (int )ha->portnum) | drv_ack; rval = qla83xx_wr_reg(vha, 571483020U, drv_ack); } else { } return (rval); } } static int __qla83xx_clear_drv_ack(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; uint32_t drv_ack ; { rval = 0; ha = vha->hw; rval = qla83xx_rd_reg(vha, 571483020U, & drv_ack); if (rval == 0) { drv_ack = (uint32_t )(~ (1 << (int )ha->portnum)) & drv_ack; rval = qla83xx_wr_reg(vha, 571483020U, drv_ack); } else { } return (rval); } } static char const *qla83xx_dev_state_to_string(uint32_t dev_state ) { { switch (dev_state) { case 1U: ; return ("COLD/RE-INIT"); case 2U: ; return ("INITIALIZING"); case 3U: ; return ("READY"); case 4U: ; return ("NEED RESET"); case 5U: ; return ("NEED QUIESCENT"); case 6U: ; return ("FAILED"); case 7U: ; return ("QUIESCENT"); default: ; return ("Unknown"); } } } void qla83xx_idc_audit(scsi_qla_host_t *vha , int audit_type ) { struct qla_hw_data *ha ; uint32_t idc_audit_reg ; uint32_t duration_secs ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; { ha = vha->hw; idc_audit_reg = 0U; duration_secs = 0U; switch (audit_type) { case 0: tmp = jiffies_to_msecs(jiffies); ha->idc_audit_ts = tmp / 1000U; idc_audit_reg = (uint32_t )ha->portnum | (ha->idc_audit_ts << 8); qla83xx_wr_reg(vha, 571483028U, idc_audit_reg); goto ldv_66800; case 1: tmp___0 = jiffies_to_msecs(jiffies); tmp___1 = jiffies_to_msecs((unsigned long const )ha->idc_audit_ts); duration_secs = (tmp___0 - tmp___1) / 1000U; idc_audit_reg = ((unsigned int )ha->portnum | 128U) | (duration_secs << 8); qla83xx_wr_reg(vha, 571483028U, idc_audit_reg); goto ldv_66800; default: ql_log(1U, vha, 45176, "Invalid audit type specified.\n"); goto ldv_66800; } ldv_66800: ; return; } } static int qla83xx_initiating_reset(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t idc_control ; uint32_t dev_state ; char const *state ; char const *tmp ; { ha = vha->hw; __qla83xx_get_idc_control(vha, & idc_control); if ((int )idc_control & 1) { ql_log(2U, vha, 45184, "NIC Core reset has been disabled. 
idc-control=0x%x\n", idc_control); return (258); } else { } qla83xx_rd_reg(vha, 571483012U, & dev_state); if (*((unsigned long *)ha + 2UL) != 0UL && dev_state == 3U) { qla83xx_wr_reg(vha, 571483012U, 4U); ql_log(2U, vha, 45142, "HW State: NEED RESET.\n"); qla83xx_idc_audit(vha, 0); } else { tmp = qla83xx_dev_state_to_string(dev_state); state = tmp; ql_log(2U, vha, 45143, "HW State: %s.\n", state); goto ldv_66811; ldv_66810: qla83xx_idc_unlock(vha, 0); msleep(200U); qla83xx_idc_lock(vha, 0); qla83xx_rd_reg(vha, 571483012U, & dev_state); ldv_66811: ; if (dev_state == 3U) { goto ldv_66810; } else { } } __qla83xx_set_drv_ack(vha); return (0); } } int __qla83xx_set_idc_control(scsi_qla_host_t *vha , uint32_t idc_control ) { int tmp ; { tmp = qla83xx_wr_reg(vha, 571483024U, idc_control); return (tmp); } } int __qla83xx_get_idc_control(scsi_qla_host_t *vha , uint32_t *idc_control ) { int tmp ; { tmp = qla83xx_rd_reg(vha, 571483024U, idc_control); return (tmp); } } static int qla83xx_check_driver_presence(scsi_qla_host_t *vha ) { uint32_t drv_presence ; struct qla_hw_data *ha ; { drv_presence = 0U; ha = vha->hw; qla83xx_rd_reg(vha, 571483016U, & drv_presence); if (((uint32_t )(1 << (int )ha->portnum) & drv_presence) != 0U) { return (0); } else { return (3); } } } int qla83xx_nic_core_reset(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; int tmp ; { rval = 0; ha = vha->hw; ql_dbg(524288U, vha, 45144, "Entered %s().\n", "qla83xx_nic_core_reset"); if ((vha->device_flags & 32U) != 0U) { ql_log(1U, vha, 45145, "Device in unrecoverable FAILED state.\n"); return (258); } else { } qla83xx_idc_lock(vha, 0); tmp = qla83xx_check_driver_presence(vha); if (tmp != 0) { ql_log(1U, vha, 45146, "Function=0x%x has been removed from IDC participation.\n", (int )ha->portnum); rval = 258; goto exit; } else { } qla83xx_reset_ownership(vha); rval = qla83xx_initiating_reset(vha); if (rval == 0) { rval = qla83xx_idc_state_handler(vha); if (rval == 0) { ha->flags.nic_core_hung = 0U; } else { } __qla83xx_clear_drv_ack(vha); } else { } exit: qla83xx_idc_unlock(vha, 0); ql_dbg(524288U, vha, 45147, "Exiting %s.\n", "qla83xx_nic_core_reset"); return (rval); } } int qla2xxx_mctp_dump(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int rval ; { ha = vha->hw; rval = 258; if ((ha->device_type & 32768U) == 0U || ((int )ha->fw_attributes_ext[0] & 1) == 0) { ql_log(2U, vha, 20589, "This board is not MCTP capable\n"); return (rval); } else { } if ((unsigned long )ha->mctp_dump == (unsigned long )((void *)0)) { ha->mctp_dump = dma_alloc_attrs(& (ha->pdev)->dev, 548964UL, & ha->mctp_dump_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )ha->mctp_dump == (unsigned long )((void *)0)) { ql_log(1U, vha, 20590, "Failed to allocate memory for mctp dump\n"); return (rval); } else { } } else { } rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma, 0U, 137241U); if (rval != 0) { ql_log(1U, vha, 20591, "Failed to capture mctp dump\n"); } else { ql_log(2U, vha, 20592, "Mctp dump capture for host (%ld/%p).\n", vha->host_no, ha->mctp_dump); ha->mctp_dumped = 1; } if (*((unsigned long *)ha + 2UL) == 0UL && (unsigned int )ha->portnum == 0U) { ha->flags.nic_core_reset_hdlr_active = 1U; rval = qla83xx_restart_nic_firmware(vha); if (rval != 0) { ql_log(1U, vha, 20593, "Failed to restart nic firmware\n"); } else { ql_dbg(524288U, vha, 45188, "Restarted NIC firmware successfully.\n"); } ha->flags.nic_core_reset_hdlr_active = 0U; } else { } return (rval); } } void qla2x00_quiesce_io(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; 
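/* qla2x00_quiesce_io(): pin the loop-down timer at 255, and if the loop is not already
   down (state 2) mark every device lost on the base host and on each vport in
   ha->vp_list, then wait for outstanding commands to drain. */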
struct scsi_qla_host *vp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp ; int tmp___0 ; { ha = vha->hw; ql_dbg(67108864U, vha, 16413, "Quiescing I/O - ha=%p.\n", ha); atomic_set(& ha->loop_down_timer, 255); tmp___0 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___0 != 2) { atomic_set(& vha->loop_state, 2); qla2x00_mark_all_devices_lost(vha, 0); __mptr = (struct list_head const *)ha->vp_list.next; vp = (struct scsi_qla_host *)__mptr; goto ldv_66848; ldv_66847: qla2x00_mark_all_devices_lost(vp, 0); __mptr___0 = (struct list_head const *)vp->list.next; vp = (struct scsi_qla_host *)__mptr___0; ldv_66848: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_66847; } else { } } else { tmp = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp == 0) { atomic_set(& vha->loop_down_timer, 255); } else { } } qla2x00_eh_wait_for_pending_commands(vha, 0U, 0ULL, 0); return; } } void qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct scsi_qla_host *vp ; unsigned long flags ; fc_port_t *fcport ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; raw_spinlock_t *tmp___0 ; struct list_head const *__mptr___0 ; int tmp___1 ; int tmp___2 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; raw_spinlock_t *tmp___3 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; struct list_head const *__mptr___5 ; raw_spinlock_t *tmp___4 ; struct list_head const *__mptr___6 ; { ha = vha->hw; if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { vha->flags.online = 0U; } else { } ha->flags.chip_reset_done = 0U; clear_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); vha->qla_stats.total_isp_aborts = vha->qla_stats.total_isp_aborts + 1U; ql_log(2U, vha, 175, "Performing ISP error recovery - ha=%p.\n", ha); if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { (*((ha->isp_ops)->reset_chip))(vha); } else { } atomic_set(& vha->loop_down_timer, 255); tmp___2 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___2 != 2) { atomic_set(& vha->loop_state, 2); qla2x00_mark_all_devices_lost(vha, 0); tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)ha->vp_list.next; vp = (struct scsi_qla_host *)__mptr; goto ldv_66868; ldv_66867: atomic_inc(& vp->vref_count); spin_unlock_irqrestore(& ha->vport_slock, flags); qla2x00_mark_all_devices_lost(vp, 0); tmp___0 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___0); atomic_dec(& vp->vref_count); __mptr___0 = (struct list_head const *)vp->list.next; vp = (struct scsi_qla_host *)__mptr___0; ldv_66868: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_66867; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); } else { tmp___1 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___1 == 0) { atomic_set(& vha->loop_down_timer, 255); } else { } } __mptr___1 = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr___1; goto ldv_66875; ldv_66874: fcport->flags = fcport->flags & 4294967285U; __mptr___2 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___2; ldv_66875: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66874; } else { } tmp___3 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___3); __mptr___3 = (struct list_head 
const *)ha->vp_list.next; vp = (struct scsi_qla_host *)__mptr___3; goto ldv_66895; ldv_66894: atomic_inc(& vp->vref_count); spin_unlock_irqrestore(& ha->vport_slock, flags); __mptr___4 = (struct list_head const *)vp->vp_fcports.next; fcport = (fc_port_t *)__mptr___4; goto ldv_66889; ldv_66888: fcport->flags = fcport->flags & 4294967285U; __mptr___5 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___5; ldv_66889: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vp->vp_fcports)) { goto ldv_66888; } else { } tmp___4 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___4); atomic_dec(& vp->vref_count); __mptr___6 = (struct list_head const *)vp->list.next; vp = (struct scsi_qla_host *)__mptr___6; ldv_66895: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_66894; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); if (*((unsigned long *)ha + 2UL) == 0UL) { if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { qla82xx_chip_reset_cleanup(vha); ql_log(2U, vha, 180, "Done chip reset cleanup.\n"); vha->flags.online = 0U; } else { } qla2x00_abort_all_cmds(vha, 524288); } else { } ha->chip_reset = ha->chip_reset + 1U; __asm__ volatile ("sfence": : : "memory"); return; } } int qla2x00_abort_isp(scsi_qla_host_t *vha ) { int rval ; uint8_t status ; struct qla_hw_data *ha ; struct scsi_qla_host *vp ; struct req_que *req ; unsigned long flags ; int tmp ; int tmp___0 ; long tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; raw_spinlock_t *tmp___5 ; struct list_head const *__mptr ; raw_spinlock_t *tmp___6 ; struct list_head const *__mptr___0 ; int tmp___7 ; { status = 0U; ha = vha->hw; req = *(ha->req_q_map); if (*((unsigned long *)vha + 19UL) != 0UL) { qla2x00_abort_isp_cleanup(vha); if ((ha->device_type & 65536U) != 0U) { ql_dbg(524288U, vha, 45148, "Clearing fcoe driver presence.\n"); tmp = qla83xx_clear_drv_presence(vha); if (tmp != 0) { ql_dbg(524288U, vha, 45171, "Error while clearing DRV-Presence.\n"); } else { } } else { } tmp___0 = pci_channel_offline(ha->pdev); tmp___1 = ldv__builtin_expect((long )(tmp___0 != 0 && *((unsigned long *)ha + 2UL) != 0UL), 0L); if (tmp___1 != 0L) { clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); status = 0U; return ((int )status); } else { } (*((ha->isp_ops)->get_flash_version))(vha, (void *)req->ring); (*((ha->isp_ops)->nvram_config))(vha); tmp___4 = qla2x00_restart_isp(vha); if (tmp___4 == 0) { clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); tmp___2 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___2 == 0) { vha->marker_needed = 1U; } else { } vha->flags.online = 1U; (*((ha->isp_ops)->enable_intrs))(ha); ha->isp_abort_cnt = 0U; clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); if ((ha->device_type & 8192U) != 0U || (ha->device_type & 65536U) != 0U) { qla2x00_get_fw_version(vha); } else { } if ((unsigned long )ha->fce != (unsigned long )((void *)0)) { ha->flags.fce_enabled = 1U; memset(ha->fce, 0, (size_t )(ha->fce_bufs * 1024U)); rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, (int )((uint16_t )ha->fce_bufs), (uint16_t *)(& ha->fce_mb), & ha->fce_bufs); if (rval != 0) { ql_log(1U, vha, 32819, "Unable to reinitialize FCE (%d).\n", rval); ha->flags.fce_enabled = 0U; } else { } } else { } if ((unsigned long )ha->eft != (unsigned long )((void *)0)) { memset(ha->eft, 0, 65536UL); rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, 4); if (rval != 0) { ql_log(1U, vha, 32820, "Unable to 
reinitialize EFT (%d).\n", rval); } else { } } else { } } else { vha->flags.online = 1U; tmp___3 = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___3 != 0) { if (ha->isp_abort_cnt == 0U) { ql_log(0U, vha, 32821, "ISP error recover failed - board disabled.\n"); (*((ha->isp_ops)->reset_adapter))(vha); vha->flags.online = 0U; clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); status = 0U; } else { ha->isp_abort_cnt = ha->isp_abort_cnt - 1U; ql_dbg(4194304U, vha, 32800, "ISP abort - retry remaining %d.\n", ha->isp_abort_cnt); status = 1U; } } else { ha->isp_abort_cnt = 5U; ql_dbg(4194304U, vha, 32801, "ISP error recovery - retrying (%d) more times.\n", ha->isp_abort_cnt); set_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); status = 1U; } } } else { } if ((unsigned int )status == 0U) { ql_dbg(4194304U, vha, 32802, "%s succeeded.\n", "qla2x00_abort_isp"); tmp___5 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___5); __mptr = (struct list_head const *)ha->vp_list.next; vp = (struct scsi_qla_host *)__mptr; goto ldv_66918; ldv_66917: ; if ((unsigned int )vp->vp_idx != 0U) { atomic_inc(& vp->vref_count); spin_unlock_irqrestore(& ha->vport_slock, flags); qla2x00_vp_abort_isp(vp); tmp___6 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___6); atomic_dec(& vp->vref_count); } else { } __mptr___0 = (struct list_head const *)vp->list.next; vp = (struct scsi_qla_host *)__mptr___0; ldv_66918: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_66917; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); if ((ha->device_type & 65536U) != 0U) { ql_dbg(524288U, vha, 45149, "Setting back fcoe driver presence.\n"); tmp___7 = qla83xx_set_drv_presence(vha); if (tmp___7 != 0) { ql_dbg(524288U, vha, 45172, "Error while setting DRV-Presence.\n"); } else { } } else { } } else { ql_log(1U, vha, 32803, "%s **** FAILED ****.\n", "qla2x00_abort_isp"); } return ((int )status); } } static int qla2x00_restart_isp(scsi_qla_host_t *vha ) { int status ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; unsigned long flags ; int tmp ; raw_spinlock_t *tmp___0 ; bool tmp___1 ; { status = 0; ha = vha->hw; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); tmp = qla2x00_isp_firmware(vha); if (tmp != 0) { vha->flags.online = 0U; status = (*((ha->isp_ops)->chip_diag))(vha); if (status == 0) { status = qla2x00_setup_chip(vha); } else { } } else { } if (status == 0) { status = qla2x00_init_rings(vha); if (status == 0) { clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); ha->flags.chip_reset_done = 1U; qla25xx_init_queues(ha); status = qla2x00_fw_ready(vha); if (status == 0) { qla2x00_marker(vha, req, rsp, 0, 0ULL, 2); vha->flags.online = 1U; tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); tmp___1 = qla_tgt_mode_enabled(vha); if ((int )tmp___1) { qlt_24xx_process_atio_queue(vha); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } if ((vha->device_flags & 2U) != 0U) { status = 0; } else { } } else { } } else { } return (status); } } static int qla25xx_init_queues(struct qla_hw_data *ha ) { struct rsp_que *rsp ; struct req_que *req ; struct scsi_qla_host *base_vha ; void *tmp ; int ret ; int i ; { rsp = (struct rsp_que *)0; req = (struct req_que *)0; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; ret = -1; i = 1; goto ldv_66941; 
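/* Two CIL-expanded for-loops follow: labels ldv_66940/ldv_66941 walk
   ha->rsp_q_map[1..max_rsp_queues-1] and labels ldv_66943/ldv_66944 walk
   ha->req_q_map[1..max_req_queues-1], clearing bit 0 of each queue's options and
   re-initialising it. Roughly equivalent, as a sketch only:
       for (i = 1; i < ha->max_rsp_queues; i++)
           if ((rsp = ha->rsp_q_map[i]))
               ret = qla25xx_init_rsp_que(base_vha, rsp);
   Queue 0 is skipped because i starts at 1. */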
ldv_66940: rsp = *(ha->rsp_q_map + (unsigned long )i); if ((unsigned long )rsp != (unsigned long )((struct rsp_que *)0)) { rsp->options = (unsigned int )rsp->options & 65534U; ret = qla25xx_init_rsp_que(base_vha, rsp); if (ret != 0) { ql_dbg(1073741824U, base_vha, 255, "%s Rsp que: %d init failed.\n", "qla25xx_init_queues", (int )rsp->id); } else { ql_dbg(1073741824U, base_vha, 256, "%s Rsp que: %d inited.\n", "qla25xx_init_queues", (int )rsp->id); } } else { } i = i + 1; ldv_66941: ; if ((int )ha->max_rsp_queues > i) { goto ldv_66940; } else { } i = 1; goto ldv_66944; ldv_66943: req = *(ha->req_q_map + (unsigned long )i); if ((unsigned long )req != (unsigned long )((struct req_que *)0)) { req->options = (unsigned int )req->options & 65534U; ret = qla25xx_init_req_que(base_vha, req); if (ret != 0) { ql_dbg(1073741824U, base_vha, 257, "%s Req que: %d init failed.\n", "qla25xx_init_queues", (int )req->id); } else { ql_dbg(1073741824U, base_vha, 258, "%s Req que: %d inited.\n", "qla25xx_init_queues", (int )req->id); } } else { } i = i + 1; ldv_66944: ; if ((int )ha->max_req_queues > i) { goto ldv_66943; } else { } return (ret); } } void qla2x00_reset_adapter(struct scsi_qla_host *vha ) { unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; { flags = 0UL; ha = vha->hw; reg = & (ha->iobase)->isp; vha->flags.online = 0U; (*((ha->isp_ops)->disable_intrs))(ha); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writew(4096, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); writew(12288, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } void qla24xx_reset_adapter(struct scsi_qla_host *vha ) { unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; raw_spinlock_t *tmp ; { flags = 0UL; ha = vha->hw; reg = & (ha->iobase)->isp24; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return; } else { } vha->flags.online = 0U; (*((ha->isp_ops)->disable_intrs))(ha); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writel(268435456U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); writel(1073741824U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((ha->device_type & 8192U) != 0U && *((unsigned long *)ha + 2UL) != 0UL) { (*((ha->isp_ops)->enable_intrs))(ha); } else { } return; } } static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha , struct nvram_24xx *nv ) { { return; } } int qla24xx_nvram_config(struct scsi_qla_host *vha ) { int rval ; struct init_cb_24xx *icb ; struct nvram_24xx *nv ; uint32_t *dptr ; uint8_t *dptr1 ; uint8_t *dptr2 ; uint32_t chksum ; uint16_t cnt ; struct qla_hw_data *ha ; uint32_t *tmp ; bool tmp___0 ; int tmp___1 ; uint8_t *tmp___2 ; uint8_t *tmp___3 ; uint16_t tmp___4 ; uint8_t *tmp___5 ; uint8_t *tmp___6 ; uint16_t tmp___7 ; { ha = vha->hw; rval = 0; icb = (struct init_cb_24xx *)ha->init_cb; nv = (struct nvram_24xx *)ha->nvram; if ((unsigned int )ha->port_no == 0U) { ha->nvram_base = 128U; ha->vpd_base = 0U; } else { ha->nvram_base = 384U; ha->vpd_base = 256U; } ha->nvram_size = 512U; ha->vpd_size = 512U; ha->vpd = ha->nvram + 2048UL; (*((ha->isp_ops)->read_nvram))(vha, (uint8_t *)ha->vpd, (uint32_t )((int )ha->nvram_base + -128), 2048U); dptr = (uint32_t *)nv; 
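/* Read ha->nvram_size bytes of NVRAM into nv and sum it as 32-bit words; a non-zero
   additive checksum, a signature other than "ISP " in nv->id, or nvram_version == 0
   selects the default-configuration fallback further below. Validation sketch
   (illustrative only, mirroring the expanded loop):
       for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
           chksum += *dptr++;
       bad_nvram = chksum || memcmp(nv->id, "ISP ", 4) || !nv->nvram_version;
 */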
(*((ha->isp_ops)->read_nvram))(vha, (uint8_t *)dptr, (uint32_t )ha->nvram_base, (uint32_t )ha->nvram_size); cnt = 0U; chksum = 0U; goto ldv_66981; ldv_66980: tmp = dptr; dptr = dptr + 1; chksum = *tmp + chksum; cnt = (uint16_t )((int )cnt + 1); ldv_66981: ; if ((int )ha->nvram_size >> 2 > (int )cnt) { goto ldv_66980; } else { } ql_dbg(1073872896U, vha, 106, "Contents of NVRAM\n"); ql_dump_buffer(1073872896U, vha, 269, (uint8_t *)nv, (uint32_t )ha->nvram_size); if (((((chksum != 0U || (unsigned int )nv->id[0] != 73U) || (unsigned int )nv->id[1] != 83U) || (unsigned int )nv->id[2] != 80U) || (unsigned int )nv->id[3] != 32U) || (unsigned int )nv->nvram_version == 0U) { ql_log(1U, vha, 107, "Inconsistent NVRAM detected: checksum=0x%x id=%c version=0x%x.\n", chksum, (int )nv->id[0], (int )nv->nvram_version); ql_log(1U, vha, 108, "Falling back to functioning (yet invalid -- WWPN) defaults.\n"); memset((void *)nv, 0, (size_t )ha->nvram_size); nv->nvram_version = 1U; nv->version = 1U; nv->frame_payload_size = 2048U; nv->execution_throttle = 65535U; nv->exchange_count = 0U; nv->hard_address = 124U; nv->port_name[0] = 33U; nv->port_name[1] = (unsigned int )ha->port_no + 1U; nv->port_name[2] = 0U; nv->port_name[3] = 224U; nv->port_name[4] = 139U; nv->port_name[5] = 28U; nv->port_name[6] = 85U; nv->port_name[7] = 134U; nv->node_name[0] = 32U; nv->node_name[1] = 0U; nv->node_name[2] = 0U; nv->node_name[3] = 224U; nv->node_name[4] = 139U; nv->node_name[5] = 28U; nv->node_name[6] = 85U; nv->node_name[7] = 134U; qla24xx_nvram_wwn_from_ofw(vha, nv); nv->login_retry_count = 8U; nv->interrupt_delay_timer = 0U; nv->login_timeout = 0U; nv->firmware_options_1 = 24582U; nv->firmware_options_2 = 32U; nv->firmware_options_2 = nv->firmware_options_2 | 4096U; nv->firmware_options_3 = 16384U; nv->host_p = 3072U; nv->efi_parameters = 0U; nv->reset_delay = 5U; nv->max_luns_per_target = 128U; nv->port_down_retry_count = 30U; nv->link_down_timeout = 30U; rval = 1; } else { } tmp___0 = qla_ini_mode_enabled(vha); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { nv->firmware_options_1 = nv->firmware_options_1 & 4294959103U; nv->host_p = nv->host_p & 4294966271U; } else { } qlt_24xx_config_nvram_stage1(vha, nv); memset((void *)icb, 0, (size_t )ha->init_cb_size); dptr1 = (uint8_t *)icb; dptr2 = (uint8_t *)(& nv->version); cnt = 28U; goto ldv_66984; ldv_66983: tmp___2 = dptr1; dptr1 = dptr1 + 1; tmp___3 = dptr2; dptr2 = dptr2 + 1; *tmp___2 = *tmp___3; ldv_66984: tmp___4 = cnt; cnt = (uint16_t )((int )cnt - 1); if ((unsigned int )tmp___4 != 0U) { goto ldv_66983; } else { } icb->login_retry_count = nv->login_retry_count; icb->link_down_on_nos = nv->link_down_on_nos; dptr1 = (uint8_t *)(& icb->interrupt_delay_timer); dptr2 = (uint8_t *)(& nv->interrupt_delay_timer); cnt = 20U; goto ldv_66987; ldv_66986: tmp___5 = dptr1; dptr1 = dptr1 + 1; tmp___6 = dptr2; dptr2 = dptr2 + 1; *tmp___5 = *tmp___6; ldv_66987: tmp___7 = cnt; cnt = (uint16_t )((int )cnt - 1); if ((unsigned int )tmp___7 != 0U) { goto ldv_66986; } else { } qla2x00_set_model_info(vha, (uint8_t *)(& nv->model_name), 16UL, (char *)"QLA2462"); qlt_24xx_config_nvram_stage2(vha, icb); if ((nv->host_p & 32768U) != 0U) { memcpy((void *)(& icb->node_name), (void const *)(& nv->alternate_node_name), 8UL); memcpy((void *)(& icb->port_name), (void const *)(& nv->alternate_port_name), 8UL); } else { } if ((icb->firmware_options_1 & 16384U) == 0U) { memcpy((void *)(& icb->node_name), (void const *)(& icb->port_name), 8UL); icb->node_name[0] = (unsigned int 
)icb->node_name[0] & 240U; } else { } ha->flags.disable_risc_code_load = 0U; ha->flags.enable_lip_reset = 0U; ha->flags.enable_lip_full_login = (nv->host_p & 1024U) != 0U; ha->flags.enable_target_reset = (nv->host_p & 2048U) != 0U; ha->flags.enable_led_scheme = 0U; ha->flags.disable_serdes = (nv->host_p & 32U) != 0U; ha->operating_mode = (uint8_t )((icb->firmware_options_2 & 112U) >> 4); memcpy((void *)(& ha->fw_seriallink_options24), (void const *)(& nv->seriallink_options), 8UL); ha->serial0 = icb->port_name[5]; ha->serial1 = icb->port_name[6]; ha->serial2 = icb->port_name[7]; memcpy((void *)(& vha->node_name), (void const *)(& icb->node_name), 8UL); memcpy((void *)(& vha->port_name), (void const *)(& icb->port_name), 8UL); icb->execution_throttle = 65535U; ha->retry_count = (uint8_t )nv->login_retry_count; if ((int )nv->login_timeout < ql2xlogintimeout) { nv->login_timeout = (unsigned short )ql2xlogintimeout; } else { } if ((unsigned int )nv->login_timeout <= 3U) { nv->login_timeout = 4U; } else { } ha->login_timeout = (uint8_t )nv->login_timeout; icb->login_timeout = nv->login_timeout; ha->r_a_tov = 100U; ha->loop_reset_delay = (uint16_t )nv->reset_delay; if ((unsigned int )nv->link_down_timeout == 0U) { ha->loop_down_abort_time = 195U; } else { ha->link_down_timeout = (uint8_t )nv->link_down_timeout; ha->loop_down_abort_time = ~ ((int )ha->link_down_timeout); } ha->port_down_retry_count = (int )nv->port_down_retry_count; if (qlport_down_retry != 0) { ha->port_down_retry_count = qlport_down_retry; } else { } ha->login_retry_count = (uint32_t )nv->login_retry_count; if (ha->port_down_retry_count == (int )nv->port_down_retry_count && ha->port_down_retry_count > 3) { ha->login_retry_count = (uint32_t )ha->port_down_retry_count; } else if (ha->port_down_retry_count > (int )ha->login_retry_count) { ha->login_retry_count = (uint32_t )ha->port_down_retry_count; } else { } if (ql2xloginretrycount != 0) { ha->login_retry_count = (uint32_t )ql2xloginretrycount; } else { } if (*((unsigned long *)vha + 19UL) == 0UL) { ha->zio_mode = (unsigned int )((uint16_t )icb->firmware_options_2) & 15U; ha->zio_timer = (unsigned int )icb->interrupt_delay_timer != 0U ? 
icb->interrupt_delay_timer : 2U; } else { } icb->firmware_options_2 = icb->firmware_options_2 & 4294967280U; vha->flags.process_response_queue = 0U; if ((unsigned int )ha->zio_mode != 0U) { ha->zio_mode = 6U; ql_log(2U, vha, 111, "ZIO mode %d enabled; timer delay (%d us).\n", (int )ha->zio_mode, (int )ha->zio_timer * 100); icb->firmware_options_2 = icb->firmware_options_2 | (uint32_t )ha->zio_mode; icb->interrupt_delay_timer = ha->zio_timer; vha->flags.process_response_queue = 1U; } else { } if (rval != 0) { ql_log(1U, vha, 112, "NVRAM configuration failed.\n"); } else { } return (rval); } } static int qla24xx_load_risc_flash(scsi_qla_host_t *vha , uint32_t *srisc_addr , uint32_t faddr ) { int rval ; int segments ; int fragment ; uint32_t *dcode ; uint32_t dlen ; uint32_t risc_addr ; uint32_t risc_size ; uint32_t i ; struct qla_hw_data *ha ; struct req_que *req ; __u32 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; __u32 tmp___3 ; int tmp___4 ; ulong tmp___5 ; ulong tmp___6 ; void const *tmp___7 ; __u32 tmp___8 ; int tmp___9 ; ulong tmp___10 ; { rval = 0; ha = vha->hw; req = *(ha->req_q_map); ql_dbg(1073741824U, vha, 139, "FW: Loading firmware from flash (%x).\n", faddr); rval = 0; segments = 2; dcode = (uint32_t *)req->ring; *srisc_addr = 0U; qla24xx_read_flash_data(vha, dcode, faddr + 4U, 4U); i = 0U; goto ldv_67005; ldv_67004: tmp = __fswab32(*(dcode + (unsigned long )i)); *(dcode + (unsigned long )i) = tmp; i = i + 1U; ldv_67005: ; if (i <= 3U) { goto ldv_67004; } else { } if ((((*dcode == 4294967295U && *(dcode + 1UL) == 4294967295U) && *(dcode + 2UL) == 4294967295U) && *(dcode + 3UL) == 4294967295U) || (((*dcode == 0U && *(dcode + 1UL) == 0U) && *(dcode + 2UL) == 0U) && *(dcode + 3UL) == 0U)) { ql_log(0U, vha, 140, "Unable to verify the integrity of flash firmware image.\n"); ql_log(0U, vha, 141, "Firmware data: %08x %08x %08x %08x.\n", *dcode, *(dcode + 1UL), *(dcode + 2UL), *(dcode + 3UL)); return (258); } else { } goto ldv_67014; ldv_67013: qla24xx_read_flash_data(vha, dcode, faddr, 4U); tmp___0 = __fswab32(*(dcode + 2UL)); risc_addr = tmp___0; *srisc_addr = *srisc_addr != 0U ? 
*srisc_addr : risc_addr; tmp___1 = __fswab32(*(dcode + 3UL)); risc_size = tmp___1; fragment = 0; goto ldv_67011; ldv_67010: dlen = ha->fw_transfer_size >> 2; if (dlen > risc_size) { dlen = risc_size; } else { } ql_dbg(1073741824U, vha, 142, "Loading risc segment@ risc addr %x number of dwords 0x%x offset 0x%x.\n", risc_addr, dlen, faddr); qla24xx_read_flash_data(vha, dcode, faddr, dlen); i = 0U; goto ldv_67008; ldv_67007: tmp___2 = __fswab32(*(dcode + (unsigned long )i)); *(dcode + (unsigned long )i) = tmp___2; i = i + 1U; ldv_67008: ; if (i < dlen) { goto ldv_67007; } else { } rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); if (rval != 0) { ql_log(0U, vha, 143, "Failed to load segment %d of firmware.\n", fragment); return (258); } else { } faddr = faddr + dlen; risc_addr = risc_addr + dlen; risc_size = risc_size - dlen; fragment = fragment + 1; ldv_67011: ; if (risc_size != 0U && rval == 0) { goto ldv_67010; } else { } segments = segments - 1; ldv_67014: ; if (segments != 0 && rval == 0) { goto ldv_67013; } else { } if ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U) { return (rval); } else { } if ((unsigned long )ha->fw_dump_template != (unsigned long )((void *)0)) { vfree((void const *)ha->fw_dump_template); } else { } ha->fw_dump_template = (void *)0; ha->fw_dump_template_len = 0U; ql_dbg(1073741824U, vha, 353, "Loading fwdump template from %x\n", faddr); qla24xx_read_flash_data(vha, dcode, faddr, 7U); tmp___3 = __fswab32(*(dcode + 2UL)); risc_size = tmp___3; ql_dbg(1073741824U, vha, 354, "-> array size %x dwords\n", risc_size); if (risc_size == 0U || risc_size == 4294967295U) { goto default_template; } else { } dlen = (risc_size + 1073741816U) * 4U; ql_dbg(1073741824U, vha, 355, "-> template allocating %x bytes...\n", dlen); ha->fw_dump_template = vmalloc((unsigned long )dlen); if ((unsigned long )ha->fw_dump_template == (unsigned long )((void *)0)) { ql_log(1U, vha, 356, "Failed fwdump template allocate %x bytes.\n", risc_size); goto default_template; } else { } faddr = faddr + 7U; risc_size = risc_size - 8U; dcode = (uint32_t *)ha->fw_dump_template; qla24xx_read_flash_data(vha, dcode, faddr, risc_size); i = 0U; goto ldv_67018; ldv_67017: *(dcode + (unsigned long )i) = *(dcode + (unsigned long )i); i = i + 1U; ldv_67018: ; if (i < risc_size) { goto ldv_67017; } else { } tmp___4 = qla27xx_fwdt_template_valid((void *)dcode); if (tmp___4 == 0) { ql_log(1U, vha, 357, "Failed fwdump template validate\n"); goto default_template; } else { } tmp___5 = qla27xx_fwdt_template_size((void *)dcode); dlen = (uint32_t )tmp___5; ql_dbg(1073741824U, vha, 358, "-> template size %x bytes\n", dlen); if ((unsigned long )dlen > (unsigned long )risc_size * 4UL) { ql_log(1U, vha, 359, "Failed fwdump template exceeds array by %x bytes\n", dlen - risc_size * 4U); goto default_template; } else { } ha->fw_dump_template_len = dlen; return (rval); default_template: ql_log(1U, vha, 360, "Using default fwdump template\n"); if ((unsigned long )ha->fw_dump_template != (unsigned long )((void *)0)) { vfree((void const *)ha->fw_dump_template); } else { } ha->fw_dump_template = (void *)0; ha->fw_dump_template_len = 0U; tmp___6 = qla27xx_fwdt_template_default_size(); dlen = (uint32_t )tmp___6; ql_dbg(1073741824U, vha, 361, "-> template allocating %x bytes...\n", dlen); ha->fw_dump_template = vmalloc((unsigned long )dlen); if ((unsigned long )ha->fw_dump_template == (unsigned long )((void *)0)) { ql_log(1U, vha, 362, "Failed fwdump template allocate %x bytes.\n", risc_size); goto 
failed_template; } else { } dcode = (uint32_t *)ha->fw_dump_template; risc_size = dlen / 4U; tmp___7 = qla27xx_fwdt_template_default(); memcpy((void *)dcode, tmp___7, (size_t )dlen); i = 0U; goto ldv_67022; ldv_67021: tmp___8 = __fswab32(*(dcode + (unsigned long )i)); *(dcode + (unsigned long )i) = tmp___8; i = i + 1U; ldv_67022: ; if (i < risc_size) { goto ldv_67021; } else { } tmp___9 = qla27xx_fwdt_template_valid(ha->fw_dump_template); if (tmp___9 == 0) { ql_log(1U, vha, 363, "Failed fwdump template validate\n"); goto failed_template; } else { } tmp___10 = qla27xx_fwdt_template_size(ha->fw_dump_template); dlen = (uint32_t )tmp___10; ql_dbg(1073741824U, vha, 364, "-> template size %x bytes\n", dlen); ha->fw_dump_template_len = dlen; return (rval); failed_template: ql_log(1U, vha, 365, "Failed default fwdump template\n"); if ((unsigned long )ha->fw_dump_template != (unsigned long )((void *)0)) { vfree((void const *)ha->fw_dump_template); } else { } ha->fw_dump_template = (void *)0; ha->fw_dump_template_len = 0U; return (rval); } } int qla2x00_load_risc(struct scsi_qla_host *vha , uint32_t *srisc_addr ) { int rval ; int i ; int fragment ; uint16_t *wcode ; uint16_t *fwcode ; uint32_t risc_addr ; uint32_t risc_size ; uint32_t fwclen ; uint32_t wlen ; uint32_t *seg ; struct fw_blob *blob ; struct qla_hw_data *ha ; struct req_que *req ; __u16 tmp ; __u16 tmp___0 ; __u16 tmp___1 ; { ha = vha->hw; req = *(ha->req_q_map); blob = qla2x00_request_firmware(vha); if ((unsigned long )blob == (unsigned long )((struct fw_blob *)0)) { ql_log(2U, vha, 131, "Firmware image unavailable.\n"); ql_log(2U, vha, 132, "Firmware images can be retrieved from: http://ldriver.qlogic.com/firmware/.\n"); return (258); } else { } rval = 0; wcode = (uint16_t *)req->ring; *srisc_addr = 0U; fwcode = (uint16_t *)(blob->fw)->data; fwclen = 0U; if ((unsigned long )(blob->fw)->size <= 15UL) { ql_log(0U, vha, 133, "Unable to verify integrity of firmware image (%Zd).\n", (blob->fw)->size); goto fail_fw_integrity; } else { } i = 0; goto ldv_67043; ldv_67042: tmp = __fswab16((int )*(fwcode + ((unsigned long )i + 4UL))); *(wcode + (unsigned long )i) = tmp; i = i + 1; ldv_67043: ; if (i <= 3) { goto ldv_67042; } else { } if (((((unsigned int )*wcode == 65535U && (unsigned int )*(wcode + 1UL) == 65535U) && (unsigned int )*(wcode + 2UL) == 65535U) && (unsigned int )*(wcode + 3UL) == 65535U) || ((((unsigned int )*wcode == 0U && (unsigned int )*(wcode + 1UL) == 0U) && (unsigned int )*(wcode + 2UL) == 0U) && (unsigned int )*(wcode + 3UL) == 0U)) { ql_log(0U, vha, 134, "Unable to verify integrity of firmware image.\n"); ql_log(0U, vha, 135, "Firmware data: %04x %04x %04x %04x.\n", (int )*wcode, (int )*(wcode + 1UL), (int )*(wcode + 2UL), (int )*(wcode + 3UL)); goto fail_fw_integrity; } else { } seg = (uint32_t *)(& blob->segs); goto ldv_67052; ldv_67051: risc_addr = *seg; *srisc_addr = *srisc_addr == 0U ? 
*seg : *srisc_addr; tmp___0 = __fswab16((int )*(fwcode + 3UL)); risc_size = (uint32_t )tmp___0; fwclen = risc_size * 2U + fwclen; if ((unsigned long )(blob->fw)->size < (unsigned long )fwclen) { ql_log(0U, vha, 136, "Unable to verify integrity of firmware image (%Zd).\n", (blob->fw)->size); goto fail_fw_integrity; } else { } fragment = 0; goto ldv_67050; ldv_67049: wlen = (uint32_t )((unsigned short )(ha->fw_transfer_size >> 1)); if (wlen > risc_size) { wlen = risc_size; } else { } ql_dbg(1073741824U, vha, 137, "Loading risc segment@ risc addr %x number of words 0x%x.\n", risc_addr, wlen); i = 0; goto ldv_67046; ldv_67045: tmp___1 = __fswab16((int )*(fwcode + (unsigned long )i)); *(wcode + (unsigned long )i) = tmp___1; i = i + 1; ldv_67046: ; if ((uint32_t )i < wlen) { goto ldv_67045; } else { } rval = qla2x00_load_ram(vha, req->dma, risc_addr, wlen); if (rval != 0) { ql_log(0U, vha, 138, "Failed to load segment %d of firmware.\n", fragment); goto ldv_67048; } else { } fwcode = fwcode + (unsigned long )wlen; risc_addr = risc_addr + wlen; risc_size = risc_size - wlen; fragment = fragment + 1; ldv_67050: ; if (risc_size != 0U && rval == 0) { goto ldv_67049; } else { } ldv_67048: seg = seg + 1; ldv_67052: ; if (*seg != 0U && rval == 0) { goto ldv_67051; } else { } return (rval); fail_fw_integrity: ; return (258); } } static int qla24xx_load_risc_blob(scsi_qla_host_t *vha , uint32_t *srisc_addr ) { int rval ; int segments ; int fragment ; uint32_t *dcode ; uint32_t dlen ; uint32_t risc_addr ; uint32_t risc_size ; uint32_t i ; struct fw_blob *blob ; uint32_t const *fwcode ; uint32_t fwclen ; struct qla_hw_data *ha ; struct req_que *req ; __u32 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; __u32 tmp___3 ; int tmp___4 ; ulong tmp___5 ; ulong tmp___6 ; void const *tmp___7 ; __u32 tmp___8 ; int tmp___9 ; ulong tmp___10 ; { ha = vha->hw; req = *(ha->req_q_map); blob = qla2x00_request_firmware(vha); if ((unsigned long )blob == (unsigned long )((struct fw_blob *)0)) { ql_log(1U, vha, 144, "Firmware image unavailable.\n"); ql_log(1U, vha, 145, "Firmware images can be retrieved from: http://ldriver.qlogic.com/firmware/.\n"); return (258); } else { } ql_dbg(1073741824U, vha, 146, "FW: Loading via request-firmware.\n"); rval = 0; segments = 2; dcode = (uint32_t *)req->ring; *srisc_addr = 0U; fwcode = (uint32_t const *)(blob->fw)->data; fwclen = 0U; if ((unsigned long )(blob->fw)->size <= 31UL) { ql_log(0U, vha, 147, "Unable to verify integrity of firmware image (%Zd).\n", (blob->fw)->size); return (258); } else { } i = 0U; goto ldv_67072; ldv_67071: tmp = __fswab32(*(fwcode + (unsigned long )(i + 4U))); *(dcode + (unsigned long )i) = tmp; i = i + 1U; ldv_67072: ; if (i <= 3U) { goto ldv_67071; } else { } if ((((*dcode == 4294967295U && *(dcode + 1UL) == 4294967295U) && *(dcode + 2UL) == 4294967295U) && *(dcode + 3UL) == 4294967295U) || (((*dcode == 0U && *(dcode + 1UL) == 0U) && *(dcode + 2UL) == 0U) && *(dcode + 3UL) == 0U)) { ql_log(0U, vha, 148, "Unable to verify integrity of firmware image (%Zd).\n", (blob->fw)->size); ql_log(0U, vha, 149, "Firmware data: %08x %08x %08x %08x.\n", *dcode, *(dcode + 1UL), *(dcode + 2UL), *(dcode + 3UL)); return (258); } else { } goto ldv_67081; ldv_67080: tmp___0 = __fswab32(*(fwcode + 2UL)); risc_addr = tmp___0; *srisc_addr = *srisc_addr != 0U ? 
*srisc_addr : risc_addr; tmp___1 = __fswab32(*(fwcode + 3UL)); risc_size = tmp___1; fwclen = risc_size * 4U + fwclen; if ((unsigned long )(blob->fw)->size < (unsigned long )fwclen) { ql_log(0U, vha, 150, "Unable to verify integrity of firmware image (%Zd).\n", (blob->fw)->size); return (258); } else { } fragment = 0; goto ldv_67078; ldv_67077: dlen = ha->fw_transfer_size >> 2; if (dlen > risc_size) { dlen = risc_size; } else { } ql_dbg(1073741824U, vha, 151, "Loading risc segment@ risc addr %x number of dwords 0x%x.\n", risc_addr, dlen); i = 0U; goto ldv_67075; ldv_67074: tmp___2 = __fswab32(*(fwcode + (unsigned long )i)); *(dcode + (unsigned long )i) = tmp___2; i = i + 1U; ldv_67075: ; if (i < dlen) { goto ldv_67074; } else { } rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); if (rval != 0) { ql_log(0U, vha, 152, "Failed to load segment %d of firmware.\n", fragment); return (258); } else { } fwcode = fwcode + (unsigned long )dlen; risc_addr = risc_addr + dlen; risc_size = risc_size - dlen; fragment = fragment + 1; ldv_67078: ; if (risc_size != 0U && rval == 0) { goto ldv_67077; } else { } segments = segments - 1; ldv_67081: ; if (segments != 0 && rval == 0) { goto ldv_67080; } else { } if ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U) { return (rval); } else { } if ((unsigned long )ha->fw_dump_template != (unsigned long )((void *)0)) { vfree((void const *)ha->fw_dump_template); } else { } ha->fw_dump_template = (void *)0; ha->fw_dump_template_len = 0U; ql_dbg(1073741824U, vha, 369, "Loading fwdump template from %x\n", (unsigned int )((long )fwcode) - (unsigned int )((long )(blob->fw)->data)); tmp___3 = __fswab32(*(fwcode + 2UL)); risc_size = tmp___3; ql_dbg(1073741824U, vha, 370, "-> array size %x dwords\n", risc_size); if (risc_size == 0U || risc_size == 4294967295U) { goto default_template; } else { } dlen = (risc_size + 1073741816U) * 4U; ql_dbg(1073741824U, vha, 371, "-> template allocating %x bytes...\n", dlen); ha->fw_dump_template = vmalloc((unsigned long )dlen); if ((unsigned long )ha->fw_dump_template == (unsigned long )((void *)0)) { ql_log(1U, vha, 372, "Failed fwdump template allocate %x bytes.\n", risc_size); goto default_template; } else { } fwcode = fwcode + 7UL; risc_size = risc_size - 8U; dcode = (uint32_t *)ha->fw_dump_template; i = 0U; goto ldv_67085; ldv_67084: *(dcode + (unsigned long )i) = *(fwcode + (unsigned long )i); i = i + 1U; ldv_67085: ; if (i < risc_size) { goto ldv_67084; } else { } tmp___4 = qla27xx_fwdt_template_valid((void *)dcode); if (tmp___4 == 0) { ql_log(1U, vha, 373, "Failed fwdump template validate\n"); goto default_template; } else { } tmp___5 = qla27xx_fwdt_template_size((void *)dcode); dlen = (uint32_t )tmp___5; ql_dbg(1073741824U, vha, 374, "-> template size %x bytes\n", dlen); if ((unsigned long )dlen > (unsigned long )risc_size * 4UL) { ql_log(1U, vha, 375, "Failed fwdump template exceeds array by %x bytes\n", dlen - risc_size * 4U); goto default_template; } else { } ha->fw_dump_template_len = dlen; return (rval); default_template: ql_log(1U, vha, 376, "Using default fwdump template\n"); if ((unsigned long )ha->fw_dump_template != (unsigned long )((void *)0)) { vfree((void const *)ha->fw_dump_template); } else { } ha->fw_dump_template = (void *)0; ha->fw_dump_template_len = 0U; tmp___6 = qla27xx_fwdt_template_default_size(); dlen = (uint32_t )tmp___6; ql_dbg(1073741824U, vha, 377, "-> template allocating %x bytes...\n", dlen); ha->fw_dump_template = vmalloc((unsigned long )dlen); if ((unsigned long 
)ha->fw_dump_template == (unsigned long )((void *)0)) { ql_log(1U, vha, 378, "Failed fwdump template allocate %x bytes.\n", risc_size); goto failed_template; } else { } dcode = (uint32_t *)ha->fw_dump_template; risc_size = dlen / 4U; tmp___7 = qla27xx_fwdt_template_default(); fwcode = (uint32_t const *)tmp___7; i = 0U; goto ldv_67089; ldv_67088: tmp___8 = __fswab32(*(fwcode + (unsigned long )i)); *(dcode + (unsigned long )i) = tmp___8; i = i + 1U; ldv_67089: ; if (i < risc_size) { goto ldv_67088; } else { } tmp___9 = qla27xx_fwdt_template_valid(ha->fw_dump_template); if (tmp___9 == 0) { ql_log(1U, vha, 379, "Failed fwdump template validate\n"); goto failed_template; } else { } tmp___10 = qla27xx_fwdt_template_size(ha->fw_dump_template); dlen = (uint32_t )tmp___10; ql_dbg(1073741824U, vha, 380, "-> template size %x bytes\n", dlen); ha->fw_dump_template_len = dlen; return (rval); failed_template: ql_log(1U, vha, 381, "Failed default fwdump template\n"); if ((unsigned long )ha->fw_dump_template != (unsigned long )((void *)0)) { vfree((void const *)ha->fw_dump_template); } else { } ha->fw_dump_template = (void *)0; ha->fw_dump_template_len = 0U; return (rval); } } int qla24xx_load_risc(scsi_qla_host_t *vha , uint32_t *srisc_addr ) { int rval ; int tmp ; int tmp___0 ; { if (ql2xfwloadbin == 1) { tmp = qla81xx_load_risc(vha, srisc_addr); return (tmp); } else { } rval = qla24xx_load_risc_blob(vha, srisc_addr); if (rval == 0) { return (rval); } else { } tmp___0 = qla24xx_load_risc_flash(vha, srisc_addr, (vha->hw)->flt_region_fw); return (tmp___0); } } int qla81xx_load_risc(scsi_qla_host_t *vha , uint32_t *srisc_addr ) { int rval ; struct qla_hw_data *ha ; { ha = vha->hw; if (ql2xfwloadbin == 2) { goto try_blob_fw; } else { } rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); if (rval == 0) { return (rval); } else { } try_blob_fw: rval = qla24xx_load_risc_blob(vha, srisc_addr); if (rval == 0 || ha->flt_region_gold_fw == 0U) { return (rval); } else { } ql_log(2U, vha, 153, "Attempting to fallback to golden firmware.\n"); rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); if (rval != 0) { return (rval); } else { } ql_log(2U, vha, 154, "Update operational firmware.\n"); ha->flags.running_gold_fw = 1U; return (rval); } } void qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha ) { int ret ; int retries ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { ha = vha->hw; if (*((unsigned long *)ha + 2UL) != 0UL) { return; } else { } if ((ha->device_type & 134217728U) == 0U) { return; } else { } if ((unsigned int )ha->fw_major_version == 0U) { return; } else { } ret = qla2x00_stop_firmware(vha); retries = 5; goto ldv_67111; ldv_67110: (*((ha->isp_ops)->reset_chip))(vha); tmp = (*((ha->isp_ops)->chip_diag))(vha); if (tmp != 0) { goto ldv_67109; } else { } tmp___0 = qla2x00_setup_chip(vha); if (tmp___0 != 0) { goto ldv_67109; } else { } ql_log(2U, vha, 32789, "Attempting retry of stop-firmware command.\n"); ret = qla2x00_stop_firmware(vha); ldv_67109: retries = retries - 1; ldv_67111: ; if (((ret != 0 && ret != 256) && ret != 1) && retries != 0) { goto ldv_67110; } else { } return; } } int qla24xx_configure_vhba(scsi_qla_host_t *vha ) { int rval ; int rval2 ; uint16_t mb[32U] ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp ; struct req_que *req ; struct rsp_que *rsp ; { rval = 0; ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; if ((unsigned int )vha->vp_idx == 0U) { return (-22); } else { } rval = 
qla2x00_fw_ready(base_vha); if (*((unsigned long *)ha + 2UL) != 0UL) { req = *(ha->req_q_map); } else { req = vha->req; } rsp = req->rsp; if (rval == 0) { clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); qla2x00_marker(vha, req, rsp, 0, 0ULL, 2); } else { } vha->flags.management_server_logged_in = 0U; rval2 = (*((ha->isp_ops)->fabric_login))(vha, 2044, 255, 255, 252, (uint16_t *)(& mb), 2); if (rval2 != 0 || (unsigned int )mb[0] != 16384U) { if (rval2 == 259) { ql_dbg(1073741824U, vha, 288, "Failed SNS login: loop_id=%x, rval2=%d\n", 2044, rval2); } else { ql_dbg(1073741824U, vha, 259, "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", 2044, (int )mb[0], (int )mb[1], (int )mb[2], (int )mb[6], (int )mb[7]); } return (258); } else { } atomic_set(& vha->loop_down_timer, 0); atomic_set(& vha->loop_state, 3); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); rval = qla2x00_loop_resync(base_vha); return (rval); } } static struct list_head qla_cs84xx_list = {& qla_cs84xx_list, & qla_cs84xx_list}; static struct mutex qla_cs84xx_mutex = {{1}, {{{{{0}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "qla_cs84xx_mutex.wait_lock", 0, 0UL}}}}, {& qla_cs84xx_mutex.wait_list, & qla_cs84xx_mutex.wait_list}, 0, (void *)(& qla_cs84xx_mutex), {0, {0, 0}, "qla_cs84xx_mutex", 0, 0UL}}; static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *vha ) { struct qla_chip_state_84xx *cs84xx ; struct qla_hw_data *ha ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; void *tmp ; struct lock_class_key __key ; struct lock_class_key __key___0 ; { ha = vha->hw; mutex_lock_nested(& qla_cs84xx_mutex, 0U); __mptr = (struct list_head const *)qla_cs84xx_list.next; cs84xx = (struct qla_chip_state_84xx *)__mptr; goto ldv_67137; ldv_67136: ; if ((unsigned long )cs84xx->bus == (unsigned long )((void *)(ha->pdev)->bus)) { kref_get(& cs84xx->kref); goto done; } else { } __mptr___0 = (struct list_head const *)cs84xx->list.next; cs84xx = (struct qla_chip_state_84xx *)__mptr___0; ldv_67137: ; if ((unsigned long )(& cs84xx->list) != (unsigned long )(& qla_cs84xx_list)) { goto ldv_67136; } else { } tmp = kzalloc(288UL, 208U); cs84xx = (struct qla_chip_state_84xx *)tmp; if ((unsigned long )cs84xx == (unsigned long )((struct qla_chip_state_84xx *)0)) { goto done; } else { } kref_init(& cs84xx->kref); spinlock_check(& cs84xx->access_lock); __raw_spin_lock_init(& cs84xx->access_lock.__annonCompField18.rlock, "&(&cs84xx->access_lock)->rlock", & __key); __mutex_init(& cs84xx->fw_update_mutex, "&cs84xx->fw_update_mutex", & __key___0); cs84xx->bus = (void *)(ha->pdev)->bus; list_add_tail(& cs84xx->list, & qla_cs84xx_list); done: mutex_unlock(& qla_cs84xx_mutex); return (cs84xx); } } static void __qla84xx_chip_release(struct kref *kref ) { struct qla_chip_state_84xx *cs84xx ; struct kref const *__mptr ; { __mptr = (struct kref const *)kref; cs84xx = (struct qla_chip_state_84xx *)__mptr + 0xfffffffffffffff0UL; mutex_lock_nested(& qla_cs84xx_mutex, 0U); list_del(& cs84xx->list); mutex_unlock(& qla_cs84xx_mutex); kfree((void const *)cs84xx); return; } } void qla84xx_put_chip(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((unsigned long )ha->cs84xx != (unsigned long )((struct qla_chip_state_84xx *)0)) { kref_put(& (ha->cs84xx)->kref, & __qla84xx_chip_release); } else { } return; } } static int qla84xx_init_chip(scsi_qla_host_t *vha ) { int rval ; uint16_t status[2U] ; struct 
qla_hw_data *ha ; { ha = vha->hw; mutex_lock_nested(& (ha->cs84xx)->fw_update_mutex, 0U); rval = qla84xx_verify_chip(vha, (uint16_t *)(& status)); mutex_unlock(& (ha->cs84xx)->fw_update_mutex); return (rval != 0 || (unsigned int )status[0] != 0U ? 258 : 0); } } int qla81xx_nvram_config(struct scsi_qla_host *vha ) { int rval ; struct init_cb_81xx *icb ; struct nvram_81xx *nv ; uint32_t *dptr ; uint8_t *dptr1 ; uint8_t *dptr2 ; uint32_t chksum ; uint16_t cnt ; struct qla_hw_data *ha ; uint32_t *tmp ; uint8_t *tmp___0 ; uint8_t *tmp___1 ; uint16_t tmp___2 ; uint8_t *tmp___3 ; uint8_t *tmp___4 ; uint16_t tmp___5 ; int tmp___6 ; { ha = vha->hw; rval = 0; icb = (struct init_cb_81xx *)ha->init_cb; nv = (struct nvram_81xx *)ha->nvram; ha->nvram_size = 512U; ha->vpd_size = 512U; if (((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) || (ha->device_type & 65536U) != 0U) { ha->vpd_size = 1024U; } else { } ha->vpd = ha->nvram + 2048UL; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->vpd, ha->flt_region_vpd << 2, (uint32_t )ha->vpd_size); (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->nvram, ha->flt_region_nvram << 2, (uint32_t )ha->nvram_size); dptr = (uint32_t *)nv; cnt = 0U; chksum = 0U; goto ldv_67170; ldv_67169: tmp = dptr; dptr = dptr + 1; chksum = *tmp + chksum; cnt = (uint16_t )((int )cnt + 1); ldv_67170: ; if ((int )ha->nvram_size >> 2 > (int )cnt) { goto ldv_67169; } else { } ql_dbg(1073872896U, vha, 273, "Contents of NVRAM:\n"); ql_dump_buffer(1073872896U, vha, 274, (uint8_t *)nv, (uint32_t )ha->nvram_size); if (((((chksum != 0U || (unsigned int )nv->id[0] != 73U) || (unsigned int )nv->id[1] != 83U) || (unsigned int )nv->id[2] != 80U) || (unsigned int )nv->id[3] != 32U) || (unsigned int )nv->nvram_version == 0U) { ql_log(2U, vha, 115, "Inconsistent NVRAM detected: checksum=0x%x id=%c version=0x%x.\n", chksum, (int )nv->id[0], (int )nv->nvram_version); ql_log(2U, vha, 116, "Falling back to functioning (yet invalid -- WWPN) defaults.\n"); memset((void *)nv, 0, (size_t )ha->nvram_size); nv->nvram_version = 1U; nv->version = 1U; nv->frame_payload_size = 2048U; nv->execution_throttle = 65535U; nv->exchange_count = 0U; nv->port_name[0] = 33U; nv->port_name[1] = (unsigned int )ha->port_no + 1U; nv->port_name[2] = 0U; nv->port_name[3] = 224U; nv->port_name[4] = 139U; nv->port_name[5] = 28U; nv->port_name[6] = 85U; nv->port_name[7] = 134U; nv->node_name[0] = 32U; nv->node_name[1] = 0U; nv->node_name[2] = 0U; nv->node_name[3] = 224U; nv->node_name[4] = 139U; nv->node_name[5] = 28U; nv->node_name[6] = 85U; nv->node_name[7] = 134U; nv->login_retry_count = 8U; nv->interrupt_delay_timer = 0U; nv->login_timeout = 0U; nv->firmware_options_1 = 24582U; nv->firmware_options_2 = 32U; nv->firmware_options_2 = nv->firmware_options_2 | 4096U; nv->firmware_options_3 = 16384U; nv->host_p = 3072U; nv->efi_parameters = 0U; nv->reset_delay = 5U; nv->max_luns_per_target = 128U; nv->port_down_retry_count = 30U; nv->link_down_timeout = 180U; nv->enode_mac[0] = 0U; nv->enode_mac[1] = 192U; nv->enode_mac[2] = 221U; nv->enode_mac[3] = 4U; nv->enode_mac[4] = 5U; nv->enode_mac[5] = (unsigned int )ha->port_no + 7U; rval = 1; } else { } if ((ha->device_type & 33554432U) != 0U) { nv->frame_payload_size = (unsigned int )nv->frame_payload_size & 65528U; } else { } qlt_81xx_config_nvram_stage1(vha, nv); memset((void *)icb, 0, (size_t )ha->init_cb_size); dptr1 = (uint8_t *)icb; dptr2 = (uint8_t *)(& nv->version); cnt = 28U; goto ldv_67173; ldv_67172: tmp___0 = dptr1; dptr1 = dptr1 + 1; tmp___1 = dptr2; 
dptr2 = dptr2 + 1; *tmp___0 = *tmp___1; ldv_67173: tmp___2 = cnt; cnt = (uint16_t )((int )cnt - 1); if ((unsigned int )tmp___2 != 0U) { goto ldv_67172; } else { } icb->login_retry_count = nv->login_retry_count; dptr1 = (uint8_t *)(& icb->interrupt_delay_timer); dptr2 = (uint8_t *)(& nv->interrupt_delay_timer); cnt = 16U; goto ldv_67176; ldv_67175: tmp___3 = dptr1; dptr1 = dptr1 + 1; tmp___4 = dptr2; dptr2 = dptr2 + 1; *tmp___3 = *tmp___4; ldv_67176: tmp___5 = cnt; cnt = (uint16_t )((int )cnt - 1); if ((unsigned int )tmp___5 != 0U) { goto ldv_67175; } else { } memcpy((void *)(& icb->enode_mac), (void const *)(& nv->enode_mac), 6UL); tmp___6 = memcmp((void const *)(& icb->enode_mac), (void const *)"", 6UL); if (tmp___6 == 0) { icb->enode_mac[0] = 0U; icb->enode_mac[1] = 192U; icb->enode_mac[2] = 221U; icb->enode_mac[3] = 4U; icb->enode_mac[4] = 5U; icb->enode_mac[5] = (unsigned int )ha->port_no + 7U; } else { } memcpy((void *)ha->ex_init_cb, (void const *)(& nv->ex_version), 64UL); qla2x00_set_model_info(vha, (uint8_t *)(& nv->model_name), 16UL, (char *)"QLE8XXX"); qlt_81xx_config_nvram_stage2(vha, icb); if ((nv->host_p & 32768U) != 0U) { memcpy((void *)(& icb->node_name), (void const *)(& nv->alternate_node_name), 8UL); memcpy((void *)(& icb->port_name), (void const *)(& nv->alternate_port_name), 8UL); } else { } if ((icb->firmware_options_1 & 16384U) == 0U) { memcpy((void *)(& icb->node_name), (void const *)(& icb->port_name), 8UL); icb->node_name[0] = (unsigned int )icb->node_name[0] & 240U; } else { } ha->flags.disable_risc_code_load = 0U; ha->flags.enable_lip_reset = 0U; ha->flags.enable_lip_full_login = (nv->host_p & 1024U) != 0U; ha->flags.enable_target_reset = (nv->host_p & 2048U) != 0U; ha->flags.enable_led_scheme = 0U; ha->flags.disable_serdes = (nv->host_p & 32U) != 0U; ha->operating_mode = (uint8_t )((icb->firmware_options_2 & 112U) >> 4); ha->serial0 = icb->port_name[5]; ha->serial1 = icb->port_name[6]; ha->serial2 = icb->port_name[7]; memcpy((void *)(& vha->node_name), (void const *)(& icb->node_name), 8UL); memcpy((void *)(& vha->port_name), (void const *)(& icb->port_name), 8UL); icb->execution_throttle = 65535U; ha->retry_count = (uint8_t )nv->login_retry_count; if ((int )nv->login_timeout < ql2xlogintimeout) { nv->login_timeout = (unsigned short )ql2xlogintimeout; } else { } if ((unsigned int )nv->login_timeout <= 3U) { nv->login_timeout = 4U; } else { } ha->login_timeout = (uint8_t )nv->login_timeout; icb->login_timeout = nv->login_timeout; ha->r_a_tov = 100U; ha->loop_reset_delay = (uint16_t )nv->reset_delay; if ((unsigned int )nv->link_down_timeout == 0U) { ha->loop_down_abort_time = 195U; } else { ha->link_down_timeout = (uint8_t )nv->link_down_timeout; ha->loop_down_abort_time = ~ ((int )ha->link_down_timeout); } ha->port_down_retry_count = (int )nv->port_down_retry_count; if (qlport_down_retry != 0) { ha->port_down_retry_count = qlport_down_retry; } else { } ha->login_retry_count = (uint32_t )nv->login_retry_count; if (ha->port_down_retry_count == (int )nv->port_down_retry_count && ha->port_down_retry_count > 3) { ha->login_retry_count = (uint32_t )ha->port_down_retry_count; } else if (ha->port_down_retry_count > (int )ha->login_retry_count) { ha->login_retry_count = (uint32_t )ha->port_down_retry_count; } else { } if (ql2xloginretrycount != 0) { ha->login_retry_count = (uint32_t )ql2xloginretrycount; } else { } if (*((unsigned long *)vha->hw + 2UL) == 0UL && (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) || ((ha->device_type & 524288U) != 0U 
|| (ha->device_type & 1048576U) != 0U))) { icb->firmware_options_2 = icb->firmware_options_2 | 4194304U; } else { } if (*((unsigned long *)vha + 19UL) == 0UL) { ha->zio_mode = (unsigned int )((uint16_t )icb->firmware_options_2) & 15U; ha->zio_timer = (unsigned int )icb->interrupt_delay_timer != 0U ? icb->interrupt_delay_timer : 2U; } else { } icb->firmware_options_2 = icb->firmware_options_2 & 4294967280U; vha->flags.process_response_queue = 0U; if ((unsigned int )ha->zio_mode != 0U) { ha->zio_mode = 6U; ql_log(2U, vha, 117, "ZIO mode %d enabled; timer delay (%d us).\n", (int )ha->zio_mode, (int )ha->zio_timer * 100); icb->firmware_options_2 = icb->firmware_options_2 | (uint32_t )ha->zio_mode; icb->interrupt_delay_timer = ha->zio_timer; vha->flags.process_response_queue = 1U; } else { } if (rval != 0) { ql_log(1U, vha, 118, "NVRAM configuration failed.\n"); } else { } return (rval); } } int qla82xx_restart_isp(scsi_qla_host_t *vha ) { int status ; int rval ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; struct scsi_qla_host *vp ; unsigned long flags ; int tmp ; raw_spinlock_t *tmp___0 ; struct list_head const *__mptr ; raw_spinlock_t *tmp___1 ; struct list_head const *__mptr___0 ; { ha = vha->hw; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); status = qla2x00_init_rings(vha); if (status == 0) { clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); ha->flags.chip_reset_done = 1U; status = qla2x00_fw_ready(vha); if (status == 0) { qla2x00_marker(vha, req, rsp, 0, 0ULL, 2); vha->flags.online = 1U; set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } if ((vha->device_flags & 2U) != 0U) { status = 0; } else { } } else { } if (status == 0) { clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); tmp = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp == 0) { vha->marker_needed = 1U; } else { } (*((ha->isp_ops)->enable_intrs))(ha); ha->isp_abort_cnt = 0U; clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); status = qla82xx_check_md_needed(vha); if ((unsigned long )ha->fce != (unsigned long )((void *)0)) { ha->flags.fce_enabled = 1U; memset(ha->fce, 0, (size_t )(ha->fce_bufs * 1024U)); rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, (int )((uint16_t )ha->fce_bufs), (uint16_t *)(& ha->fce_mb), & ha->fce_bufs); if (rval != 0) { ql_log(1U, vha, 32769, "Unable to reinitialize FCE (%d).\n", rval); ha->flags.fce_enabled = 0U; } else { } } else { } if ((unsigned long )ha->eft != (unsigned long )((void *)0)) { memset(ha->eft, 0, 65536UL); rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, 4); if (rval != 0) { ql_log(1U, vha, 32784, "Unable to reinitialize EFT (%d).\n", rval); } else { } } else { } } else { } if (status == 0) { ql_dbg(4194304U, vha, 32785, "qla82xx_restart_isp succeeded.\n"); tmp___0 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___0); __mptr = (struct list_head const *)ha->vp_list.next; vp = (struct scsi_qla_host *)__mptr; goto ldv_67199; ldv_67198: ; if ((unsigned int )vp->vp_idx != 0U) { atomic_inc(& vp->vref_count); spin_unlock_irqrestore(& ha->vport_slock, flags); qla2x00_vp_abort_isp(vp); tmp___1 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___1); atomic_dec(& vp->vref_count); } else { } __mptr___0 = (struct list_head const *)vp->list.next; vp = (struct scsi_qla_host *)__mptr___0; ldv_67199: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_67198; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); } else { 
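/* qla82xx_restart_isp() failure path: a non-zero status from
 * qla2x00_init_rings(), qla2x00_fw_ready() or qla82xx_check_md_needed() above
 * reaches this branch; the failure is logged and the status is returned to the
 * caller unchanged. (The FCE/EFT trace re-enable calls above only warn on error
 * and do not affect status.) */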
ql_log(1U, vha, 32790, "qla82xx_restart_isp **** FAILED ****.\n"); } return (status); } } void qla81xx_update_fw_options(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; if (ql2xetsenable == 0) { return; } else { } memset((void *)(& ha->fw_options), 0, 32UL); ha->fw_options[2] = (uint16_t )((unsigned int )ha->fw_options[2] | 512U); qla2x00_set_fw_options(vha, (uint16_t *)(& ha->fw_options)); return; } } static int qla24xx_get_fcp_prio(scsi_qla_host_t *vha , fc_port_t *fcport ) { int i ; int entries ; uint8_t pid_match ; uint8_t wwn_match ; int priority ; uint32_t pid1 ; uint32_t pid2 ; uint64_t wwn1 ; uint64_t wwn2 ; struct qla_fcp_prio_entry *pri_entry ; struct qla_hw_data *ha ; { ha = vha->hw; if ((unsigned long )ha->fcp_prio_cfg == (unsigned long )((struct qla_fcp_prio_cfg *)0) || *((unsigned long *)ha + 2UL) == 0UL) { return (-1); } else { } priority = -1; entries = (int )(ha->fcp_prio_cfg)->num_entries; pri_entry = (struct qla_fcp_prio_entry *)(& (ha->fcp_prio_cfg)->entry); i = 0; goto ldv_67223; ldv_67222: wwn_match = 0U; pid_match = wwn_match; if (((int )pri_entry->flags & 1) == 0) { pri_entry = pri_entry + 1; goto ldv_67220; } else { } if (((int )pri_entry->flags & 4) != 0) { pid1 = pri_entry->src_pid & 16777215U; pid2 = vha->d_id.b24; if (pid1 == 16777215U) { pid_match = (uint8_t )((int )pid_match + 1); } else if (pid1 == pid2) { pid_match = (uint8_t )((int )pid_match + 1); } else { } } else { } if (((int )pri_entry->flags & 8) != 0) { pid1 = pri_entry->dst_pid & 16777215U; pid2 = fcport->d_id.b24; if (pid1 == 16777215U) { pid_match = (uint8_t )((int )pid_match + 1); } else if (pid1 == pid2) { pid_match = (uint8_t )((int )pid_match + 1); } else { } } else { } if (((int )pri_entry->flags & 64) != 0) { wwn1 = wwn_to_u64((u8 *)(& vha->port_name)); wwn2 = wwn_to_u64((u8 *)(& pri_entry->src_wwpn)); if (wwn2 == 0xffffffffffffffffULL) { wwn_match = (uint8_t )((int )wwn_match + 1); } else if (wwn1 == wwn2) { wwn_match = (uint8_t )((int )wwn_match + 1); } else { } } else { } if (((int )pri_entry->flags & 128) != 0) { wwn1 = wwn_to_u64((u8 *)(& fcport->port_name)); wwn2 = wwn_to_u64((u8 *)(& pri_entry->dst_wwpn)); if (wwn2 == 0xffffffffffffffffULL) { wwn_match = (uint8_t )((int )wwn_match + 1); } else if (wwn1 == wwn2) { wwn_match = (uint8_t )((int )wwn_match + 1); } else { } } else { } if ((unsigned int )pid_match == 2U || (unsigned int )wwn_match == 2U) { if (((int )pri_entry->flags & 2) != 0) { priority = (int )pri_entry->tag; } else { } goto ldv_67221; } else { } pri_entry = pri_entry + 1; ldv_67220: i = i + 1; ldv_67223: ; if (i < entries) { goto ldv_67222; } else { } ldv_67221: ; return (priority); } } int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha , fc_port_t *fcport ) { int ret ; int priority ; uint16_t mb[5U] ; { if ((unsigned int )fcport->port_type != 5U || (unsigned int )fcport->loop_id == 4096U) { return (258); } else { } priority = qla24xx_get_fcp_prio(vha, fcport); if (priority < 0) { return (258); } else { } if (((vha->hw)->device_type & 16384U) != 0U || ((vha->hw)->device_type & 262144U) != 0U) { fcport->fcp_prio = (unsigned int )((uint8_t )priority) & 15U; return (0); } else { } ret = qla24xx_set_fcp_prio(vha, (int )fcport->loop_id, (int )((uint16_t )priority), (uint16_t *)(& mb)); if (ret == 0) { if ((int )fcport->fcp_prio != priority) { ql_dbg(8388608U, vha, 28830, "Updated FCP_CMND priority - value=%d loop_id=%d port_id=%02x%02x%02x.\n", priority, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int 
)fcport->d_id.b.al_pa); } else { } fcport->fcp_prio = (unsigned int )((uint8_t )priority) & 15U; } else { ql_dbg(8388608U, vha, 28751, "Unable to update FCP_CMND priority - ret=0x%x for loop_id=%d port_id=%02x%02x%02x.\n", ret, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } return (ret); } } int qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha ) { int ret ; fc_port_t *fcport ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { ret = 258; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_67241; ldv_67240: ret = qla24xx_update_fcport_fcp_prio(vha, fcport); __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_67241: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_67240; } else { } return (ret); } } int reg_timer_15(struct timer_list *timer ) { { ldv_timer_list_15 = timer; ldv_timer_state_15 = 1; return (0); } } void activate_pending_timer_15(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_15 == (unsigned long )timer) { if (ldv_timer_state_15 == 2 || pending_flag != 0) { ldv_timer_list_15 = timer; ldv_timer_list_15->data = data; ldv_timer_state_15 = 1; } else { } return; } else { } reg_timer_15(timer); ldv_timer_list_15->data = data; return; } } void choose_timer_15(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_15 = 2; return; } } void disable_suitable_timer_15(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_15) { ldv_timer_state_15 = 0; return; } else { } return; } } bool ldv_queue_work_on_61(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_62(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_63(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_64(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_65(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_66(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, 
dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } int ldv_del_timer_67(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type___4 ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_21(ldv_func_arg1); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern struct pv_irq_ops pv_irq_ops ; __inline static __le16 __cpu_to_le16p(__u16 const *p ) { { return ((__le16 )*p); } } extern size_t strlen(char const * ) ; extern void *kmemdup(void const * , size_t , gfp_t ) ; __inline static unsigned long arch_local_save_flags(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static void arch_local_irq_restore(unsigned long f ) { unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.restore_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (836), "i" (12UL)); ldv_4870: ; goto ldv_4870; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (44UL), [paravirt_opptr] "i" (& pv_irq_ops.restore_fl.func), [paravirt_clobber] "i" (1), "D" (f): "memory", "cc"); return; } } __inline static void arch_local_irq_disable(void) { unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.irq_disable.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (841), "i" (12UL)); ldv_4879: ; goto ldv_4879; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (45UL), [paravirt_opptr] "i" (& pv_irq_ops.irq_disable.func), [paravirt_clobber] "i" (1): "memory", 
"cc"); return; } } __inline static unsigned long arch_local_irq_save(void) { unsigned long f ; { f = arch_local_save_flags(); arch_local_irq_disable(); return (f); } } __inline static int arch_irqs_disabled_flags(unsigned long flags ) { { return ((flags & 512UL) == 0UL); } } extern void trace_hardirqs_on(void) ; extern void trace_hardirqs_off(void) ; extern unsigned long wait_for_completion_timeout(struct completion * , unsigned long ) ; bool ldv_queue_work_on_79(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_81(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_80(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_83(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_82(struct workqueue_struct *ldv_func_arg1 ) ; void disable_suitable_timer_16(struct timer_list *timer ) ; void activate_pending_timer_16(struct timer_list *timer , unsigned long data , int pending_flag ) ; int reg_timer_16(struct timer_list *timer ) ; void choose_timer_16(struct timer_list *timer ) ; extern void int_to_scsilun(u64 , struct scsi_lun * ) ; int ldv_scsi_add_host_with_dma_84(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; __inline static void fc_vport_set_state(struct fc_vport *vport , enum fc_vport_state new_state ) { { if ((unsigned int )new_state != 0U && (unsigned int )new_state != 4U) { vport->vport_last_state = vport->vport_state; } else { } vport->vport_state = new_state; return; } } int qla2x00_get_thermal_temp(scsi_qla_host_t *vha , uint16_t *temp ) ; int qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha ) ; int qla24xx_control_vp(scsi_qla_host_t *vha , int cmd ) ; int qla24xx_modify_vp_config(scsi_qla_host_t *vha ) ; void qla24xx_report_id_acquisition(scsi_qla_host_t *vha , struct vp_rpt_id_entry_24xx *rptid_entry ) ; int qla2x00_dump_ram(scsi_qla_host_t *vha , dma_addr_t req_dma , uint32_t addr , uint32_t size ) ; int qla2x00_issue_iocb(scsi_qla_host_t *vha , void *buffer , dma_addr_t phys_addr , size_t size ) ; int qla2x00_get_node_name_list(scsi_qla_host_t *vha , void **out_data , int *out_len ) ; int qla2x00_send_sns(scsi_qla_host_t *vha , dma_addr_t sns_phys_address , uint16_t cmd_size , size_t buf_size ) ; int qla2x00_get_fcal_position_map(scsi_qla_host_t *vha , char *pos_map ) ; int qla2x00_get_link_status(scsi_qla_host_t *vha , uint16_t loop_id , struct link_statistics *stats , dma_addr_t stats_dma ) ; int qla24xx_get_isp_stats(scsi_qla_host_t *vha , struct link_statistics *stats , dma_addr_t stats_dma ) ; int qla2x00_system_error(scsi_qla_host_t *vha ) ; int qla2x00_write_serdes_word(scsi_qla_host_t *vha , uint16_t addr , uint16_t data ) ; int qla2x00_read_serdes_word(scsi_qla_host_t *vha , uint16_t addr , uint16_t *data ) ; int qla8044_write_serdes_word(scsi_qla_host_t *vha , uint32_t addr , uint32_t data ) ; int qla8044_read_serdes_word(scsi_qla_host_t *vha , uint32_t addr , uint32_t *data ) ; int qla2x00_read_sfp(scsi_qla_host_t *vha , dma_addr_t sfp_dma , uint8_t *sfp , uint16_t dev , uint16_t off , uint16_t len , uint16_t opt ) ; int qla2x00_write_sfp(scsi_qla_host_t *vha , dma_addr_t sfp_dma , uint8_t *sfp , uint16_t dev , uint16_t off , uint16_t len , uint16_t opt ) ; int 
qla81xx_fac_do_write_enable(scsi_qla_host_t *vha , int enable ) ; int qla81xx_fac_erase_sector(scsi_qla_host_t *vha , uint32_t start , uint32_t finish ) ; int qla2x00_get_xgmac_stats(scsi_qla_host_t *vha , dma_addr_t stats_dma , uint16_t size_in_bytes , uint16_t *actual_size ) ; int qla2x00_get_dcbx_params(scsi_qla_host_t *vha , dma_addr_t tlv_dma , uint16_t size ) ; int qla81xx_set_port_config(scsi_qla_host_t *vha , uint16_t *mb ) ; int qla2x00_port_logout(scsi_qla_host_t *vha , struct fc_port *fcport ) ; void ql_dump_regs(uint32_t level , scsi_qla_host_t *vha , int32_t id ) ; int qla2x00_loopback_test(scsi_qla_host_t *vha , struct msg_echo_lb *mreq , uint16_t *mresp ) ; int qla2x00_echo_test(scsi_qla_host_t *vha , struct msg_echo_lb *mreq , uint16_t *mresp ) ; void qla82xx_poll(int irq , void *dev_id ) ; int qla82xx_mbx_intr_enable(scsi_qla_host_t *vha ) ; int qla82xx_mbx_intr_disable(scsi_qla_host_t *vha ) ; int qla81xx_set_led_config(scsi_qla_host_t *vha , uint16_t *led_cfg ) ; int qla81xx_get_led_config(scsi_qla_host_t *vha , uint16_t *led_cfg ) ; int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha , int enable ) ; int qla82xx_read_temperature(scsi_qla_host_t *vha ) ; int qla8044_read_temperature(scsi_qla_host_t *vha ) ; int qla84xx_reset_chip(scsi_qla_host_t *vha , uint16_t enable_diagnostic ) ; int qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha , void *buffer , dma_addr_t phys_addr , size_t size , uint32_t tov ) ; int qla2x00_get_idma_speed(scsi_qla_host_t *vha , uint16_t loop_id , uint16_t *port_speed , uint16_t *mb ) ; int qla82xx_md_get_template_size(scsi_qla_host_t *vha ) ; int qla82xx_md_get_template(scsi_qla_host_t *vha ) ; int qla8044_md_get_template(scsi_qla_host_t *vha ) ; void qlt_modify_vp_config(struct scsi_qla_host *vha , struct vp_config_entry_24xx *vpmod ) ; __inline static void qla2x00_poll(struct rsp_que *rsp ) { unsigned long flags ; struct qla_hw_data *ha ; int tmp ; { ha = rsp->hw; flags = arch_local_irq_save(); trace_hardirqs_off(); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { qla82xx_poll(0, (void *)rsp); } else { (*((ha->isp_ops)->intr_handler))(0, (void *)rsp); } tmp = arch_irqs_disabled_flags(flags); if (tmp != 0) { arch_local_irq_restore(flags); trace_hardirqs_off(); } else { trace_hardirqs_on(); arch_local_irq_restore(flags); } return; } } __inline static uint8_t *host_to_fcp_swap(uint8_t *fcp , uint32_t bsize ) { uint32_t *ifcp ; uint32_t *ofcp ; uint32_t iter ; uint32_t *tmp ; uint32_t *tmp___0 ; __u32 tmp___1 ; { ifcp = (uint32_t *)fcp; ofcp = (uint32_t *)fcp; iter = bsize >> 2; goto ldv_65657; ldv_65656: tmp = ofcp; ofcp = ofcp + 1; tmp___0 = ifcp; ifcp = ifcp + 1; tmp___1 = __fswab32(*tmp___0); *tmp = tmp___1; iter = iter - 1U; ldv_65657: ; if (iter != 0U) { goto ldv_65656; } else { } return (fcp); } } static int qla2x00_mailbox_command(scsi_qla_host_t *vha , mbx_cmd_t *mcp ) { int rval ; int i ; unsigned long flags ; device_reg_t *reg ; uint8_t abort_active ; uint8_t io_lock_on ; uint16_t command ; uint16_t *iptr ; uint16_t *optr ; uint32_t cnt ; uint32_t mboxes ; uint16_t *mbx_reg ; unsigned long wait_time ; struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; unsigned long tmp___1 ; raw_spinlock_t *tmp___2 ; unsigned int tmp___3 ; raw_spinlock_t *tmp___4 ; unsigned long tmp___5 ; unsigned int tmp___6 ; uint16_t *iptr2 ; uint16_t mb0 ; uint32_t ictrl ; unsigned short tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; unsigned int 
tmp___15 ; unsigned int tmp___16 ; unsigned int tmp___17 ; uint16_t *tmp___18 ; unsigned short tmp___19 ; { flags = 0UL; command = 0U; ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; ql_dbg(536870912U, vha, 4096, "Entered %s.\n", "qla2x00_mailbox_command"); if ((ha->pdev)->error_state > 2U) { ql_log(1U, vha, 4097, "error_state is greater than pci_channel_io_frozen, exiting.\n"); return (256); } else { } if ((vha->device_flags & 32U) != 0U) { ql_log(1U, vha, 4098, "Device in failed state, exiting.\n"); return (256); } else { } reg = ha->iobase; io_lock_on = (uint8_t )base_vha->flags.init_done; rval = 0; tmp___0 = constant_test_bit(3L, (unsigned long const volatile *)(& base_vha->dpc_flags)); abort_active = (uint8_t )tmp___0; if (*((unsigned long *)ha + 2UL) != 0UL) { ql_log(1U, vha, 4099, "Perm failure on EEH timeout MBX, exiting.\n"); return (256); } else { } if (((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) && *((unsigned long *)ha + 2UL) != 0UL) { mcp->mb[0] = 16395U; ql_log(1U, vha, 4100, "FW hung = %d.\n", (int )ha->flags.isp82xx_fw_hung); return (256); } else { } tmp___1 = wait_for_completion_timeout(& ha->mbx_cmd_comp, (unsigned long )(mcp->tov * 250U)); if (tmp___1 == 0UL) { ql_log(1U, vha, 4101, "Cmd access timeout, cmd=0x%x, Exiting.\n", (int )mcp->mb[0]); return (256); } else { } ha->flags.mbox_busy = 1U; ha->mcp = mcp; ql_dbg(536870912U, vha, 4102, "Prepare to issue mbox cmd=0x%x.\n", (int )mcp->mb[0]); tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { optr = (uint16_t *)(& reg->isp82.mailbox_in); } else if ((ha->device_type & 134217728U) != 0U && ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U)) { optr = & reg->isp24.mailbox0; } else { optr = (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->isp.u.isp2100.mailbox0 : & reg->isp.u.isp2300.mailbox0; } iptr = (uint16_t *)(& mcp->mb); command = mcp->mb[0]; mboxes = mcp->out_mb; ql_dbg(536870912U, vha, 4369, "Mailbox registers (OUT):\n"); cnt = 0U; goto ldv_65775; ldv_65774: ; if ((ha->device_type & 2U) != 0U && cnt == 8U) { optr = (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->isp.u_end.isp2200.mailbox8 : & reg->isp.u.isp2300.mailbox0 + 8UL; } else { } if ((int )mboxes & 1) { ql_dbg(536870912U, vha, 4370, "mbox[%d]<-0x%04x\n", cnt, (int )*iptr); writew((int )*iptr, (void volatile *)optr); } else { } mboxes = mboxes >> 1; optr = optr + 1; iptr = iptr + 1; cnt = cnt + 1U; ldv_65775: ; if ((uint32_t )ha->mbx_count > cnt) { goto ldv_65774; } else { } ql_dbg(537001984U, vha, 4375, "I/O Address = %p.\n", optr); ha->flags.mbox_int = 0U; clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); ql_dbg(536870912U, vha, 4111, "Going to unlock irq & waiting for interrupts. 
jiffies=%lx.\n", jiffies); if (((unsigned int )abort_active == 0U && (unsigned int )io_lock_on != 0U) || ((ha->device_type & 8192U) != 0U && *((unsigned long *)ha + 2UL) != 0UL)) { set_bit(2L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { tmp___3 = readl((void const volatile *)(& reg->isp82.hint)); if ((int )tmp___3 & 1) { spin_unlock_irqrestore(& ha->hardware_lock, flags); ha->flags.mbox_busy = 0U; ql_dbg(536870912U, vha, 4112, "Pending mailbox timeout, exiting.\n"); rval = 256; goto premature_exit; } else { } writel(1U, (void volatile *)(& reg->isp82.hint)); } else if ((ha->device_type & 134217728U) != 0U) { writel(1342177280U, (void volatile *)(& reg->isp24.hccr)); } else { writew(20480, (void volatile *)(& reg->isp.hccr)); } spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___5 = wait_for_completion_timeout(& ha->mbx_intr_comp, (unsigned long )(mcp->tov * 250U)); if (tmp___5 == 0UL) { ql_dbg(536870912U, vha, 4474, "cmd=%x Timeout.\n", (int )command); tmp___4 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___4); clear_bit(2L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } } else { ql_dbg(536870912U, vha, 4113, "Cmd=%x Polling Mode.\n", (int )command); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { tmp___6 = readl((void const volatile *)(& reg->isp82.hint)); if ((int )tmp___6 & 1) { spin_unlock_irqrestore(& ha->hardware_lock, flags); ha->flags.mbox_busy = 0U; ql_dbg(536870912U, vha, 4114, "Pending mailbox timeout, exiting.\n"); rval = 256; goto premature_exit; } else { } writel(1U, (void volatile *)(& reg->isp82.hint)); } else if ((ha->device_type & 134217728U) != 0U) { writel(1342177280U, (void volatile *)(& reg->isp24.hccr)); } else { writew(20480, (void volatile *)(& reg->isp.hccr)); } spin_unlock_irqrestore(& ha->hardware_lock, flags); wait_time = (unsigned long )(mcp->tov * 250U) + (unsigned long )jiffies; goto ldv_65789; ldv_65788: ; if ((long )(wait_time - (unsigned long )jiffies) < 0L) { goto ldv_65787; } else { } qla2x00_poll(*(ha->rsp_q_map)); if (*((unsigned long *)ha + 2UL) == 0UL && ((ha->device_type & 2U) == 0U || (unsigned int )command != 11U)) { msleep(10U); } else { } ldv_65789: ; if (*((unsigned long *)ha + 2UL) == 0UL) { goto ldv_65788; } else { } ldv_65787: ql_dbg(536870912U, vha, 4115, "Waited %d sec.\n", (unsigned int )((((unsigned long )(mcp->tov * 250U) - wait_time) + (unsigned long )jiffies) / 250UL)); } if (*((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(536870912U, vha, 4116, "Cmd=%x completed.\n", (int )command); ha->flags.mbox_int = 0U; clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) && *((unsigned long *)ha + 2UL) != 0UL) { ha->flags.mbox_busy = 0U; mcp->mb[0] = 16395U; ha->mcp = (mbx_cmd_t *)0; rval = 258; ql_log(1U, vha, 4117, "FW hung = %d.\n", (int )ha->flags.isp82xx_fw_hung); goto premature_exit; } else { } if ((unsigned int )ha->mailbox_out[0] != 16384U) { rval = 258; } else { } iptr2 = (uint16_t *)(& mcp->mb); iptr = (uint16_t *)(& ha->mailbox_out); mboxes = mcp->in_mb; ql_dbg(536870912U, vha, 4371, "Mailbox registers (IN):\n"); cnt = 0U; goto ldv_65792; ldv_65791: ; if ((int )mboxes & 1) { *iptr2 = *iptr; ql_dbg(536870912U, vha, 4372, "mbox[%d]->0x%04x\n", cnt, (int )*iptr2); } else { } mboxes = mboxes >> 1; iptr2 = iptr2 + 1; iptr = iptr + 1; cnt = cnt + 
1U; ldv_65792: ; if ((uint32_t )ha->mbx_count > cnt) { goto ldv_65791; } else { } } else { if ((ha->device_type & 134217728U) != 0U) { mb0 = readw((void const volatile *)(& reg->isp24.mailbox0)); ictrl = readl((void const volatile *)(& reg->isp24.ictrl)); } else { mb0 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->isp.u.isp2100.mailbox0 : & reg->isp.u.isp2300.mailbox0)); tmp___7 = readw((void const volatile *)(& reg->isp.ictrl)); ictrl = (uint32_t )tmp___7; } ql_dbg(537001984U, vha, 4377, "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx mb[0]=0x%x\n", (int )command, ictrl, jiffies, (int )mb0); ql_dump_regs(537001984U, vha, 4121); if ((unsigned int )mcp->mb[0] != 42U) { (*((ha->isp_ops)->fw_dump))(vha, 0); } else { } rval = 256; } ha->flags.mbox_busy = 0U; ha->mcp = (mbx_cmd_t *)0; if (((unsigned int )abort_active != 0U || (unsigned int )io_lock_on == 0U) && ((ha->device_type & 8192U) == 0U || *((unsigned long *)ha + 2UL) == 0UL)) { ql_dbg(536870912U, vha, 4122, "Checking for additional resp interrupt.\n"); qla2x00_poll(*(ha->rsp_q_map)); } else { } if (rval == 256 && (unsigned int )mcp->mb[0] != 42U) { if (((unsigned int )io_lock_on == 0U || ((int )mcp->flags & 4) != 0) || *((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(536870912U, vha, 4123, "Timeout, schedule isp_abort_needed.\n"); tmp___8 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___8 == 0) { tmp___9 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___9 == 0) { tmp___10 = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___10 == 0) { if ((ha->device_type & 16384U) != 0U) { ql_dbg(536870912U, vha, 4394, "disabling pause transmit on port 0 & 1.\n"); qla82xx_wr_32(ha, 106954904UL, 9U); } else { } ql_log(2U, base_vha, 4124, "Mailbox cmd timeout occurred, cmd=0x%x, mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP abort.\n", (int )command, (int )mcp->mb[0], (int )ha->flags.eeh_busy); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else { } } else { } } else { } } else if ((unsigned int )abort_active == 0U) { ql_dbg(536870912U, vha, 4125, "Timeout, calling abort_isp.\n"); tmp___12 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___12 == 0) { tmp___13 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___13 == 0) { tmp___14 = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___14 == 0) { if ((ha->device_type & 16384U) != 0U) { ql_dbg(536870912U, vha, 4395, "disabling pause transmit on port 0 & 1.\n"); qla82xx_wr_32(ha, 106954904UL, 9U); } else { } ql_log(2U, base_vha, 4126, "Mailbox cmd timeout occurred, cmd=0x%x, mb[0]=0x%x. 
Scheduling ISP abort ", (int )command, (int )mcp->mb[0]); set_bit(3L, (unsigned long volatile *)(& vha->dpc_flags)); clear_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); complete(& ha->mbx_cmd_comp); tmp___11 = (*((ha->isp_ops)->abort_isp))(vha); if (tmp___11 != 0) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } clear_bit(3L, (unsigned long volatile *)(& vha->dpc_flags)); ql_dbg(536870912U, vha, 4127, "Finished abort_isp.\n"); goto mbx_done; } else { } } else { } } else { } } else { } } else { } premature_exit: complete(& ha->mbx_cmd_comp); mbx_done: ; if (rval != 0) { ql_dbg(268435456U, base_vha, 4128, "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n", (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2], (int )mcp->mb[3], (int )command); tmp___15 = readl((void const volatile *)(& reg->isp24.istatus)); tmp___16 = readl((void const volatile *)(& reg->isp24.ictrl)); tmp___17 = readl((void const volatile *)(& reg->isp24.host_status)); ql_dbg(268435456U, vha, 4373, "host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n", tmp___17, ha->fw_dump_cap_flags, tmp___16, tmp___15); mbx_reg = & reg->isp24.mailbox0; i = 0; goto ldv_65798; ldv_65797: tmp___18 = mbx_reg; mbx_reg = mbx_reg + 1; tmp___19 = readw((void const volatile *)tmp___18); ql_dbg(268468224U, vha, 4374, "mbox[%d] 0x%04x\n", i, (int )tmp___19); i = i + 1; ldv_65798: ; if (i <= 5) { goto ldv_65797; } else { } } else { ql_dbg(536870912U, base_vha, 4129, "Done %s.\n", "qla2x00_mailbox_command"); } return (rval); } } int qla2x00_load_ram(scsi_qla_host_t *vha , dma_addr_t req_dma , uint32_t risc_addr , uint32_t risc_code_size ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; ql_dbg(536903680U, vha, 4130, "Entered %s.\n", "qla2x00_load_ram"); if ((unsigned int )((unsigned short )(risc_addr >> 16)) != 0U || (ha->device_type & 134217728U) != 0U) { mcp->mb[0] = 11U; mcp->mb[8] = (unsigned short )(risc_addr >> 16); mcp->out_mb = 257U; } else { mcp->mb[0] = 9U; mcp->out_mb = 1U; } mcp->mb[1] = (unsigned short )risc_addr; mcp->mb[2] = (unsigned short )((unsigned int )req_dma >> 16); mcp->mb[3] = (unsigned short )req_dma; mcp->mb[6] = (unsigned short )((unsigned int )(req_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(req_dma >> 32ULL); mcp->out_mb = mcp->out_mb | 206U; if ((ha->device_type & 134217728U) != 0U) { mcp->mb[4] = (unsigned short )(risc_code_size >> 16); mcp->mb[5] = (unsigned short )risc_code_size; mcp->out_mb = mcp->out_mb | 48U; } else { mcp->mb[4] = (unsigned short )risc_code_size; mcp->out_mb = mcp->out_mb | 16U; } mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4131, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4132, "Done %s.\n", "qla2x00_load_ram"); } return (rval); } } int qla2x00_execute_fw(scsi_qla_host_t *vha , uint32_t risc_addr ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct nvram_81xx *nv ; { ha = vha->hw; mcp = & mc; ql_dbg(536903680U, vha, 4133, "Entered %s.\n", "qla2x00_execute_fw"); mcp->mb[0] = 2U; mcp->out_mb = 1U; mcp->in_mb = 1U; if ((ha->device_type & 134217728U) != 0U) { mcp->mb[1] = (unsigned short )(risc_addr >> 16); mcp->mb[2] = (unsigned short )risc_addr; mcp->mb[3] = 0U; if ((((ha->device_type & 2048U) != 0U || (ha->device_type & 8192U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || 
((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { nv = (struct nvram_81xx *)ha->nvram; mcp->mb[4] = (unsigned int )((uint16_t )nv->enhanced_features) & 1U; } else { mcp->mb[4] = 0U; } mcp->out_mb = mcp->out_mb | 30U; mcp->in_mb = mcp->in_mb | 2U; } else { mcp->mb[1] = (unsigned short )risc_addr; mcp->out_mb = mcp->out_mb | 2U; if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { mcp->mb[2] = 0U; mcp->out_mb = mcp->out_mb | 4U; } else { } } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4134, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else if ((ha->device_type & 134217728U) != 0U) { ql_dbg(536903680U, vha, 4135, "Done exchanges=%x.\n", (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4136, "Done %s.\n", "qla2x00_execute_fw"); } return (rval); } } int qla2x00_get_fw_version(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4137, "Entered %s.\n", "qla2x00_get_fw_version"); mcp->mb[0] = 8U; mcp->out_mb = 1U; mcp->in_mb = 127U; if ((((vha->hw)->device_type & 8192U) != 0U || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { mcp->in_mb = mcp->in_mb | 16128U; } else { } if ((ha->device_type & 134217728U) != 0U) { mcp->in_mb = mcp->in_mb | 229376U; } else { } if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { mcp->in_mb = mcp->in_mb | 3932160U; } else { } mcp->flags = 0U; mcp->tov = 30U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { goto failed; } else { } ha->fw_major_version = mcp->mb[1]; ha->fw_minor_version = mcp->mb[2]; ha->fw_subminor_version = mcp->mb[3]; ha->fw_attributes = mcp->mb[6]; if ((int )(vha->hw)->device_type & 1 || ((vha->hw)->device_type & 2U) != 0U) { ha->fw_memory_size = 131071U; } else { ha->fw_memory_size = (uint32_t )(((int )mcp->mb[5] << 16) | (int )mcp->mb[4]); } if ((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { ha->mpi_version[0] = (uint8_t )mcp->mb[10]; ha->mpi_version[1] = (uint8_t )((int )mcp->mb[11] >> 8); ha->mpi_version[2] = (uint8_t )mcp->mb[11]; ha->mpi_capabilities = (uint32_t )(((int )mcp->mb[12] << 16) | (int )mcp->mb[13]); ha->phy_version[0] = (uint8_t )mcp->mb[8]; ha->phy_version[1] = (uint8_t )((int )mcp->mb[9] >> 8); ha->phy_version[2] = (uint8_t )mcp->mb[9]; } else { } if ((ha->device_type & 134217728U) != 0U) { ha->fw_attributes_h = mcp->mb[15]; ha->fw_attributes_ext[0] = mcp->mb[16]; ha->fw_attributes_ext[1] = mcp->mb[17]; ql_dbg(536903680U, vha, 4409, "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", "qla2x00_get_fw_version", (int )mcp->mb[15], (int )mcp->mb[6]); ql_dbg(536903680U, vha, 4399, "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", "qla2x00_get_fw_version", (int )mcp->mb[17], (int )mcp->mb[16]); } else { } if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { ha->fw_shared_ram_start = (uint32_t )(((int )mcp->mb[19] << 16) | (int )mcp->mb[18]); ha->fw_shared_ram_end = (uint32_t )(((int )mcp->mb[21] << 16) | (int )mcp->mb[20]); } else { } failed: ; if (rval != 0) { ql_dbg(536870912U, vha, 4138, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4139, "Done %s.\n", "qla2x00_get_fw_version"); } return (rval); } } int qla2x00_get_fw_options(scsi_qla_host_t *vha , uint16_t *fwopts ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, 
vha, 4140, "Entered %s.\n", "qla2x00_get_fw_options"); mcp->mb[0] = 40U; mcp->out_mb = 1U; mcp->in_mb = 15U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4141, "Failed=%x.\n", rval); } else { *fwopts = mcp->mb[0]; *(fwopts + 1UL) = mcp->mb[1]; *(fwopts + 2UL) = mcp->mb[2]; *(fwopts + 3UL) = mcp->mb[3]; ql_dbg(536903680U, vha, 4142, "Done %s.\n", "qla2x00_get_fw_options"); } return (rval); } } int qla2x00_set_fw_options(scsi_qla_host_t *vha , uint16_t *fwopts ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4143, "Entered %s.\n", "qla2x00_set_fw_options"); mcp->mb[0] = 56U; mcp->mb[1] = *(fwopts + 1UL); mcp->mb[2] = *(fwopts + 2UL); mcp->mb[3] = *(fwopts + 3UL); mcp->out_mb = 15U; mcp->in_mb = 1U; if (((vha->hw)->device_type & 134217728U) != 0U) { mcp->in_mb = mcp->in_mb | 2U; } else { mcp->mb[10] = *(fwopts + 10UL); mcp->mb[11] = *(fwopts + 11UL); mcp->mb[12] = 0U; mcp->out_mb = mcp->out_mb | 7168U; } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); *fwopts = mcp->mb[0]; if (rval != 0) { ql_dbg(536870912U, vha, 4144, "Failed=%x (%x/%x).\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4145, "Done %s.\n", "qla2x00_set_fw_options"); } return (rval); } } int qla2x00_mbx_reg_test(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4146, "Entered %s.\n", "qla2x00_mbx_reg_test"); mcp->mb[0] = 6U; mcp->mb[1] = 43690U; mcp->mb[2] = 21845U; mcp->mb[3] = 43605U; mcp->mb[4] = 21930U; mcp->mb[5] = 42405U; mcp->mb[6] = 23130U; mcp->mb[7] = 9509U; mcp->out_mb = 255U; mcp->in_mb = 255U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval == 0) { if ((((unsigned int )mcp->mb[1] != 43690U || (unsigned int )mcp->mb[2] != 21845U) || (unsigned int )mcp->mb[3] != 43605U) || (unsigned int )mcp->mb[4] != 21930U) { rval = 258; } else { } if (((unsigned int )mcp->mb[5] != 42405U || (unsigned int )mcp->mb[6] != 23130U) || (unsigned int )mcp->mb[7] != 9509U) { rval = 258; } else { } } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4147, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4148, "Done %s.\n", "qla2x00_mbx_reg_test"); } return (rval); } } int qla2x00_verify_checksum(scsi_qla_host_t *vha , uint32_t risc_addr ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4149, "Entered %s.\n", "qla2x00_verify_checksum"); mcp->mb[0] = 7U; mcp->out_mb = 1U; mcp->in_mb = 1U; if (((vha->hw)->device_type & 134217728U) != 0U) { mcp->mb[1] = (unsigned short )(risc_addr >> 16); mcp->mb[2] = (unsigned short )risc_addr; mcp->out_mb = mcp->out_mb | 6U; mcp->in_mb = mcp->in_mb | 6U; } else { mcp->mb[1] = (unsigned short )risc_addr; mcp->out_mb = mcp->out_mb | 2U; mcp->in_mb = mcp->in_mb | 2U; } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4150, "Failed=%x chm sum=%x.\n", rval, ((vha->hw)->device_type & 134217728U) != 0U ? 
((int )mcp->mb[2] << 16) | (int )mcp->mb[1] : (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4151, "Done %s.\n", "qla2x00_verify_checksum"); } return (rval); } } int qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha , void *buffer , dma_addr_t phys_addr , size_t size , uint32_t tov ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; sts_entry_t *sts_entry ; { mcp = & mc; ql_dbg(536903680U, vha, 4152, "Entered %s.\n", "qla2x00_issue_iocb_timeout"); mcp->mb[0] = 84U; mcp->mb[1] = 0U; mcp->mb[2] = (unsigned short )((unsigned int )phys_addr >> 16); mcp->mb[3] = (unsigned short )phys_addr; mcp->mb[6] = (unsigned short )((unsigned int )(phys_addr >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(phys_addr >> 32ULL); mcp->out_mb = 207U; mcp->in_mb = 5U; mcp->tov = tov; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4153, "Failed=%x.\n", rval); } else { sts_entry = (sts_entry_t *)buffer; sts_entry->entry_status = (uint8_t )((int )((signed char )sts_entry->entry_status) & (((vha->hw)->device_type & 134217728U) != 0U ? 60 : 126)); ql_dbg(536903680U, vha, 4154, "Done %s.\n", "qla2x00_issue_iocb_timeout"); } return (rval); } } int qla2x00_issue_iocb(scsi_qla_host_t *vha , void *buffer , dma_addr_t phys_addr , size_t size ) { int tmp ; { tmp = qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size, 30U); return (tmp); } } int qla2x00_abort_command(srb_t *sp ) { unsigned long flags ; int rval ; uint32_t handle ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; fc_port_t *fcport ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct req_que *req ; struct scsi_cmnd *cmd ; raw_spinlock_t *tmp ; { flags = 0UL; handle = 0U; mcp = & mc; fcport = sp->fcport; vha = fcport->vha; ha = vha->hw; req = vha->req; cmd = sp->u.scmd.cmd; ql_dbg(536903680U, vha, 4155, "Entered %s.\n", "qla2x00_abort_command"); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); handle = 1U; goto ldv_65898; ldv_65897: ; if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )sp) { goto ldv_65896; } else { } handle = handle + 1U; ldv_65898: ; if ((uint32_t )req->num_outstanding_cmds > handle) { goto ldv_65897; } else { } ldv_65896: spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((uint32_t )req->num_outstanding_cmds == handle) { return (258); } else { } mcp->mb[0] = 21U; if ((int )ha->device_type < 0) { mcp->mb[1] = fcport->loop_id; } else { mcp->mb[1] = (int )fcport->loop_id << 8U; } mcp->mb[2] = (unsigned short )handle; mcp->mb[3] = (unsigned short )(handle >> 16); mcp->mb[6] = (unsigned short )(cmd->device)->lun; mcp->out_mb = 79U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4156, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4157, "Done %s.\n", "qla2x00_abort_command"); } return (rval); } } int qla2x00_abort_target(struct fc_port *fcport , uint64_t l , int tag ) { int rval ; int rval2 ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; scsi_qla_host_t *vha ; struct req_que *req ; struct rsp_que *rsp ; { mcp = & mc; l = l; vha = fcport->vha; ql_dbg(536903680U, vha, 4158, "Entered %s.\n", "qla2x00_abort_target"); req = *((vha->hw)->req_q_map); rsp = req->rsp; mcp->mb[0] = 23U; mcp->out_mb = 519U; if ((int )(vha->hw)->device_type < 0) { mcp->mb[1] = fcport->loop_id; mcp->mb[10] = 0U; mcp->out_mb = mcp->out_mb | 1024U; } else { mcp->mb[1] = (int )fcport->loop_id << 8U; } mcp->mb[2] = (vha->hw)->loop_reset_delay; mcp->mb[9] = vha->vp_idx; mcp->in_mb = 1U; 
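/* qla2x00_abort_target() body: mb[0] = 23 (0x17) selects the abort-target mailbox
 * command; depending on an adapter capability bit the loop ID is written to mb[1]
 * directly (with mb[10] zeroed) or shifted into the high byte of mb[1]; mb[2] carries
 * the loop-reset delay and mb[9] the VP index.  Once qla2x00_mailbox_command() returns,
 * a marker IOCB is issued for the same loop ID to resynchronize outstanding commands. */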
mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536903680U, vha, 4159, "Failed=%x.\n", rval); } else { } rval2 = qla2x00_marker(vha, req, rsp, (int )fcport->loop_id, 0ULL, 1); if (rval2 != 0) { ql_dbg(536870912U, vha, 4160, "Failed to issue marker IOCB (%x).\n", rval2); } else { ql_dbg(536903680U, vha, 4161, "Done %s.\n", "qla2x00_abort_target"); } return (rval); } } int qla2x00_lun_reset(struct fc_port *fcport , uint64_t l , int tag ) { int rval ; int rval2 ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; scsi_qla_host_t *vha ; struct req_que *req ; struct rsp_que *rsp ; { mcp = & mc; vha = fcport->vha; ql_dbg(536903680U, vha, 4162, "Entered %s.\n", "qla2x00_lun_reset"); req = *((vha->hw)->req_q_map); rsp = req->rsp; mcp->mb[0] = 126U; mcp->out_mb = 527U; if ((int )(vha->hw)->device_type < 0) { mcp->mb[1] = fcport->loop_id; } else { mcp->mb[1] = (int )fcport->loop_id << 8U; } mcp->mb[2] = (uint16_t )l; mcp->mb[3] = 0U; mcp->mb[9] = vha->vp_idx; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4163, "Failed=%x.\n", rval); } else { } rval2 = qla2x00_marker(vha, req, rsp, (int )fcport->loop_id, l, 0); if (rval2 != 0) { ql_dbg(536870912U, vha, 4164, "Failed to issue marker IOCB (%x).\n", rval2); } else { ql_dbg(536903680U, vha, 4165, "Done %s.\n", "qla2x00_lun_reset"); } return (rval); } } int qla2x00_get_adapter_id(scsi_qla_host_t *vha , uint16_t *id , uint8_t *al_pa , uint8_t *area , uint8_t *domain , uint16_t *top , uint16_t *sw_cap ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; u64 tmp ; { mcp = & mc; ql_dbg(536903680U, vha, 4166, "Entered %s.\n", "qla2x00_get_adapter_id"); mcp->mb[0] = 32U; mcp->mb[9] = vha->vp_idx; mcp->out_mb = 513U; mcp->in_mb = 719U; if (((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 16384U) != 0U) || ((vha->hw)->device_type & 65536U) != 0U) || ((vha->hw)->device_type & 262144U) != 0U) { mcp->in_mb = mcp->in_mb | 15360U; } else { } if (((vha->hw)->device_type & 134217728U) != 0U) { mcp->in_mb = mcp->in_mb | 983040U; } else { } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((unsigned int )mcp->mb[0] == 16389U) { rval = 5; } else if ((unsigned int )mcp->mb[0] == 16385U) { rval = 1; } else { } *id = mcp->mb[1]; *al_pa = (unsigned char )mcp->mb[2]; *area = (unsigned char )((int )mcp->mb[2] >> 8); *domain = (unsigned char )mcp->mb[3]; *top = mcp->mb[6]; *sw_cap = mcp->mb[7]; if (rval != 0) { ql_dbg(536870912U, vha, 4167, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4168, "Done %s.\n", "qla2x00_get_adapter_id"); if (((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 16384U) != 0U) || ((vha->hw)->device_type & 65536U) != 0U) || ((vha->hw)->device_type & 262144U) != 0U) { vha->fcoe_vlan_id = (unsigned int )mcp->mb[9] & 4095U; vha->fcoe_fcf_idx = mcp->mb[10]; vha->fcoe_vn_port_mac[5] = (uint8_t )((int )mcp->mb[11] >> 8); vha->fcoe_vn_port_mac[4] = (uint8_t )mcp->mb[11]; vha->fcoe_vn_port_mac[3] = (uint8_t )((int )mcp->mb[12] >> 8); vha->fcoe_vn_port_mac[2] = (uint8_t )mcp->mb[12]; vha->fcoe_vn_port_mac[1] = (uint8_t )((int )mcp->mb[13] >> 8); vha->fcoe_vn_port_mac[0] = (uint8_t )mcp->mb[13]; } else { } if (((int )mcp->mb[7] & 16384) != 0) { vha->port_name[0] = (unsigned char )((int )mcp->mb[16] >> 8); vha->port_name[1] = (unsigned char )mcp->mb[16]; vha->port_name[2] = (unsigned char )((int )mcp->mb[17] >> 8); vha->port_name[3] = (unsigned char )mcp->mb[17]; 
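/* These assignments run only when bit 14 of mb[7] is set: they unpack the 64-bit
 * fabric-assigned port name (FA-WWN) from mailbox registers 16-19, high byte first,
 * store it in vha->port_name, mirror it into the fc_host attributes, and log it. */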
vha->port_name[4] = (unsigned char )((int )mcp->mb[18] >> 8); vha->port_name[5] = (unsigned char )mcp->mb[18]; vha->port_name[6] = (unsigned char )((int )mcp->mb[19] >> 8); vha->port_name[7] = (unsigned char )mcp->mb[19]; ((struct fc_host_attrs *)(vha->host)->shost_data)->port_name = wwn_to_u64((u8 *)(& vha->port_name)); tmp = wwn_to_u64((u8 *)(& vha->port_name)); ql_dbg(536870912U, vha, 4298, "FA-WWN acquired %016llx\n", tmp); } else { } } return (rval); } } int qla2x00_get_retry_cnt(scsi_qla_host_t *vha , uint8_t *retry_cnt , uint8_t *tov , uint16_t *r_a_tov ) { int rval ; uint16_t ratov ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4169, "Entered %s.\n", "qla2x00_get_retry_cnt"); mcp->mb[0] = 34U; mcp->out_mb = 1U; mcp->in_mb = 15U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4170, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { *r_a_tov = (uint16_t )((unsigned int )mcp->mb[3] / 2U); ratov = (uint16_t )((unsigned int )mcp->mb[3] / 20U); if ((int )mcp->mb[1] * (int )ratov > (int )*retry_cnt * (int )*tov) { *retry_cnt = (unsigned char )mcp->mb[1]; *tov = (uint8_t )ratov; } else { } ql_dbg(536903680U, vha, 4171, "Done %s mb3=%d ratov=%d.\n", "qla2x00_get_retry_cnt", (int )mcp->mb[3], (int )ratov); } return (rval); } } int qla2x00_init_firmware(scsi_qla_host_t *vha , uint16_t size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4172, "Entered %s.\n", "qla2x00_init_firmware"); if (((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) && ql2xdbwr != 0) { qla82xx_wr_32(ha, ha->nxdb_wr_ptr, (u32 )(((int )ha->portnum << 5) | 4)); } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { mcp->mb[0] = 72U; } else { mcp->mb[0] = 96U; } mcp->mb[1] = 0U; mcp->mb[2] = (unsigned short )((unsigned int )ha->init_cb_dma >> 16); mcp->mb[3] = (unsigned short )ha->init_cb_dma; mcp->mb[6] = (unsigned short )((unsigned int )(ha->init_cb_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(ha->init_cb_dma >> 32ULL); mcp->out_mb = 207U; if ((unsigned long )ha->ex_init_cb != (unsigned long )((struct ex_init_cb_81xx *)0) && (unsigned int )(ha->ex_init_cb)->ex_version != 0U) { mcp->mb[1] = 1U; mcp->mb[10] = (unsigned short )((unsigned int )ha->ex_init_cb_dma >> 16); mcp->mb[11] = (unsigned short )ha->ex_init_cb_dma; mcp->mb[12] = (unsigned short )((unsigned int )(ha->ex_init_cb_dma >> 32ULL) >> 16); mcp->mb[13] = (unsigned short )(ha->ex_init_cb_dma >> 32ULL); mcp->mb[14] = 64U; mcp->out_mb = mcp->out_mb | 31744U; } else { } mcp->in_mb = 7U; if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { mcp->in_mb = mcp->in_mb | 8U; } else { } mcp->buf_size = (long )size; mcp->flags = 2U; mcp->tov = 30U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4173, "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2], (int )mcp->mb[3]); } else { ql_dbg(536903680U, vha, 4174, "Done %s.\n", "qla2x00_init_firmware"); } return (rval); } } int qla2x00_get_node_name_list(scsi_qla_host_t *vha , void **out_data , int *out_len ) { struct qla_hw_data *ha ; struct qla_port_24xx_data *list ; void *pmap ; mbx_cmd_t mc ; dma_addr_t pmap_dma ; ulong dma_size ; int rval ; int left ; void *tmp ; { ha = vha->hw; list = (struct qla_port_24xx_data *)0; left = 1; 
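/* qla2x00_get_node_name_list(): the driver's retry loop was flattened by CIL/LDV into
 * the ldv_65975/ldv_65976 goto labels below.  Each pass allocates a DMA buffer of
 * left * 12 bytes, issues mailbox command 117 (0x75) with mb[1] = 10, and on a
 * 0x4005/0xA completion (presumably "list did not fit") enlarges `left` by mb[2]/12
 * entries and retries; on success the buffer is duplicated with kmemdup() and handed
 * back through *out_data / *out_len. */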
goto ldv_65976; ldv_65975: dma_size = (unsigned long )left * 12UL; pmap = dma_alloc_attrs(& (ha->pdev)->dev, dma_size, & pmap_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )pmap == (unsigned long )((void *)0)) { ql_log(1U, vha, 4415, "%s(%ld): DMA Alloc failed of %ld\n", "qla2x00_get_node_name_list", vha->host_no, dma_size); rval = 259; goto out; } else { } mc.mb[0] = 117U; mc.mb[1] = 10U; mc.mb[2] = (unsigned short )((unsigned int )pmap_dma >> 16); mc.mb[3] = (unsigned short )pmap_dma; mc.mb[6] = (unsigned short )((unsigned int )(pmap_dma >> 32ULL) >> 16); mc.mb[7] = (unsigned short )(pmap_dma >> 32ULL); mc.mb[8] = (uint16_t )dma_size; mc.out_mb = 463U; mc.in_mb = 3U; mc.tov = 30U; mc.flags = 1U; rval = qla2x00_mailbox_command(vha, & mc); if (rval != 0) { if ((unsigned int )mc.mb[0] == 16389U && (unsigned int )mc.mb[1] == 10U) { left = (int )((unsigned int )mc.mb[2] / 12U + (unsigned int )left); goto restart; } else { } goto out_free; } else { } left = 0; tmp = kmemdup((void const *)pmap, dma_size, 208U); list = (struct qla_port_24xx_data *)tmp; if ((unsigned long )list == (unsigned long )((struct qla_port_24xx_data *)0)) { ql_log(1U, vha, 4416, "%s(%ld): failed to allocate node names list structure.\n", "qla2x00_get_node_name_list", vha->host_no); rval = 259; goto out_free; } else { } restart: dma_free_attrs(& (ha->pdev)->dev, dma_size, pmap, pmap_dma, (struct dma_attrs *)0); ldv_65976: ; if (left > 0) { goto ldv_65975; } else { } *out_data = (void *)list; *out_len = (int )dma_size; out: ; return (rval); out_free: dma_free_attrs(& (ha->pdev)->dev, dma_size, pmap, pmap_dma, (struct dma_attrs *)0); return (rval); } } int qla2x00_get_port_database(scsi_qla_host_t *vha , fc_port_t *fcport , uint8_t opt ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; port_database_t *pd ; struct port_database_24xx *pd24 ; dma_addr_t pd_dma ; struct qla_hw_data *ha ; void *tmp ; int _max1 ; int _max2 ; uint64_t zero ; int tmp___0 ; int tmp___1 ; uint64_t zero___0 ; int tmp___2 ; int tmp___3 ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4175, "Entered %s.\n", "qla2x00_get_port_database"); pd24 = (struct port_database_24xx *)0; tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & pd_dma); pd = (port_database_t *)tmp; if ((unsigned long )pd == (unsigned long )((port_database_t *)0)) { ql_log(1U, vha, 4176, "Failed to allocate port database structure.\n"); return (259); } else { } _max1 = 128; _max2 = 64; memset((void *)pd, 0, (size_t )(_max1 > _max2 ? _max1 : _max2)); mcp->mb[0] = 100U; if ((unsigned int )opt != 0U && (ha->device_type & 134217728U) == 0U) { mcp->mb[0] = 71U; } else { } mcp->mb[2] = (unsigned short )((unsigned int )pd_dma >> 16); mcp->mb[3] = (unsigned short )pd_dma; mcp->mb[6] = (unsigned short )((unsigned int )(pd_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(pd_dma >> 32ULL); mcp->mb[9] = vha->vp_idx; mcp->out_mb = 717U; mcp->in_mb = 1U; if ((ha->device_type & 134217728U) != 0U) { mcp->mb[1] = fcport->loop_id; mcp->mb[10] = (uint16_t )opt; mcp->out_mb = mcp->out_mb | 1026U; mcp->in_mb = mcp->in_mb | 2U; } else if ((int )ha->device_type < 0) { mcp->mb[1] = fcport->loop_id; mcp->mb[10] = (uint16_t )opt; mcp->out_mb = mcp->out_mb | 1026U; } else { mcp->mb[1] = (uint16_t )((int )((short )((int )fcport->loop_id << 8)) | (int )((short )opt)); mcp->out_mb = mcp->out_mb | 2U; } mcp->buf_size = (ha->device_type & 134217728U) != 0U ? 
64L : 128L; mcp->flags = 1U; mcp->tov = (uint32_t )((int )ha->login_timeout * 2 + (int )((unsigned int )ha->login_timeout / 2U)); rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { goto gpd_error_out; } else { } if ((ha->device_type & 134217728U) != 0U) { zero = 0ULL; pd24 = (struct port_database_24xx *)pd; if ((unsigned int )pd24->current_login_state != 6U && (unsigned int )pd24->last_login_state != 6U) { ql_dbg(536870912U, vha, 4177, "Unable to verify login-state (%x/%x) for loop_id %x.\n", (int )pd24->current_login_state, (int )pd24->last_login_state, (int )fcport->loop_id); rval = 258; goto gpd_error_out; } else { } if ((unsigned int )fcport->loop_id == 4096U) { rval = 10; goto gpd_error_out; } else { tmp___0 = memcmp((void const *)(& fcport->port_name), (void const *)(& zero), 8UL); if (tmp___0 != 0) { tmp___1 = memcmp((void const *)(& fcport->port_name), (void const *)(& pd24->port_name), 8UL); if (tmp___1 != 0) { rval = 10; goto gpd_error_out; } else { } } else { } } memcpy((void *)(& fcport->node_name), (void const *)(& pd24->node_name), 8UL); memcpy((void *)(& fcport->port_name), (void const *)(& pd24->port_name), 8UL); fcport->d_id.b.domain = pd24->port_id[0]; fcport->d_id.b.area = pd24->port_id[1]; fcport->d_id.b.al_pa = pd24->port_id[2]; fcport->d_id.b.rsvd_1 = 0U; if (((int )pd24->prli_svc_param_word_3[0] & 16) == 0) { fcport->port_type = 4; } else { fcport->port_type = 5; } fcport->supported_classes = ((int )pd24->flags & 16) != 0 ? 4U : 8U; if ((int )((signed char )pd24->prli_svc_param_word_3[0]) < 0) { fcport->flags = fcport->flags | 16U; } else { } } else { zero___0 = 0ULL; if ((unsigned int )pd->master_state != 6U && (unsigned int )pd->slave_state != 6U) { ql_dbg(536870912U, vha, 4106, "Unable to verify login-state (%x/%x) - portid=%02x%02x%02x.\n", (int )pd->master_state, (int )pd->slave_state, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); rval = 258; goto gpd_error_out; } else { } if ((unsigned int )fcport->loop_id == 4096U) { rval = 10; goto gpd_error_out; } else { tmp___2 = memcmp((void const *)(& fcport->port_name), (void const *)(& zero___0), 8UL); if (tmp___2 != 0) { tmp___3 = memcmp((void const *)(& fcport->port_name), (void const *)(& pd->port_name), 8UL); if (tmp___3 != 0) { rval = 10; goto gpd_error_out; } else { } } else { } } memcpy((void *)(& fcport->node_name), (void const *)(& pd->node_name), 8UL); memcpy((void *)(& fcport->port_name), (void const *)(& pd->port_name), 8UL); fcport->d_id.b.domain = pd->port_id[0]; fcport->d_id.b.area = pd->port_id[3]; fcport->d_id.b.al_pa = pd->port_id[2]; fcport->d_id.b.rsvd_1 = 0U; if (((int )pd->prli_svc_param_word_3[0] & 16) == 0) { fcport->port_type = 4; } else { fcport->port_type = 5; } fcport->supported_classes = ((int )pd->options & 16) != 0 ? 
4U : 8U; } gpd_error_out: dma_pool_free(ha->s_dma_pool, (void *)pd, pd_dma); if (rval != 0) { ql_dbg(536870912U, vha, 4178, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4179, "Done %s.\n", "qla2x00_get_port_database"); } return (rval); } } int qla2x00_get_firmware_state(scsi_qla_host_t *vha , uint16_t *states ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4180, "Entered %s.\n", "qla2x00_get_firmware_state"); mcp->mb[0] = 105U; mcp->out_mb = 1U; if (((vha->hw)->device_type & 134217728U) != 0U) { mcp->in_mb = 127U; } else { mcp->in_mb = 3U; } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); *states = mcp->mb[1]; if (((vha->hw)->device_type & 134217728U) != 0U) { *(states + 1UL) = mcp->mb[2]; *(states + 2UL) = mcp->mb[3]; *(states + 3UL) = mcp->mb[4]; *(states + 4UL) = mcp->mb[5]; *(states + 5UL) = mcp->mb[6]; } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4181, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4182, "Done %s.\n", "qla2x00_get_firmware_state"); } return (rval); } } int qla2x00_get_port_name(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t *name , uint8_t opt ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4183, "Entered %s.\n", "qla2x00_get_port_name"); mcp->mb[0] = 106U; mcp->mb[9] = vha->vp_idx; mcp->out_mb = 515U; if ((int )(vha->hw)->device_type < 0) { mcp->mb[1] = loop_id; mcp->mb[10] = (uint16_t )opt; mcp->out_mb = mcp->out_mb | 1024U; } else { mcp->mb[1] = (uint16_t )((int )((short )((int )loop_id << 8)) | (int )((short )opt)); } mcp->in_mb = 207U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4184, "Failed=%x.\n", rval); } else { if ((unsigned long )name != (unsigned long )((uint8_t *)0U)) { *name = (unsigned char )((int )mcp->mb[2] >> 8); *(name + 1UL) = (unsigned char )mcp->mb[2]; *(name + 2UL) = (unsigned char )((int )mcp->mb[3] >> 8); *(name + 3UL) = (unsigned char )mcp->mb[3]; *(name + 4UL) = (unsigned char )((int )mcp->mb[6] >> 8); *(name + 5UL) = (unsigned char )mcp->mb[6]; *(name + 6UL) = (unsigned char )((int )mcp->mb[7] >> 8); *(name + 7UL) = (unsigned char )mcp->mb[7]; } else { } ql_dbg(536903680U, vha, 4185, "Done %s.\n", "qla2x00_get_port_name"); } return (rval); } } int qla24xx_link_initialize(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4434, "Entered %s.\n", "qla24xx_link_initialize"); if (((vha->hw)->device_type & 134217728U) == 0U || (((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 16384U) != 0U) || ((vha->hw)->device_type & 65536U) != 0U) || ((vha->hw)->device_type & 262144U) != 0U)) { return (258); } else { } mcp->mb[0] = 114U; mcp->mb[1] = 16U; if ((unsigned int )(vha->hw)->operating_mode == 0U) { mcp->mb[1] = (uint16_t )((unsigned int )mcp->mb[1] | 64U); } else { mcp->mb[1] = (uint16_t )((unsigned int )mcp->mb[1] | 32U); } mcp->mb[2] = 0U; mcp->mb[3] = 0U; mcp->out_mb = 15U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4435, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4436, "Done %s.\n", "qla24xx_link_initialize"); } return (rval); } } int qla2x00_lip_reset(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4186, "Entered %s.\n", "qla2x00_lip_reset"); if 
(((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 16384U) != 0U) || ((vha->hw)->device_type & 65536U) != 0U) || ((vha->hw)->device_type & 262144U) != 0U) { mcp->mb[0] = 114U; mcp->mb[1] = 2U; mcp->mb[2] = 0U; mcp->out_mb = 7U; } else if (((vha->hw)->device_type & 134217728U) != 0U) { mcp->mb[0] = 114U; mcp->mb[1] = 64U; mcp->mb[2] = 0U; mcp->mb[3] = (vha->hw)->loop_reset_delay; mcp->out_mb = 15U; } else { mcp->mb[0] = 108U; mcp->out_mb = 15U; if ((int )(vha->hw)->device_type < 0) { mcp->mb[1] = 255U; mcp->mb[10] = 0U; mcp->out_mb = mcp->out_mb | 1024U; } else { mcp->mb[1] = 65280U; } mcp->mb[2] = (vha->hw)->loop_reset_delay; mcp->mb[3] = 0U; } mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4187, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4188, "Done %s.\n", "qla2x00_lip_reset"); } return (rval); } } int qla2x00_send_sns(scsi_qla_host_t *vha , dma_addr_t sns_phys_address , uint16_t cmd_size , size_t buf_size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4189, "Entered %s.\n", "qla2x00_send_sns"); ql_dbg(536903680U, vha, 4190, "Retry cnt=%d ratov=%d total tov=%d.\n", (int )(vha->hw)->retry_count, (int )(vha->hw)->login_timeout, mcp->tov); mcp->mb[0] = 110U; mcp->mb[1] = cmd_size; mcp->mb[2] = (unsigned short )((unsigned int )sns_phys_address >> 16); mcp->mb[3] = (unsigned short )sns_phys_address; mcp->mb[6] = (unsigned short )((unsigned int )(sns_phys_address >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(sns_phys_address >> 32ULL); mcp->out_mb = 207U; mcp->in_mb = 3U; mcp->buf_size = (long )buf_size; mcp->flags = 3U; mcp->tov = (uint32_t )((int )(vha->hw)->login_timeout * 2 + (int )((unsigned int )(vha->hw)->login_timeout / 2U)); rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4191, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4192, "Done %s.\n", "qla2x00_send_sns"); } return (rval); } } int qla24xx_login_fabric(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa , uint16_t *mb , uint8_t opt ) { int rval ; struct logio_entry_24xx *lg ; dma_addr_t lg_dma ; uint32_t iop[2U] ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; void *tmp ; { ha = vha->hw; ql_dbg(536903680U, vha, 4193, "Entered %s.\n", "qla24xx_login_fabric"); if (*((unsigned long *)ha + 2UL) != 0UL) { req = *(ha->req_q_map); } else { req = vha->req; } rsp = req->rsp; tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & lg_dma); lg = (struct logio_entry_24xx *)tmp; if ((unsigned long )lg == (unsigned long )((struct logio_entry_24xx *)0)) { ql_log(1U, vha, 4194, "Failed to allocate login IOCB.\n"); return (259); } else { } memset((void *)lg, 0, 64UL); lg->entry_type = 82U; lg->entry_count = 1U; lg->handle = ((unsigned int )req->id << 16) | lg->handle; lg->nport_handle = loop_id; lg->control_flags = 0U; if ((int )opt & 1) { lg->control_flags = (uint16_t )((unsigned int )lg->control_flags | 16U); } else { } if (((int )opt & 2) != 0) { lg->control_flags = (uint16_t )((unsigned int )lg->control_flags | 32U); } else { } lg->port_id[0] = al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; lg->vp_index = (uint8_t )vha->vp_idx; rval = qla2x00_issue_iocb_timeout(vha, (void *)lg, lg_dma, 0UL, (uint32_t )(((int )((unsigned int )ha->r_a_tov / 10U) + 1) * 2)); if (rval != 0) { ql_dbg(536870912U, vha, 4195, "Failed to issue login IOCB 
(%x).\n", rval); } else if ((unsigned int )lg->entry_status != 0U) { ql_dbg(536870912U, vha, 4196, "Failed to complete IOCB -- error status (%x).\n", (int )lg->entry_status); rval = 258; } else if ((unsigned int )lg->comp_status != 0U) { iop[0] = lg->io_parameter[0]; iop[1] = lg->io_parameter[1]; ql_dbg(536870912U, vha, 4197, "Failed to complete IOCB -- completion status (%x) ioparam=%x/%x.\n", (int )lg->comp_status, iop[0], iop[1]); switch (iop[0]) { case 26U: *mb = 16391U; *(mb + 1UL) = (unsigned short )iop[1]; goto ldv_66057; case 27U: *mb = 16392U; goto ldv_66057; case 1U: ; case 2U: ; case 3U: ; case 4U: ; case 5U: ; case 7U: ; case 9U: ; case 10U: ; case 24U: ; case 25U: ; case 28U: ; case 29U: ; case 31U: ; default: *mb = 16389U; goto ldv_66057; } ldv_66057: ; } else { ql_dbg(536903680U, vha, 4198, "Done %s.\n", "qla24xx_login_fabric"); iop[0] = lg->io_parameter[0]; *mb = 16384U; *(mb + 1UL) = 0U; if ((iop[0] & 16U) != 0U) { if ((iop[0] & 256U) != 0U) { *(mb + 1UL) = (uint16_t )((unsigned int )*(mb + 1UL) | 2U); } else { } } else { *(mb + 1UL) = 1U; } *(mb + 10UL) = 0U; if (lg->io_parameter[7] != 0U || lg->io_parameter[8] != 0U) { *(mb + 10UL) = (uint16_t )((unsigned int )*(mb + 10UL) | 1U); } else { } if (lg->io_parameter[9] != 0U || lg->io_parameter[10] != 0U) { *(mb + 10UL) = (uint16_t )((unsigned int )*(mb + 10UL) | 2U); } else { } if ((lg->io_parameter[0] & 128U) != 0U) { *(mb + 10UL) = (uint16_t )((unsigned int )*(mb + 10UL) | 128U); } else { } } dma_pool_free(ha->s_dma_pool, (void *)lg, lg_dma); return (rval); } } int qla2x00_login_fabric(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa , uint16_t *mb , uint8_t opt ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4199, "Entered %s.\n", "qla2x00_login_fabric"); mcp->mb[0] = 111U; mcp->out_mb = 15U; if ((int )ha->device_type < 0) { mcp->mb[1] = loop_id; mcp->mb[10] = (uint16_t )opt; mcp->out_mb = mcp->out_mb | 1024U; } else { mcp->mb[1] = (uint16_t )((int )((short )((int )loop_id << 8)) | (int )((short )opt)); } mcp->mb[2] = (uint16_t )domain; mcp->mb[3] = (uint16_t )((int )((short )((int )area << 8)) | (int )((short )al_pa)); mcp->in_mb = 199U; mcp->tov = (uint32_t )((int )ha->login_timeout * 2 + (int )((unsigned int )ha->login_timeout / 2U)); mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((unsigned long )mb != (unsigned long )((uint16_t *)0U)) { *mb = mcp->mb[0]; *(mb + 1UL) = mcp->mb[1]; *(mb + 2UL) = mcp->mb[2]; *(mb + 6UL) = mcp->mb[6]; *(mb + 7UL) = mcp->mb[7]; *(mb + 10UL) = 0U; } else { } if (rval != 0) { if (((((unsigned int )mcp->mb[0] == 16385U || (unsigned int )mcp->mb[0] == 16386U) || (unsigned int )mcp->mb[0] == 16387U) || (unsigned int )mcp->mb[0] == 16389U) || (unsigned int )mcp->mb[0] == 16390U) { rval = 0; } else { } ql_dbg(536870912U, vha, 4200, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2]); } else { ql_dbg(536903680U, vha, 4201, "Done %s.\n", "qla2x00_login_fabric"); } return (rval); } } int qla2x00_login_local_device(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t *mb_ret , uint8_t opt ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; int tmp ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4202, "Entered %s.\n", "qla2x00_login_local_device"); if ((ha->device_type & 134217728U) != 0U) { tmp = qla24xx_login_fabric(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int 
)fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, mb_ret, (int )opt); return (tmp); } else { } mcp->mb[0] = 116U; if ((int )ha->device_type < 0) { mcp->mb[1] = fcport->loop_id; } else { mcp->mb[1] = (int )fcport->loop_id << 8U; } mcp->mb[2] = (uint16_t )opt; mcp->out_mb = 7U; mcp->in_mb = 195U; mcp->tov = (uint32_t )((int )ha->login_timeout * 2 + (int )((unsigned int )ha->login_timeout / 2U)); mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((unsigned long )mb_ret != (unsigned long )((uint16_t *)0U)) { *mb_ret = mcp->mb[0]; *(mb_ret + 1UL) = mcp->mb[1]; *(mb_ret + 6UL) = mcp->mb[6]; *(mb_ret + 7UL) = mcp->mb[7]; } else { } if (rval != 0) { if ((unsigned int )mcp->mb[0] == 16389U || (unsigned int )mcp->mb[0] == 16390U) { rval = 0; } else { } ql_dbg(536870912U, vha, 4203, "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[6], (int )mcp->mb[7]); } else { ql_dbg(536903680U, vha, 4204, "Done %s.\n", "qla2x00_login_local_device"); } return (rval); } } int qla24xx_fabric_logout(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa ) { int rval ; struct logio_entry_24xx *lg ; dma_addr_t lg_dma ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; void *tmp ; { ha = vha->hw; ql_dbg(536903680U, vha, 4205, "Entered %s.\n", "qla24xx_fabric_logout"); tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & lg_dma); lg = (struct logio_entry_24xx *)tmp; if ((unsigned long )lg == (unsigned long )((struct logio_entry_24xx *)0)) { ql_log(1U, vha, 4206, "Failed to allocate logout IOCB.\n"); return (259); } else { } memset((void *)lg, 0, 64UL); if (ql2xmaxqueues > 1) { req = *(ha->req_q_map); } else { req = vha->req; } rsp = req->rsp; lg->entry_type = 82U; lg->entry_count = 1U; lg->handle = ((unsigned int )req->id << 16) | lg->handle; lg->nport_handle = loop_id; lg->control_flags = 152U; lg->port_id[0] = al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; lg->vp_index = (uint8_t )vha->vp_idx; rval = qla2x00_issue_iocb_timeout(vha, (void *)lg, lg_dma, 0UL, (uint32_t )(((int )((unsigned int )ha->r_a_tov / 10U) + 1) * 2)); if (rval != 0) { ql_dbg(536870912U, vha, 4207, "Failed to issue logout IOCB (%x).\n", rval); } else if ((unsigned int )lg->entry_status != 0U) { ql_dbg(536870912U, vha, 4208, "Failed to complete IOCB -- error status (%x).\n", (int )lg->entry_status); rval = 258; } else if ((unsigned int )lg->comp_status != 0U) { ql_dbg(536870912U, vha, 4209, "Failed to complete IOCB -- completion status (%x) ioparam=%x/%x.\n", (int )lg->comp_status, lg->io_parameter[0], lg->io_parameter[1]); } else { ql_dbg(536903680U, vha, 4210, "Done %s.\n", "qla24xx_fabric_logout"); } dma_pool_free(ha->s_dma_pool, (void *)lg, lg_dma); return (rval); } } int qla2x00_fabric_logout(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4211, "Entered %s.\n", "qla2x00_fabric_logout"); mcp->mb[0] = 113U; mcp->out_mb = 3U; if ((int )(vha->hw)->device_type < 0) { mcp->mb[1] = loop_id; mcp->mb[10] = 0U; mcp->out_mb = mcp->out_mb | 1024U; } else { mcp->mb[1] = (int )loop_id << 8U; } mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4212, "Failed=%x mb[1]=%x.\n", rval, (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4213, "Done %s.\n", "qla2x00_fabric_logout"); } return (rval); } } int 
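/* qla2x00_full_login_lip() below issues mailbox command 114 (0x72) with mb[1] = 8 on
 * FWI2-capable adapters (device_type bit 0x08000000) and mb[1] = 0 on older ISPs,
 * requesting a LIP with full login of all loop ports. */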
qla2x00_full_login_lip(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4214, "Entered %s.\n", "qla2x00_full_login_lip"); mcp->mb[0] = 114U; mcp->mb[1] = ((vha->hw)->device_type & 134217728U) != 0U ? 8U : 0U; mcp->mb[2] = 0U; mcp->mb[3] = 0U; mcp->out_mb = 15U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4215, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4216, "Done %s.\n", "qla2x00_full_login_lip"); } return (rval); } } int qla2x00_get_id_list(scsi_qla_host_t *vha , void *id_list , dma_addr_t id_list_dma , uint16_t *entries ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4217, "Entered %s.\n", "qla2x00_get_id_list"); if ((unsigned long )id_list == (unsigned long )((void *)0)) { return (258); } else { } mcp->mb[0] = 124U; mcp->out_mb = 1U; if (((vha->hw)->device_type & 134217728U) != 0U) { mcp->mb[2] = (unsigned short )((unsigned int )id_list_dma >> 16); mcp->mb[3] = (unsigned short )id_list_dma; mcp->mb[6] = (unsigned short )((unsigned int )(id_list_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(id_list_dma >> 32ULL); mcp->mb[8] = 0U; mcp->mb[9] = vha->vp_idx; mcp->out_mb = mcp->out_mb | 972U; } else { mcp->mb[1] = (unsigned short )((unsigned int )id_list_dma >> 16); mcp->mb[2] = (unsigned short )id_list_dma; mcp->mb[3] = (unsigned short )((unsigned int )(id_list_dma >> 32ULL) >> 16); mcp->mb[6] = (unsigned short )(id_list_dma >> 32ULL); mcp->out_mb = mcp->out_mb | 78U; } mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4218, "Failed=%x.\n", rval); } else { *entries = mcp->mb[1]; ql_dbg(536903680U, vha, 4219, "Done %s.\n", "qla2x00_get_id_list"); } return (rval); } } int qla2x00_get_resource_cnts(scsi_qla_host_t *vha , uint16_t *cur_xchg_cnt , uint16_t *orig_xchg_cnt , uint16_t *cur_iocb_cnt , uint16_t *orig_iocb_cnt , uint16_t *max_npiv_vports , uint16_t *max_fcfs ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4220, "Entered %s.\n", "qla2x00_get_resource_cnts"); mcp->mb[0] = 66U; mcp->out_mb = 1U; mcp->in_mb = 3279U; if ((((vha->hw)->device_type & 8192U) != 0U || (((vha->hw)->device_type & 32768U) != 0U || ((vha->hw)->device_type & 65536U) != 0U)) || (((vha->hw)->device_type & 524288U) != 0U || ((vha->hw)->device_type & 1048576U) != 0U)) { mcp->in_mb = mcp->in_mb | 4096U; } else { } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4221, "Failed mb[0]=%x.\n", (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4222, "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x mb11=%x mb12=%x.\n", "qla2x00_get_resource_cnts", (int )mcp->mb[1], (int )mcp->mb[2], (int )mcp->mb[3], (int )mcp->mb[6], (int )mcp->mb[7], (int )mcp->mb[10], (int )mcp->mb[11], (int )mcp->mb[12]); if ((unsigned long )cur_xchg_cnt != (unsigned long )((uint16_t *)0U)) { *cur_xchg_cnt = mcp->mb[3]; } else { } if ((unsigned long )orig_xchg_cnt != (unsigned long )((uint16_t *)0U)) { *orig_xchg_cnt = mcp->mb[6]; } else { } if ((unsigned long )cur_iocb_cnt != (unsigned long )((uint16_t *)0U)) { *cur_iocb_cnt = mcp->mb[7]; } else { } if ((unsigned long )orig_iocb_cnt != (unsigned long )((uint16_t *)0U)) { *orig_iocb_cnt = mcp->mb[10]; } else { } if (*((unsigned long *)vha->hw + 2UL) != 0UL && (unsigned long )max_npiv_vports != (unsigned long 
)((uint16_t *)0U)) { *max_npiv_vports = mcp->mb[11]; } else { } if ((((vha->hw)->device_type & 8192U) != 0U || (((vha->hw)->device_type & 32768U) != 0U || ((vha->hw)->device_type & 65536U) != 0U)) && (unsigned long )max_fcfs != (unsigned long )((uint16_t *)0U)) { *max_fcfs = mcp->mb[12]; } else { } } return (rval); } } int qla2x00_get_fcal_position_map(scsi_qla_host_t *vha , char *pos_map ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; char *pmap ; dma_addr_t pmap_dma ; struct qla_hw_data *ha ; void *tmp ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4223, "Entered %s.\n", "qla2x00_get_fcal_position_map"); tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & pmap_dma); pmap = (char *)tmp; if ((unsigned long )pmap == (unsigned long )((char *)0)) { ql_log(1U, vha, 4224, "Memory alloc failed.\n"); return (259); } else { } memset((void *)pmap, 0, 128UL); mcp->mb[0] = 99U; mcp->mb[2] = (unsigned short )((unsigned int )pmap_dma >> 16); mcp->mb[3] = (unsigned short )pmap_dma; mcp->mb[6] = (unsigned short )((unsigned int )(pmap_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(pmap_dma >> 32ULL); mcp->out_mb = 205U; mcp->in_mb = 3U; mcp->buf_size = 128L; mcp->flags = 1U; mcp->tov = (uint32_t )((int )ha->login_timeout * 2 + (int )((unsigned int )ha->login_timeout / 2U)); rval = qla2x00_mailbox_command(vha, mcp); if (rval == 0) { ql_dbg(537001984U, vha, 4225, "mb0/mb1=%x/%X FC/AL position map size (%x).\n", (int )mcp->mb[0], (int )mcp->mb[1], (unsigned int )*pmap); ql_dump_buffer(537001984U, vha, 4381, (uint8_t *)pmap, (uint32_t )((int )*pmap + 1)); if ((unsigned long )pos_map != (unsigned long )((char *)0)) { memcpy((void *)pos_map, (void const *)pmap, 128UL); } else { } } else { } dma_pool_free(ha->s_dma_pool, (void *)pmap, pmap_dma); if (rval != 0) { ql_dbg(536870912U, vha, 4226, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4227, "Done %s.\n", "qla2x00_get_fcal_position_map"); } return (rval); } } int qla2x00_get_link_status(scsi_qla_host_t *vha , uint16_t loop_id , struct link_statistics *stats , dma_addr_t stats_dma ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; uint32_t *siter ; uint32_t *diter ; uint32_t dwords ; struct qla_hw_data *ha ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t tmp___1 ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4228, "Entered %s.\n", "qla2x00_get_link_status"); mcp->mb[0] = 107U; mcp->mb[2] = (unsigned short )((unsigned int )stats_dma >> 16); mcp->mb[3] = (unsigned short )stats_dma; mcp->mb[6] = (unsigned short )((unsigned int )(stats_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(stats_dma >> 32ULL); mcp->out_mb = 205U; mcp->in_mb = 1U; if ((ha->device_type & 134217728U) != 0U) { mcp->mb[1] = loop_id; mcp->mb[4] = 0U; mcp->mb[10] = 0U; mcp->out_mb = mcp->out_mb | 1042U; mcp->in_mb = mcp->in_mb | 2U; } else if ((int )ha->device_type < 0) { mcp->mb[1] = loop_id; mcp->mb[10] = 0U; mcp->out_mb = mcp->out_mb | 1026U; } else { mcp->mb[1] = (int )loop_id << 8U; mcp->out_mb = mcp->out_mb | 2U; } mcp->tov = 30U; mcp->flags = 4U; rval = qla2x00_mailbox_command(vha, mcp); if (rval == 0) { if ((unsigned int )mcp->mb[0] != 16384U) { ql_dbg(536870912U, vha, 4229, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); rval = 258; } else { ql_dbg(536903680U, vha, 4230, "Done %s.\n", "qla2x00_get_link_status"); dwords = 7U; diter = & stats->link_fail_cnt; siter = diter; goto ldv_66179; ldv_66178: tmp = diter; diter = diter + 1; tmp___0 = siter; siter = siter + 1; *tmp = *tmp___0; ldv_66179: tmp___1 = dwords; dwords = dwords - 1U; if (tmp___1 != 0U) { goto 
ldv_66178; } else { } } } else { ql_dbg(536870912U, vha, 4231, "Failed=%x.\n", rval); } return (rval); } } int qla24xx_get_isp_stats(scsi_qla_host_t *vha , struct link_statistics *stats , dma_addr_t stats_dma ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; uint32_t *siter ; uint32_t *diter ; uint32_t dwords ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t tmp___1 ; { mcp = & mc; ql_dbg(536903680U, vha, 4232, "Entered %s.\n", "qla24xx_get_isp_stats"); mcp->mb[0] = 109U; mcp->mb[2] = (unsigned short )((unsigned int )stats_dma >> 16); mcp->mb[3] = (unsigned short )stats_dma; mcp->mb[6] = (unsigned short )((unsigned int )(stats_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(stats_dma >> 32ULL); mcp->mb[8] = 39U; mcp->mb[9] = vha->vp_idx; mcp->mb[10] = 0U; mcp->out_mb = 1997U; mcp->in_mb = 7U; mcp->tov = 30U; mcp->flags = 4U; rval = qla2x00_mailbox_command(vha, mcp); if (rval == 0) { if ((unsigned int )mcp->mb[0] != 16384U) { ql_dbg(536870912U, vha, 4233, "Failed mb[0]=%x.\n", (int )mcp->mb[0]); rval = 258; } else { ql_dbg(536903680U, vha, 4234, "Done %s.\n", "qla24xx_get_isp_stats"); dwords = 39U; diter = & stats->link_fail_cnt; siter = diter; goto ldv_66194; ldv_66193: tmp = diter; diter = diter + 1; tmp___0 = siter; siter = siter + 1; *tmp = *tmp___0; ldv_66194: tmp___1 = dwords; dwords = dwords - 1U; if (tmp___1 != 0U) { goto ldv_66193; } else { } } } else { ql_dbg(536870912U, vha, 4235, "Failed=%x.\n", rval); } return (rval); } } int qla24xx_abort_command(srb_t *sp ) { int rval ; unsigned long flags ; struct abort_entry_24xx *abt ; dma_addr_t abt_dma ; uint32_t handle ; fc_port_t *fcport ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct req_que *req ; int tmp ; raw_spinlock_t *tmp___0 ; void *tmp___1 ; { flags = 0UL; fcport = sp->fcport; vha = fcport->vha; ha = vha->hw; req = vha->req; ql_dbg(536903680U, vha, 4236, "Entered %s.\n", "qla24xx_abort_command"); if (ql2xasynctmfenable != 0) { tmp = qla24xx_async_abort_command(sp); return (tmp); } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); handle = 1U; goto ldv_66214; ldv_66213: ; if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )sp) { goto ldv_66212; } else { } handle = handle + 1U; ldv_66214: ; if ((uint32_t )req->num_outstanding_cmds > handle) { goto ldv_66213; } else { } ldv_66212: spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((uint32_t )req->num_outstanding_cmds == handle) { return (258); } else { } tmp___1 = dma_pool_alloc(ha->s_dma_pool, 208U, & abt_dma); abt = (struct abort_entry_24xx *)tmp___1; if ((unsigned long )abt == (unsigned long )((struct abort_entry_24xx *)0)) { ql_log(1U, vha, 4237, "Failed to allocate abort IOCB.\n"); return (259); } else { } memset((void *)abt, 0, 64UL); abt->entry_type = 51U; abt->entry_count = 1U; abt->handle = ((unsigned int )req->id << 16) | abt->handle; abt->nport_handle = fcport->loop_id; abt->handle_to_abort = ((unsigned int )req->id << 16) | handle; abt->port_id[0] = fcport->d_id.b.al_pa; abt->port_id[1] = fcport->d_id.b.area; abt->port_id[2] = fcport->d_id.b.domain; abt->vp_index = (uint8_t )(fcport->vha)->vp_idx; abt->req_que_no = req->id; rval = qla2x00_issue_iocb(vha, (void *)abt, abt_dma, 0UL); if (rval != 0) { ql_dbg(536870912U, vha, 4238, "Failed to issue IOCB (%x).\n", rval); } else if ((unsigned int )abt->entry_status != 0U) { ql_dbg(536870912U, vha, 4239, "Failed to complete IOCB -- error status (%x).\n", (int )abt->entry_status); rval = 258; } else if ((unsigned int 
)abt->nport_handle != 0U) { ql_dbg(536870912U, vha, 4240, "Failed to complete IOCB -- completion status (%x).\n", (int )abt->nport_handle); if ((unsigned int )abt->nport_handle == 49U) { rval = 257; } else { rval = 258; } } else { ql_dbg(536903680U, vha, 4241, "Done %s.\n", "qla24xx_abort_command"); } dma_pool_free(ha->s_dma_pool, (void *)abt, abt_dma); return (rval); } } static int __qla24xx_issue_tmf(char *name , uint32_t type , struct fc_port *fcport , uint64_t l , int tag ) { int rval ; int rval2 ; struct tsk_mgmt_cmd *tsk ; struct sts_entry_24xx *sts ; dma_addr_t tsk_dma ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; void *tmp ; { vha = fcport->vha; ha = vha->hw; req = vha->req; ql_dbg(536903680U, vha, 4242, "Entered %s.\n", "__qla24xx_issue_tmf"); if (*((unsigned long *)ha + 2UL) != 0UL) { rsp = *(ha->rsp_q_map + ((unsigned long )tag + 1UL)); } else { rsp = req->rsp; } tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & tsk_dma); tsk = (struct tsk_mgmt_cmd *)tmp; if ((unsigned long )tsk == (unsigned long )((struct tsk_mgmt_cmd *)0)) { ql_log(1U, vha, 4243, "Failed to allocate task management IOCB.\n"); return (259); } else { } memset((void *)tsk, 0, 64UL); tsk->p.tsk.entry_type = 20U; tsk->p.tsk.entry_count = 1U; tsk->p.tsk.handle = ((unsigned int )req->id << 16) | tsk->p.tsk.handle; tsk->p.tsk.nport_handle = fcport->loop_id; tsk->p.tsk.timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; tsk->p.tsk.control_flags = type; tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; tsk->p.tsk.port_id[1] = fcport->d_id.b.area; tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; tsk->p.tsk.vp_index = (uint8_t )(fcport->vha)->vp_idx; if (type == 16U) { int_to_scsilun(l, & tsk->p.tsk.lun); host_to_fcp_swap((uint8_t *)(& tsk->p.tsk.lun), 8U); } else { } sts = & tsk->p.sts; rval = qla2x00_issue_iocb(vha, (void *)tsk, tsk_dma, 0UL); if (rval != 0) { ql_dbg(536870912U, vha, 4244, "Failed to issue %s reset IOCB (%x).\n", name, rval); } else if ((unsigned int )sts->entry_status != 0U) { ql_dbg(536870912U, vha, 4245, "Failed to complete IOCB -- error status (%x).\n", (int )sts->entry_status); rval = 258; } else if ((unsigned int )sts->comp_status != 0U) { ql_dbg(536870912U, vha, 4246, "Failed to complete IOCB -- completion status (%x).\n", (int )sts->comp_status); rval = 258; } else if (((int )sts->scsi_status & 256) != 0) { if (sts->rsp_data_len <= 3U) { ql_dbg(536903680U, vha, 4247, "Ignoring inconsistent data length -- not enough response info (%d).\n", sts->rsp_data_len); } else if ((unsigned int )sts->data[3] != 0U) { ql_dbg(536870912U, vha, 4248, "Failed to complete IOCB -- response (%x).\n", (int )sts->data[3]); rval = 258; } else { } } else { } rval2 = qla2x00_marker(vha, req, rsp, (int )fcport->loop_id, l, type != 16U); if (rval2 != 0) { ql_dbg(536870912U, vha, 4249, "Failed to issue marker IOCB (%x).\n", rval2); } else { ql_dbg(536903680U, vha, 4250, "Done %s.\n", "__qla24xx_issue_tmf"); } dma_pool_free(ha->s_dma_pool, (void *)tsk, tsk_dma); return (rval); } } int qla24xx_abort_target(struct fc_port *fcport , uint64_t l , int tag ) { struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { ha = (fcport->vha)->hw; if (ql2xasynctmfenable != 0 && (ha->device_type & 134217728U) != 0U) { tmp = qla2x00_async_tm_cmd(fcport, 2U, (uint32_t )l, (uint32_t )tag); return (tmp); } else { } tmp___0 = __qla24xx_issue_tmf((char *)"Target", 2U, fcport, l, tag); return (tmp___0); } } int qla24xx_lun_reset(struct fc_port *fcport , uint64_t l , int tag ) { struct qla_hw_data *ha ; int tmp ; int 
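/* qla24xx_lun_reset(): when ql2xasynctmfenable is set and the adapter is FWI2-capable
 * (device_type bit 0x08000000), the reset is forwarded to qla2x00_async_tm_cmd() with
 * task-management flag 16 (0x10, LUN reset); otherwise it falls back to the synchronous
 * __qla24xx_issue_tmf("Lun", 16, ...) path. */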
tmp___0 ; { ha = (fcport->vha)->hw; if (ql2xasynctmfenable != 0 && (ha->device_type & 134217728U) != 0U) { tmp = qla2x00_async_tm_cmd(fcport, 16U, (uint32_t )l, (uint32_t )tag); return (tmp); } else { } tmp___0 = __qla24xx_issue_tmf((char *)"Lun", 16U, fcport, l, tag); return (tmp___0); } } int qla2x00_system_error(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; if ((((((ha->device_type & 4U) == 0U && (ha->device_type & 8U) == 0U) && (ha->device_type & 16U) == 0U) && (ha->device_type & 32U) == 0U) && (ha->device_type & 64U) == 0U) && (ha->device_type & 134217728U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4251, "Entered %s.\n", "qla2x00_system_error"); mcp->mb[0] = 42U; mcp->out_mb = 1U; mcp->in_mb = 1U; mcp->tov = 5U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4252, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4253, "Done %s.\n", "qla2x00_system_error"); } return (rval); } } int qla2x00_write_serdes_word(scsi_qla_host_t *vha , uint16_t addr , uint16_t data ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; if (((vha->hw)->device_type & 32768U) == 0U && (((vha->hw)->device_type & 524288U) == 0U && ((vha->hw)->device_type & 1048576U) == 0U)) { return (258); } else { } ql_dbg(536903680U, vha, 4482, "Entered %s.\n", "qla2x00_write_serdes_word"); mcp->mb[0] = 3U; mcp->mb[1] = addr; if (((vha->hw)->device_type & 32768U) != 0U) { mcp->mb[2] = (unsigned int )data & 255U; } else { mcp->mb[2] = data; } mcp->mb[3] = 0U; mcp->out_mb = 15U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4483, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4484, "Done %s.\n", "qla2x00_write_serdes_word"); } return (rval); } } int qla2x00_read_serdes_word(scsi_qla_host_t *vha , uint16_t addr , uint16_t *data ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; if (((vha->hw)->device_type & 32768U) == 0U && (((vha->hw)->device_type & 524288U) == 0U && ((vha->hw)->device_type & 1048576U) == 0U)) { return (258); } else { } ql_dbg(536903680U, vha, 4485, "Entered %s.\n", "qla2x00_read_serdes_word"); mcp->mb[0] = 4U; mcp->mb[1] = addr; mcp->mb[3] = 0U; mcp->out_mb = 11U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (((vha->hw)->device_type & 32768U) != 0U) { *data = (unsigned int )mcp->mb[1] & 255U; } else { *data = mcp->mb[1]; } if (rval != 0) { ql_dbg(536870912U, vha, 4486, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4487, "Done %s.\n", "qla2x00_read_serdes_word"); } return (rval); } } int qla8044_write_serdes_word(scsi_qla_host_t *vha , uint32_t addr , uint32_t data ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; if (((vha->hw)->device_type & 262144U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4486, "Entered %s.\n", "qla8044_write_serdes_word"); mcp->mb[0] = 336U; mcp->mb[1] = 3U; mcp->mb[3] = (unsigned short )addr; mcp->mb[4] = (unsigned short )(addr >> 16); mcp->mb[5] = (unsigned short )data; mcp->mb[6] = (unsigned short )(data >> 16); mcp->out_mb = 123U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4487, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4488, "Done %s.\n", 
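/*
 * Nearly every routine in this file follows one pattern: a stack mbx_cmd_t
 * is filled in, mb[0] holds the command opcode, out_mb and in_mb are bit
 * maps selecting which of the 16-bit mailbox registers (the 64-byte mb
 * array that several routines memset) are loaded into and read back from
 * the chip, tov is the command timeout in seconds, and
 * qla2x00_mailbox_command() performs the exchange.  A minimal sketch of the
 * pattern, assuming the same mbx_cmd_t layout and a hypothetical opcode
 * MY_OPCODE, would be:
 *
 *     mbx_cmd_t mc, *mcp = &mc;
 *     mcp->mb[0] = MY_OPCODE;   // command code in mailbox 0 (hypothetical)
 *     mcp->mb[1] = some_param;  // command-specific parameter (hypothetical)
 *     mcp->out_mb = 3U;         // bits 0-1: load mb[0] and mb[1]
 *     mcp->in_mb  = 1U;         // bit 0: read back mb[0] (the status word)
 *     mcp->tov    = 30U;        // timeout in seconds
 *     mcp->flags  = 0U;
 *     rval = qla2x00_mailbox_command(vha, mcp);
 *
 * The first ql_dbg() argument is a debug-mask value: 536870912U is
 * 0x20000000 and 536903680U is 0x20008000, which appear to be the mailbox
 * debug mask without and with the verbose bit in the original driver.
 */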
"qla8044_write_serdes_word"); } return (rval); } } int qla8044_read_serdes_word(scsi_qla_host_t *vha , uint32_t addr , uint32_t *data ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; if (((vha->hw)->device_type & 262144U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4489, "Entered %s.\n", "qla8044_read_serdes_word"); mcp->mb[0] = 336U; mcp->mb[1] = 4U; mcp->mb[3] = (unsigned short )addr; mcp->mb[4] = (unsigned short )(addr >> 16); mcp->out_mb = 27U; mcp->in_mb = 7U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); *data = (uint32_t )(((int )mcp->mb[2] << 16) | (int )mcp->mb[1]); if (rval != 0) { ql_dbg(536870912U, vha, 4490, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4491, "Done %s.\n", "qla8044_read_serdes_word"); } return (rval); } } int qla2x00_set_serdes_params(scsi_qla_host_t *vha , uint16_t sw_em_1g , uint16_t sw_em_2g , uint16_t sw_em_4g ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4254, "Entered %s.\n", "qla2x00_set_serdes_params"); mcp->mb[0] = 16U; mcp->mb[1] = 1U; mcp->mb[2] = (uint16_t )((unsigned int )sw_em_1g | 32768U); mcp->mb[3] = (uint16_t )((unsigned int )sw_em_2g | 32768U); mcp->mb[4] = (uint16_t )((unsigned int )sw_em_4g | 32768U); mcp->out_mb = 31U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4255, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4256, "Done %s.\n", "qla2x00_set_serdes_params"); } return (rval); } } int qla2x00_stop_firmware(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4257, "Entered %s.\n", "qla2x00_stop_firmware"); mcp->mb[0] = 20U; mcp->mb[1] = 0U; mcp->out_mb = 3U; mcp->in_mb = 1U; mcp->tov = 5U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4258, "Failed=%x.\n", rval); if ((unsigned int )mcp->mb[0] == 16385U) { rval = 1; } else { } } else { ql_dbg(536903680U, vha, 4259, "Done %s.\n", "qla2x00_stop_firmware"); } return (rval); } } int qla2x00_enable_eft_trace(scsi_qla_host_t *vha , dma_addr_t eft_dma , uint16_t buffers ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int tmp ; long tmp___0 ; { mcp = & mc; ql_dbg(536903680U, vha, 4260, "Entered %s.\n", "qla2x00_enable_eft_trace"); if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } tmp = pci_channel_offline((vha->hw)->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (258); } else { } mcp->mb[0] = 39U; mcp->mb[1] = 4U; mcp->mb[2] = (unsigned short )eft_dma; mcp->mb[3] = (unsigned short )((unsigned int )eft_dma >> 16); mcp->mb[4] = (unsigned short )(eft_dma >> 32ULL); mcp->mb[5] = (unsigned short )((unsigned int )(eft_dma >> 32ULL) >> 16); mcp->mb[6] = buffers; mcp->mb[7] = 0U; mcp->out_mb = 255U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4261, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4262, "Done %s.\n", "qla2x00_enable_eft_trace"); } return (rval); } } int qla2x00_disable_eft_trace(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int tmp ; long tmp___0 ; { mcp = & mc; ql_dbg(536903680U, vha, 4263, "Entered %s.\n", 
"qla2x00_disable_eft_trace"); if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } tmp = pci_channel_offline((vha->hw)->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (258); } else { } mcp->mb[0] = 39U; mcp->mb[1] = 5U; mcp->out_mb = 3U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4264, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4265, "Done %s.\n", "qla2x00_disable_eft_trace"); } return (rval); } } int qla2x00_enable_fce_trace(scsi_qla_host_t *vha , dma_addr_t fce_dma , uint16_t buffers , uint16_t *mb , uint32_t *dwords ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int tmp ; long tmp___0 ; { mcp = & mc; ql_dbg(536903680U, vha, 4266, "Entered %s.\n", "qla2x00_enable_fce_trace"); if (((((vha->hw)->device_type & 2048U) == 0U && ((vha->hw)->device_type & 8192U) == 0U) && (((vha->hw)->device_type & 32768U) == 0U && ((vha->hw)->device_type & 65536U) == 0U)) && (((vha->hw)->device_type & 524288U) == 0U && ((vha->hw)->device_type & 1048576U) == 0U)) { return (258); } else { } tmp = pci_channel_offline((vha->hw)->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (258); } else { } mcp->mb[0] = 39U; mcp->mb[1] = 8U; mcp->mb[2] = (unsigned short )fce_dma; mcp->mb[3] = (unsigned short )((unsigned int )fce_dma >> 16); mcp->mb[4] = (unsigned short )(fce_dma >> 32ULL); mcp->mb[5] = (unsigned short )((unsigned int )(fce_dma >> 32ULL) >> 16); mcp->mb[6] = buffers; mcp->mb[7] = 0U; mcp->mb[8] = 0U; mcp->mb[9] = 2112U; mcp->mb[10] = 2112U; mcp->out_mb = 2047U; mcp->in_mb = 127U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4267, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4268, "Done %s.\n", "qla2x00_enable_fce_trace"); if ((unsigned long )mb != (unsigned long )((uint16_t *)0U)) { memcpy((void *)mb, (void const *)(& mcp->mb), 16UL); } else { } if ((unsigned long )dwords != (unsigned long )((uint32_t *)0U)) { *dwords = (uint32_t )buffers; } else { } } return (rval); } } int qla2x00_disable_fce_trace(scsi_qla_host_t *vha , uint64_t *wr , uint64_t *rd ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int tmp ; long tmp___0 ; { mcp = & mc; ql_dbg(536903680U, vha, 4269, "Entered %s.\n", "qla2x00_disable_fce_trace"); if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } tmp = pci_channel_offline((vha->hw)->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (258); } else { } mcp->mb[0] = 39U; mcp->mb[1] = 9U; mcp->mb[2] = 1U; mcp->out_mb = 7U; mcp->in_mb = 1023U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4270, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4271, "Done %s.\n", "qla2x00_disable_fce_trace"); if ((unsigned long )wr != (unsigned long )((uint64_t *)0ULL)) { *wr = ((((unsigned long long )mcp->mb[5] << 48) | ((unsigned long long )mcp->mb[4] << 32)) | ((unsigned long long )mcp->mb[3] << 16)) | (unsigned long long )mcp->mb[2]; } else { } if ((unsigned long )rd != (unsigned long )((uint64_t *)0ULL)) { *rd = ((((unsigned long long )mcp->mb[9] << 48) | ((unsigned long long )mcp->mb[8] << 32)) | ((unsigned long long )mcp->mb[7] << 16)) | (unsigned long long 
)mcp->mb[6]; } else { } } return (rval); } } int qla2x00_get_idma_speed(scsi_qla_host_t *vha , uint16_t loop_id , uint16_t *port_speed , uint16_t *mb ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; uint16_t tmp ; { mcp = & mc; ql_dbg(536903680U, vha, 4272, "Entered %s.\n", "qla2x00_get_idma_speed"); if (((vha->hw)->device_type & 67108864U) == 0U) { return (258); } else { } mcp->mb[0] = 26U; mcp->mb[1] = loop_id; tmp = 0U; mcp->mb[3] = tmp; mcp->mb[2] = tmp; mcp->mb[9] = vha->vp_idx; mcp->out_mb = 527U; mcp->in_mb = 11U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((unsigned long )mb != (unsigned long )((uint16_t *)0U)) { *mb = mcp->mb[0]; *(mb + 1UL) = mcp->mb[1]; *(mb + 3UL) = mcp->mb[3]; } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4273, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4274, "Done %s.\n", "qla2x00_get_idma_speed"); if ((unsigned long )port_speed != (unsigned long )((uint16_t *)0U)) { *port_speed = mcp->mb[3]; } else { } } return (rval); } } int qla2x00_set_idma_speed(scsi_qla_host_t *vha , uint16_t loop_id , uint16_t port_speed , uint16_t *mb ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4275, "Entered %s.\n", "qla2x00_set_idma_speed"); if (((vha->hw)->device_type & 67108864U) == 0U) { return (258); } else { } mcp->mb[0] = 26U; mcp->mb[1] = loop_id; mcp->mb[2] = 1U; if (((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 16384U) != 0U) || ((vha->hw)->device_type & 65536U) != 0U) || ((vha->hw)->device_type & 262144U) != 0U) { mcp->mb[3] = (unsigned int )port_speed & 63U; } else { mcp->mb[3] = (unsigned int )port_speed & 7U; } mcp->mb[9] = vha->vp_idx; mcp->out_mb = 527U; mcp->in_mb = 11U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((unsigned long )mb != (unsigned long )((uint16_t *)0U)) { *mb = mcp->mb[0]; *(mb + 1UL) = mcp->mb[1]; *(mb + 3UL) = mcp->mb[3]; } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4276, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4277, "Done %s.\n", "qla2x00_set_idma_speed"); } return (rval); } } void qla24xx_report_id_acquisition(scsi_qla_host_t *vha , struct vp_rpt_id_entry_24xx *rptid_entry ) { uint8_t vp_idx ; uint16_t stat ; struct qla_hw_data *ha ; scsi_qla_host_t *vp ; unsigned long flags ; int found ; void *wwpn ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { stat = rptid_entry->vp_idx; ha = vha->hw; ql_dbg(536903680U, vha, 4278, "Entered %s.\n", "qla24xx_report_id_acquisition"); if ((unsigned int )rptid_entry->entry_status != 0U) { return; } else { } if ((unsigned int )rptid_entry->format == 0U) { ql_dbg(536903680U, vha, 4279, "Format 0 : Number of VPs setup %d, number of VPs acquired %d.\n", (int )((unsigned char )((int )rptid_entry->vp_count >> 8)), (int )((unsigned char )rptid_entry->vp_count)); ql_dbg(536903680U, vha, 4280, "Primary port id %02x%02x%02x.\n", (int )rptid_entry->port_id[2], (int )rptid_entry->port_id[1], (int )rptid_entry->port_id[0]); } else if ((unsigned int )rptid_entry->format == 1U) { vp_idx = (unsigned char )stat; ql_dbg(536903680U, vha, 4281, "Format 1: VP[%d] enabled - status %d - with port id %02x%02x%02x.\n", (int )vp_idx, (int )((unsigned char )((int )stat >> 8)), (int )rptid_entry->port_id[2], (int )rptid_entry->port_id[1], (int )rptid_entry->port_id[0]); if ((unsigned int )vp_idx == 0U) { wwpn = (void *)(& (ha->init_cb)->port_name); if ((unsigned int )((unsigned char )((int )stat >> 8)) == 0U) { if (((int 
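/*
 * The EFT/FCE trace commands above split a 64-bit DMA address across four
 * 16-bit mailbox registers, mb[2] taking bits 15..0, mb[3] bits 31..16,
 * mb[4] bits 47..32 and mb[5] bits 63..48, and qla2x00_disable_fce_trace
 * reassembles the FCE write and read pointers from mb[2..5] and mb[6..9]
 * in the same order.  A sketch of the split, assuming a 64-bit dma_addr_t:
 *
 *     mcp->mb[2] = (uint16_t)(dma);        // bits 15..0
 *     mcp->mb[3] = (uint16_t)(dma >> 16);  // bits 31..16
 *     mcp->mb[4] = (uint16_t)(dma >> 32);  // bits 47..32
 *     mcp->mb[5] = (uint16_t)(dma >> 48);  // bits 63..48
 *
 * The assignment is command-specific, though: qla2x00_dump_ram further down
 * puts the high half of the low dword in mb[2] and the low half in mb[3].
 */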
)rptid_entry->vp_idx_map[1] & 64) != 0) { wwpn = (void *)(& rptid_entry->reserved_4) + 8U; } else { } } else { } memcpy((void *)(& vha->port_name), (void const *)wwpn, 8UL); ((struct fc_host_attrs *)(vha->host)->shost_data)->port_name = wwn_to_u64((u8 *)(& vha->port_name)); ql_dbg(536870912U, vha, 4120, "FA-WWN portname %016llx (%x)\n", ((struct fc_host_attrs *)(vha->host)->shost_data)->port_name, (int )((unsigned char )((int )stat >> 8))); } else { } vp = vha; if ((unsigned int )vp_idx == 0U) { goto reg_needed; } else { } if ((unsigned int )((unsigned char )((int )stat >> 8)) != 0U && (unsigned int )((unsigned char )((int )stat >> 8)) != 2U) { ql_dbg(536870912U, vha, 4282, "Could not acquire ID for VP[%d].\n", (int )vp_idx); return; } else { } found = 0; tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)ha->vp_list.next; vp = (scsi_qla_host_t *)__mptr; goto ldv_66388; ldv_66387: ; if ((int )((unsigned short )vp_idx) == (int )vp->vp_idx) { found = 1; goto ldv_66386; } else { } __mptr___0 = (struct list_head const *)vp->list.next; vp = (scsi_qla_host_t *)__mptr___0; ldv_66388: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_66387; } else { } ldv_66386: spin_unlock_irqrestore(& ha->vport_slock, flags); if (found == 0) { return; } else { } vp->d_id.b.domain = rptid_entry->port_id[2]; vp->d_id.b.area = rptid_entry->port_id[1]; vp->d_id.b.al_pa = rptid_entry->port_id[0]; set_bit(0L, (unsigned long volatile *)(& vp->vp_flags)); reg_needed: set_bit(9L, (unsigned long volatile *)(& vp->dpc_flags)); set_bit(12L, (unsigned long volatile *)(& vp->dpc_flags)); set_bit(14L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else { } return; } } int qla24xx_modify_vp_config(scsi_qla_host_t *vha ) { int rval ; struct vp_config_entry_24xx *vpmod ; dma_addr_t vpmod_dma ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp ; void *tmp___0 ; { ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; ql_dbg(536903680U, vha, 4283, "Entered %s.\n", "qla24xx_modify_vp_config"); tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & vpmod_dma); vpmod = (struct vp_config_entry_24xx *)tmp___0; if ((unsigned long )vpmod == (unsigned long )((struct vp_config_entry_24xx *)0)) { ql_log(1U, vha, 4284, "Failed to allocate modify VP IOCB.\n"); return (259); } else { } memset((void *)vpmod, 0, 64UL); vpmod->entry_type = 49U; vpmod->entry_count = 1U; vpmod->command = 1U; vpmod->vp_count = 1U; vpmod->vp_index1 = (uint8_t )vha->vp_idx; vpmod->options_idx1 = 56U; qlt_modify_vp_config(vha, vpmod); memcpy((void *)(& vpmod->node_name_idx1), (void const *)(& vha->node_name), 8UL); memcpy((void *)(& vpmod->port_name_idx1), (void const *)(& vha->port_name), 8UL); vpmod->entry_count = 1U; rval = qla2x00_issue_iocb(base_vha, (void *)vpmod, vpmod_dma, 0UL); if (rval != 0) { ql_dbg(536870912U, vha, 4285, "Failed to issue VP config IOCB (%x).\n", rval); } else if ((unsigned int )vpmod->comp_status != 0U) { ql_dbg(536870912U, vha, 4286, "Failed to complete IOCB -- error status (%x).\n", (int )vpmod->comp_status); rval = 258; } else if ((unsigned int )vpmod->comp_status != 0U) { ql_dbg(536870912U, vha, 4287, "Failed to complete IOCB -- completion status (%x).\n", (int )vpmod->comp_status); rval = 258; } else { ql_dbg(536903680U, vha, 4288, "Done %s.\n", "qla24xx_modify_vp_config"); fc_vport_set_state(vha->fc_vport, 4); } dma_pool_free(ha->s_dma_pool, (void *)vpmod, vpmod_dma); return 
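/*
 * qla24xx_report_id_acquisition above decodes a VP report-ID IOCB: format 0
 * reports how many virtual ports the firmware set up, format 1 reports the
 * status and acquired port id for a single VP index; the goto-based walk
 * over ha->vp_list (labels ldv_66386..ldv_66388 are a loop the CIL front
 * end lowered to gotos) locates the matching scsi_qla_host before the DPC
 * flags are set.  In qla24xx_modify_vp_config the comp_status field is
 * tested twice with an identical condition; this appears to mirror the
 * original driver, where the second test compares against a completion code
 * whose numeric value is 0, so both conditions collapse to the same
 * expression after preprocessing.
 */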
(rval); } } int qla24xx_control_vp(scsi_qla_host_t *vha , int cmd ) { int rval ; int map ; int pos ; struct vp_ctrl_entry_24xx *vce ; dma_addr_t vce_dma ; struct qla_hw_data *ha ; int vp_index ; struct scsi_qla_host *base_vha ; void *tmp ; void *tmp___0 ; { ha = vha->hw; vp_index = (int )vha->vp_idx; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; ql_dbg(536903680U, vha, 4289, "Entered %s enabling index %d.\n", "qla24xx_control_vp", vp_index); if (vp_index == 0 || (int )ha->max_npiv_vports <= vp_index) { return (6); } else { } tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & vce_dma); vce = (struct vp_ctrl_entry_24xx *)tmp___0; if ((unsigned long )vce == (unsigned long )((struct vp_ctrl_entry_24xx *)0)) { ql_log(1U, vha, 4290, "Failed to allocate VP control IOCB.\n"); return (259); } else { } memset((void *)vce, 0, 64UL); vce->entry_type = 48U; vce->entry_count = 1U; vce->command = (unsigned short )cmd; vce->vp_count = 1U; map = (vp_index + -1) / 8; pos = (vp_index + -1) & 7; mutex_lock_nested(& ha->vport_lock, 0U); vce->vp_idx_map[map] = (uint8_t )((int )((signed char )vce->vp_idx_map[map]) | (int )((signed char )(1 << pos))); mutex_unlock(& ha->vport_lock); rval = qla2x00_issue_iocb(base_vha, (void *)vce, vce_dma, 0UL); if (rval != 0) { ql_dbg(536870912U, vha, 4291, "Failed to issue VP control IOCB (%x).\n", rval); } else if ((unsigned int )vce->entry_status != 0U) { ql_dbg(536870912U, vha, 4292, "Failed to complete IOCB -- error status (%x).\n", (int )vce->entry_status); rval = 258; } else if ((unsigned int )vce->comp_status != 0U) { ql_dbg(536870912U, vha, 4293, "Failed to complete IOCB -- completion status (%x).\n", (int )vce->comp_status); rval = 258; } else { ql_dbg(536903680U, vha, 4294, "Done %s.\n", "qla24xx_control_vp"); } dma_pool_free(ha->s_dma_pool, (void *)vce, vce_dma); return (rval); } } int qla2x00_send_change_request(scsi_qla_host_t *vha , uint16_t format , uint16_t vp_idx ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4295, "Entered %s.\n", "qla2x00_send_change_request"); mcp->mb[0] = 112U; mcp->mb[1] = format; mcp->mb[9] = vp_idx; mcp->out_mb = 515U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval == 0) { if ((unsigned int )mcp->mb[0] != 16384U) { rval = 2; } else { } } else { rval = 2; } return (rval); } } int qla2x00_dump_ram(scsi_qla_host_t *vha , dma_addr_t req_dma , uint32_t addr , uint32_t size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4105, "Entered %s.\n", "qla2x00_dump_ram"); if ((unsigned int )((unsigned short )(addr >> 16)) != 0U || ((vha->hw)->device_type & 134217728U) != 0U) { mcp->mb[0] = 12U; mcp->mb[8] = (unsigned short )(addr >> 16); mcp->out_mb = 257U; } else { mcp->mb[0] = 10U; mcp->out_mb = 1U; } mcp->mb[1] = (unsigned short )addr; mcp->mb[2] = (unsigned short )((unsigned int )req_dma >> 16); mcp->mb[3] = (unsigned short )req_dma; mcp->mb[6] = (unsigned short )((unsigned int )(req_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(req_dma >> 32ULL); mcp->out_mb = mcp->out_mb | 206U; if (((vha->hw)->device_type & 134217728U) != 0U) { mcp->mb[4] = (unsigned short )(size >> 16); mcp->mb[5] = (unsigned short )size; mcp->out_mb = mcp->out_mb | 48U; } else { mcp->mb[4] = (unsigned short )size; mcp->out_mb = mcp->out_mb | 16U; } mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4104, "Failed=%x mb[0]=%x.\n", rval,
(int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4103, "Done %s.\n", "qla2x00_dump_ram"); } return (rval); } } int qla84xx_verify_chip(struct scsi_qla_host *vha , uint16_t *status ) { int rval ; int retry ; struct cs84xx_mgmt_cmd *mn ; dma_addr_t mn_dma ; uint16_t options ; unsigned long flags ; struct qla_hw_data *ha ; void *tmp ; raw_spinlock_t *tmp___0 ; { ha = vha->hw; ql_dbg(536903680U, vha, 4296, "Entered %s.\n", "qla84xx_verify_chip"); tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & mn_dma); mn = (struct cs84xx_mgmt_cmd *)tmp; if ((unsigned long )mn == (unsigned long )((struct cs84xx_mgmt_cmd *)0)) { return (259); } else { } options = (ha->cs84xx)->fw_update != 0U ? 2U : 0U; options = (uint16_t )((unsigned int )options | 16384U); ldv_66451: retry = 0; memset((void *)mn, 0, 64UL); mn->p.req.entry_type = 27U; mn->p.req.entry_count = 1U; mn->p.req.options = options; ql_dbg(537001984U, vha, 4380, "Dump of Verify Request.\n"); ql_dump_buffer(537001984U, vha, 4382, (uint8_t *)mn, 64U); rval = qla2x00_issue_iocb_timeout(vha, (void *)mn, mn_dma, 0UL, 120U); if (rval != 0) { ql_dbg(536870912U, vha, 4299, "Failed to issue verify IOCB (%x).\n", rval); goto verify_done; } else { } ql_dbg(537001984U, vha, 4368, "Dump of Verify Response.\n"); ql_dump_buffer(537001984U, vha, 4376, (uint8_t *)mn, 64U); *status = mn->p.rsp.comp_status; *(status + 1UL) = (unsigned int )*status == 3U ? mn->p.rsp.failure_code : 0U; ql_dbg(536903680U, vha, 4302, "cs=%x fc=%x.\n", (int )*status, (int )*(status + 1UL)); if ((unsigned int )*status != 0U) { rval = 258; if (((int )options & 1) == 0) { ql_dbg(536870912U, vha, 4303, "Firmware update failed. Retrying without update firmware.\n"); options = (uint16_t )((unsigned int )options | 1U); options = (unsigned int )options & 65533U; retry = 1; } else { } } else { ql_dbg(536903680U, vha, 4304, "Firmware updated to %x.\n", mn->p.rsp.fw_ver); tmp___0 = spinlock_check(& (ha->cs84xx)->access_lock); flags = _raw_spin_lock_irqsave(tmp___0); (ha->cs84xx)->op_fw_version = mn->p.rsp.fw_ver; spin_unlock_irqrestore(& (ha->cs84xx)->access_lock, flags); } if (retry != 0) { goto ldv_66451; } else { } verify_done: dma_pool_free(ha->s_dma_pool, (void *)mn, mn_dma); if (rval != 0) { ql_dbg(536870912U, vha, 4305, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4306, "Done %s.\n", "qla84xx_verify_chip"); } return (rval); } } int qla25xx_init_req_que(struct scsi_qla_host *vha , struct req_que *req ) { int rval ; unsigned long flags ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; uint16_t tmp ; raw_spinlock_t *tmp___0 ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4307, "Entered %s.\n", "qla25xx_init_req_que"); if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { req->options = (uint16_t )((unsigned int )req->options | 8192U); } else { } mcp->mb[0] = 31U; mcp->mb[1] = req->options; mcp->mb[2] = (unsigned short )((unsigned int )req->dma >> 16); mcp->mb[3] = (unsigned short )req->dma; mcp->mb[6] = (unsigned short )((unsigned int )(req->dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(req->dma >> 32ULL); mcp->mb[5] = req->length; if ((unsigned long )req->rsp != (unsigned long )((struct rsp_que *)0)) { mcp->mb[10] = (req->rsp)->id; } else { } mcp->mb[12] = req->qos; mcp->mb[11] = req->vp_idx; mcp->mb[13] = req->rid; if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { mcp->mb[15] = 0U; } else { } mcp->mb[4] = req->id; mcp->mb[8] = 0U; tmp = 
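/*
 * qla25xx_init_req_que (this routine) and qla25xx_init_rsp_que below
 * program an additional queue pair via opcode 31 (0x1f): the queue's DMA
 * address goes in mb[2]/mb[3] (low dword) and mb[6]/mb[7] (high dword)
 * together with its length, id, vp index and options (and, for the response
 * queue, the MSI-X vector), and, unless bit 0 of the queue options is set,
 * the hardware in/out index registers are zeroed with writel() under
 * ha->hardware_lock before the command is issued.  The extra out_mb/in_mb
 * bits added for device_type values 0x2000, 0x8000, 0x10000, 0x80000 and
 * 0x100000 presumably select the 81xx/83xx/27xx register layouts that the
 * original IS_QLA81XX-style macros distinguish.
 */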
0U; *(req->out_ptr) = tmp; mcp->mb[9] = tmp; mcp->out_mb = 32767U; mcp->in_mb = 1U; mcp->flags = 2U; mcp->tov = 60U; if (((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { mcp->in_mb = mcp->in_mb | 2U; } else { } if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { mcp->out_mb = mcp->out_mb | 32768U; mcp->in_mb = mcp->in_mb | 896U; } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); if (((int )req->options & 1) == 0) { writel(0U, (void volatile *)req->req_q_in); if (((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) && ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U)) { writel(0U, (void volatile *)req->req_q_out); } else { } } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4308, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4309, "Done %s.\n", "qla25xx_init_req_que"); } return (rval); } } int qla25xx_init_rsp_que(struct scsi_qla_host *vha , struct rsp_que *rsp ) { int rval ; unsigned long flags ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; uint16_t tmp ; raw_spinlock_t *tmp___0 ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4310, "Entered %s.\n", "qla25xx_init_rsp_que"); if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { rsp->options = (uint16_t )((unsigned int )rsp->options | 8192U); } else { } mcp->mb[0] = 31U; mcp->mb[1] = rsp->options; mcp->mb[2] = (unsigned short )((unsigned int )rsp->dma >> 16); mcp->mb[3] = (unsigned short )rsp->dma; mcp->mb[6] = (unsigned short )((unsigned int )(rsp->dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(rsp->dma >> 32ULL); mcp->mb[5] = rsp->length; mcp->mb[14] = (rsp->msix)->entry; mcp->mb[13] = rsp->rid; if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { mcp->mb[15] = 0U; } else { } mcp->mb[4] = rsp->id; tmp = 0U; *(rsp->in_ptr) = tmp; mcp->mb[8] = tmp; mcp->mb[9] = 0U; mcp->out_mb = 25599U; mcp->in_mb = 1U; mcp->flags = 2U; mcp->tov = 60U; if ((ha->device_type & 8192U) != 0U) { mcp->out_mb = mcp->out_mb | 7168U; mcp->in_mb = mcp->in_mb | 2U; } else if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { mcp->out_mb = mcp->out_mb | 39936U; mcp->in_mb = mcp->in_mb | 2U; mcp->in_mb = mcp->in_mb | 896U; } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); if (((int )rsp->options & 1) == 0) { writel(0U, (void volatile *)rsp->rsp_q_out); if ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) { writel(0U, (void volatile *)rsp->rsp_q_in); } else { } } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4311, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4312, "Done %s.\n", "qla25xx_init_rsp_que"); } return (rval); } } int qla81xx_idc_ack(scsi_qla_host_t *vha , uint16_t *mb ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4313, 
"Entered %s.\n", "qla81xx_idc_ack"); mcp->mb[0] = 257U; memcpy((void *)(& mcp->mb) + 1U, (void const *)mb, 14UL); mcp->out_mb = 255U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4314, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4315, "Done %s.\n", "qla81xx_idc_ack"); } return (rval); } } int qla81xx_fac_get_sector_size(scsi_qla_host_t *vha , uint32_t *sector_size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4316, "Entered %s.\n", "qla81xx_fac_get_sector_size"); if ((((vha->hw)->device_type & 8192U) == 0U && (((vha->hw)->device_type & 32768U) == 0U && ((vha->hw)->device_type & 65536U) == 0U)) && (((vha->hw)->device_type & 524288U) == 0U && ((vha->hw)->device_type & 1048576U) == 0U)) { return (258); } else { } mcp->mb[0] = 62U; mcp->mb[1] = 5U; mcp->out_mb = 3U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4317, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4318, "Done %s.\n", "qla81xx_fac_get_sector_size"); *sector_size = (uint32_t )mcp->mb[1]; } return (rval); } } int qla81xx_fac_do_write_enable(scsi_qla_host_t *vha , int enable ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; if ((((vha->hw)->device_type & 8192U) == 0U && (((vha->hw)->device_type & 32768U) == 0U && ((vha->hw)->device_type & 65536U) == 0U)) && (((vha->hw)->device_type & 524288U) == 0U && ((vha->hw)->device_type & 1048576U) == 0U)) { return (258); } else { } ql_dbg(536903680U, vha, 4319, "Entered %s.\n", "qla81xx_fac_do_write_enable"); mcp->mb[0] = 62U; mcp->mb[1] = enable != 0; mcp->out_mb = 3U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4320, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4321, "Done %s.\n", "qla81xx_fac_do_write_enable"); } return (rval); } } int qla81xx_fac_erase_sector(scsi_qla_host_t *vha , uint32_t start , uint32_t finish ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; if ((((vha->hw)->device_type & 8192U) == 0U && (((vha->hw)->device_type & 32768U) == 0U && ((vha->hw)->device_type & 65536U) == 0U)) && (((vha->hw)->device_type & 524288U) == 0U && ((vha->hw)->device_type & 1048576U) == 0U)) { return (258); } else { } ql_dbg(536903680U, vha, 4322, "Entered %s.\n", "qla81xx_fac_erase_sector"); mcp->mb[0] = 62U; mcp->mb[1] = 2U; mcp->mb[2] = (unsigned short )start; mcp->mb[3] = (unsigned short )(start >> 16); mcp->mb[4] = (unsigned short )finish; mcp->mb[5] = (unsigned short )(finish >> 16); mcp->out_mb = 63U; mcp->in_mb = 7U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4323, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2]); } else { ql_dbg(536903680U, vha, 4324, "Done %s.\n", "qla81xx_fac_erase_sector"); } return (rval); } } int qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { rval = 0; mcp = & mc; ql_dbg(536903680U, vha, 4325, "Entered %s.\n", "qla81xx_restart_mpi_firmware"); mcp->mb[0] = 61U; mcp->out_mb = 1U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 
4326, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4327, "Done %s.\n", "qla81xx_restart_mpi_firmware"); } return (rval); } } int qla82xx_set_driver_version(scsi_qla_host_t *vha , char *version ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int i ; int len ; uint16_t *str ; struct qla_hw_data *ha ; size_t tmp ; { mcp = & mc; ha = vha->hw; if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4475, "Entered %s.\n", "qla82xx_set_driver_version"); str = (uint16_t *)version; tmp = strlen((char const *)version); len = (int )tmp; mcp->mb[0] = 89U; mcp->mb[1] = 2304U; mcp->out_mb = 3U; i = 4; goto ldv_66532; ldv_66531: mcp->mb[i] = __cpu_to_le16p((__u16 const *)str); mcp->out_mb = mcp->out_mb | (uint32_t )(1 << i); i = i + 1; str = str + 1; len = len + -2; ldv_66532: ; if (i <= 15 && len != 0) { goto ldv_66531; } else { } goto ldv_66535; ldv_66534: mcp->mb[i] = 0U; mcp->out_mb = mcp->out_mb | (uint32_t )(1 << i); i = i + 1; ldv_66535: ; if (i <= 15) { goto ldv_66534; } else { } mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4476, "Failed=%x mb[0]=%x,%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4477, "Done %s.\n", "qla82xx_set_driver_version"); } return (rval); } } int qla25xx_set_driver_version(scsi_qla_host_t *vha , char *version ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int len ; uint16_t dwlen ; uint8_t *str ; dma_addr_t str_dma ; struct qla_hw_data *ha ; void *tmp ; size_t tmp___0 ; size_t tmp___1 ; { mcp = & mc; ha = vha->hw; if ((((ha->device_type & 134217728U) == 0U || ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U)) || (ha->device_type & 8192U) != 0U) || ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U)) { return (258); } else { } ql_dbg(536903680U, vha, 4478, "Entered %s.\n", "qla25xx_set_driver_version"); tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & str_dma); str = (uint8_t *)tmp; if ((unsigned long )str == (unsigned long )((uint8_t *)0U)) { ql_log(1U, vha, 4479, "Failed to allocate driver version param.\n"); return (259); } else { } memcpy((void *)str, (void const *)"\a\003\021", 4UL); dwlen = (uint16_t )*str; len = ((int )dwlen + -1) * 4; memset((void *)str + 4U, 0, (size_t )len); tmp___1 = strlen((char const *)version); if ((size_t )len > tmp___1) { tmp___0 = strlen((char const *)version); len = (int )tmp___0; } else { } memcpy((void *)str + 4U, (void const *)version, (size_t )len); mcp->mb[0] = 89U; mcp->mb[1] = (uint16_t )((unsigned int )dwlen | 2304U); mcp->mb[2] = (unsigned short )((unsigned int )str_dma >> 16); mcp->mb[3] = (unsigned short )str_dma; mcp->mb[6] = (unsigned short )((unsigned int )(str_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(str_dma >> 32ULL); mcp->out_mb = 207U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4480, "Failed=%x mb[0]=%x,%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4481, "Done %s.\n", "qla25xx_set_driver_version"); } dma_pool_free(ha->s_dma_pool, (void *)str, str_dma); return (rval); } } static int qla2x00_read_asic_temperature(scsi_qla_host_t *vha , uint16_t *temp ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t 
*mcp ; { mcp = & mc; if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4441, "Entered %s.\n", "qla2x00_read_asic_temperature"); mcp->mb[0] = 90U; mcp->mb[1] = 3072U; mcp->out_mb = 3U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); *temp = mcp->mb[1]; if (rval != 0) { ql_dbg(536870912U, vha, 4442, "Failed=%x mb[0]=%x,%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4443, "Done %s.\n", "qla2x00_read_asic_temperature"); } return (rval); } } int qla2x00_read_sfp(scsi_qla_host_t *vha , dma_addr_t sfp_dma , uint8_t *sfp , uint16_t dev , uint16_t off , uint16_t len , uint16_t opt ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4328, "Entered %s.\n", "qla2x00_read_sfp"); if ((ha->device_type & 134217728U) == 0U) { return (258); } else { } if ((unsigned int )len == 1U) { opt = (uint16_t )((unsigned int )opt | 1U); } else { } mcp->mb[0] = 49U; mcp->mb[1] = dev; mcp->mb[2] = (unsigned short )((unsigned int )sfp_dma >> 16); mcp->mb[3] = (unsigned short )sfp_dma; mcp->mb[6] = (unsigned short )((unsigned int )(sfp_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(sfp_dma >> 32ULL); mcp->mb[8] = len; mcp->mb[9] = off; mcp->mb[10] = opt; mcp->out_mb = 1999U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((int )opt & 1) { *sfp = (uint8_t )mcp->mb[1]; } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4329, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4330, "Done %s.\n", "qla2x00_read_sfp"); } return (rval); } } int qla2x00_write_sfp(scsi_qla_host_t *vha , dma_addr_t sfp_dma , uint8_t *sfp , uint16_t dev , uint16_t off , uint16_t len , uint16_t opt ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4331, "Entered %s.\n", "qla2x00_write_sfp"); if ((ha->device_type & 134217728U) == 0U) { return (258); } else { } if ((unsigned int )len == 1U) { opt = (uint16_t )((unsigned int )opt | 1U); } else { } if ((int )opt & 1) { len = (uint16_t )*sfp; } else { } mcp->mb[0] = 48U; mcp->mb[1] = dev; mcp->mb[2] = (unsigned short )((unsigned int )sfp_dma >> 16); mcp->mb[3] = (unsigned short )sfp_dma; mcp->mb[6] = (unsigned short )((unsigned int )(sfp_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(sfp_dma >> 32ULL); mcp->mb[8] = len; mcp->mb[9] = off; mcp->mb[10] = opt; mcp->out_mb = 1999U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4332, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4333, "Done %s.\n", "qla2x00_write_sfp"); } return (rval); } } int qla2x00_get_xgmac_stats(scsi_qla_host_t *vha , dma_addr_t stats_dma , uint16_t size_in_bytes , uint16_t *actual_size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4334, "Entered %s.\n", "qla2x00_get_xgmac_stats"); if (((((vha->hw)->device_type & 8192U) == 0U && ((vha->hw)->device_type & 16384U) == 0U) && ((vha->hw)->device_type & 65536U) == 0U) && ((vha->hw)->device_type & 262144U) == 0U) { return (258); } else { } mcp->mb[0] = 122U; mcp->mb[2] = (unsigned short )((unsigned int )stats_dma >> 16); mcp->mb[3] = (unsigned short )stats_dma; mcp->mb[6] = (unsigned short )((unsigned int )(stats_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short 
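/*
 * qla2x00_read_sfp and qla2x00_write_sfp above (opcodes 49 / 0x31 and
 * 48 / 0x30 in mb[0]) move SFP transceiver data either through the DMA
 * buffer addressed by sfp_dma or, when len == 1, directly in a mailbox
 * register: bit 0 of the option word selects the single-byte mode, which is
 * why the read path copies mb[1] into *sfp and the write path passes the
 * data byte through the length register.  qla2x00_read_asic_temperature
 * uses opcode 90 (0x5a) with parameter 0x0c00, presumably a
 * get-parameters/temperature-sensor request, and returns the value in mb[1].
 */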
)(stats_dma >> 32ULL); mcp->mb[8] = (uint16_t )((int )size_in_bytes >> 2); mcp->out_mb = 461U; mcp->in_mb = 7U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4335, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2]); } else { ql_dbg(536903680U, vha, 4336, "Done %s.\n", "qla2x00_get_xgmac_stats"); *actual_size = (int )mcp->mb[2] << 2U; } return (rval); } } int qla2x00_get_dcbx_params(scsi_qla_host_t *vha , dma_addr_t tlv_dma , uint16_t size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4337, "Entered %s.\n", "qla2x00_get_dcbx_params"); if (((((vha->hw)->device_type & 8192U) == 0U && ((vha->hw)->device_type & 16384U) == 0U) && ((vha->hw)->device_type & 65536U) == 0U) && ((vha->hw)->device_type & 262144U) == 0U) { return (258); } else { } mcp->mb[0] = 81U; mcp->mb[1] = 0U; mcp->mb[2] = (unsigned short )((unsigned int )tlv_dma >> 16); mcp->mb[3] = (unsigned short )tlv_dma; mcp->mb[6] = (unsigned short )((unsigned int )(tlv_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(tlv_dma >> 32ULL); mcp->mb[8] = size; mcp->out_mb = 463U; mcp->in_mb = 7U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4338, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2]); } else { ql_dbg(536903680U, vha, 4339, "Done %s.\n", "qla2x00_get_dcbx_params"); } return (rval); } } int qla2x00_read_ram_word(scsi_qla_host_t *vha , uint32_t risc_addr , uint32_t *data ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4340, "Entered %s.\n", "qla2x00_read_ram_word"); if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } mcp->mb[0] = 15U; mcp->mb[1] = (unsigned short )risc_addr; mcp->mb[8] = (unsigned short )(risc_addr >> 16); mcp->out_mb = 259U; mcp->in_mb = 13U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4341, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4342, "Done %s.\n", "qla2x00_read_ram_word"); *data = (uint32_t )(((int )mcp->mb[3] << 16) | (int )mcp->mb[2]); } return (rval); } } int qla2x00_loopback_test(scsi_qla_host_t *vha , struct msg_echo_lb *mreq , uint16_t *mresp ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4343, "Entered %s.\n", "qla2x00_loopback_test"); memset((void *)(& mcp->mb), 0, 64UL); mcp->mb[0] = 69U; mcp->mb[1] = (uint16_t )((unsigned int )mreq->options | 64U); mcp->mb[10] = (unsigned short )mreq->transfer_size; mcp->mb[11] = (unsigned short )(mreq->transfer_size >> 16); mcp->mb[14] = (unsigned short )mreq->send_dma; mcp->mb[15] = (unsigned short )((unsigned int )mreq->send_dma >> 16); mcp->mb[20] = (unsigned short )(mreq->send_dma >> 32ULL); mcp->mb[21] = (unsigned short )((unsigned int )(mreq->send_dma >> 32ULL) >> 16); mcp->mb[16] = (unsigned short )mreq->rcv_dma; mcp->mb[17] = (unsigned short )((unsigned int )mreq->rcv_dma >> 16); mcp->mb[6] = (unsigned short )(mreq->rcv_dma >> 32ULL); mcp->mb[7] = (unsigned short )((unsigned int )(mreq->rcv_dma >> 32ULL) >> 16); mcp->mb[18] = (unsigned short )mreq->iteration_count; mcp->mb[19] = (unsigned short )(mreq->iteration_count >> 16); mcp->out_mb = 4193475U; if (((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 16384U) != 0U) || ((vha->hw)->device_type & 
65536U) != 0U) || ((vha->hw)->device_type & 262144U) != 0U) { mcp->out_mb = mcp->out_mb | 4U; } else { } mcp->in_mb = 786447U; mcp->buf_size = (long )mreq->transfer_size; mcp->tov = 30U; mcp->flags = 7U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4344, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x mb[19]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2], (int )mcp->mb[3], (int )mcp->mb[18], (int )mcp->mb[19]); } else { ql_dbg(536903680U, vha, 4345, "Done %s.\n", "qla2x00_loopback_test"); } memcpy((void *)mresp, (void const *)(& mcp->mb), 64UL); return (rval); } } int qla2x00_echo_test(scsi_qla_host_t *vha , struct msg_echo_lb *mreq , uint16_t *mresp ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4346, "Entered %s.\n", "qla2x00_echo_test"); memset((void *)(& mcp->mb), 0, 64UL); mcp->mb[0] = 68U; mcp->mb[1] = (uint16_t )((unsigned int )mreq->options | 64U); if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { mcp->mb[1] = (uint16_t )((unsigned int )mcp->mb[1] | 32768U); mcp->mb[2] = vha->fcoe_fcf_idx; } else { } mcp->mb[16] = (unsigned short )mreq->rcv_dma; mcp->mb[17] = (unsigned short )((unsigned int )mreq->rcv_dma >> 16); mcp->mb[6] = (unsigned short )(mreq->rcv_dma >> 32ULL); mcp->mb[7] = (unsigned short )((unsigned int )(mreq->rcv_dma >> 32ULL) >> 16); mcp->mb[10] = (unsigned short )mreq->transfer_size; mcp->mb[14] = (unsigned short )mreq->send_dma; mcp->mb[15] = (unsigned short )((unsigned int )mreq->send_dma >> 16); mcp->mb[20] = (unsigned short )(mreq->send_dma >> 32ULL); mcp->mb[21] = (unsigned short )((unsigned int )(mreq->send_dma >> 32ULL) >> 16); mcp->out_mb = 3392707U; if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { mcp->out_mb = mcp->out_mb | 4U; } else { } mcp->in_mb = 1U; if (((((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) || ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U)) || (ha->device_type & 32768U) != 0U) { mcp->in_mb = mcp->in_mb | 2U; } else { } if (((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) || (ha->device_type & 32768U) != 0U) { mcp->in_mb = mcp->in_mb | 8U; } else { } mcp->tov = 30U; mcp->flags = 7U; mcp->buf_size = (long )mreq->transfer_size; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4347, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4348, "Done %s.\n", "qla2x00_echo_test"); } memcpy((void *)mresp, (void const *)(& mcp->mb), 64UL); return (rval); } } int qla84xx_reset_chip(scsi_qla_host_t *vha , uint16_t enable_diagnostic ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4349, "Entered %s enable_diag=%d.\n", "qla84xx_reset_chip", (int )enable_diagnostic); mcp->mb[0] = 58U; mcp->mb[1] = enable_diagnostic; mcp->out_mb = 3U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 7U; rval = qla2x00_mailbox_command(vha, mcp); if (rval 
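/*
 * qla2x00_loopback_test (opcode 69 / 0x45) and qla2x00_echo_test (opcode
 * 68 / 0x44) above drive the diagnostic loopback path: both spread the send
 * and receive buffer DMA addresses over mb[14..17], mb[20..21] and mb[6..7]
 * and put the transfer size in mb[10] (plus mb[11] for loopback), while on
 * the converged-network parts the echo path additionally sets bit 15 of
 * mb[1] and passes the FCF index in mb[2].  On return the whole 64-byte
 * mailbox array is copied into *mresp so the caller can inspect the
 * per-command status words (the loopback failure message also dumps mb[18]
 * and mb[19]).
 */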
!= 0) { ql_dbg(536870912U, vha, 4350, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4351, "Done %s.\n", "qla84xx_reset_chip"); } return (rval); } } int qla2x00_write_ram_word(scsi_qla_host_t *vha , uint32_t risc_addr , uint32_t data ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4352, "Entered %s.\n", "qla2x00_write_ram_word"); if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } mcp->mb[0] = 13U; mcp->mb[1] = (unsigned short )risc_addr; mcp->mb[2] = (unsigned short )data; mcp->mb[3] = (unsigned short )(data >> 16); mcp->mb[8] = (unsigned short )(risc_addr >> 16); mcp->out_mb = 271U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4353, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4354, "Done %s.\n", "qla2x00_write_ram_word"); } return (rval); } } int qla81xx_write_mpi_register(scsi_qla_host_t *vha , uint16_t *mb ) { int rval ; uint32_t stat ; uint32_t timer ; uint16_t mb0 ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; int tmp ; { mb0 = 0U; ha = vha->hw; reg = & (ha->iobase)->isp24; rval = 0; ql_dbg(536903680U, vha, 4355, "Entered %s.\n", "qla81xx_write_mpi_register"); clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); writew(1, (void volatile *)(& reg->mailbox0)); writew((int )*mb, (void volatile *)(& reg->mailbox1)); writew((int )*(mb + 1UL), (void volatile *)(& reg->mailbox2)); writew((int )*(mb + 2UL), (void volatile *)(& reg->mailbox3)); writew((int )*(mb + 3UL), (void volatile *)(& reg->mailbox4)); writel(1342177280U, (void volatile *)(& reg->hccr)); timer = 6000000U; goto ldv_66663; ldv_66662: stat = readl((void const volatile *)(& reg->host_status)); if ((stat & 32768U) != 0U) { stat = stat & 255U; if (((stat == 1U || stat == 2U) || stat == 16U) || stat == 17U) { set_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); mb0 = readw((void const volatile *)(& reg->mailbox0)); writel(2684354560U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); goto ldv_66661; } else { } } else { } __const_udelay(21475UL); timer = timer - 1U; ldv_66663: ; if (timer != 0U) { goto ldv_66662; } else { } ldv_66661: tmp = test_and_clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (tmp != 0) { rval = (int )mb0 & 16383; } else { rval = 258; } if (rval != 0) { ql_dbg(536870912U, vha, 4356, "Failed=%x mb[0]=%x.\n", rval, (int )*mb); } else { ql_dbg(536903680U, vha, 4357, "Done %s.\n", "qla81xx_write_mpi_register"); } return (rval); } } int qla2x00_get_data_rate(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4358, "Entered %s.\n", "qla2x00_get_data_rate"); if ((ha->device_type & 134217728U) == 0U) { return (258); } else { } mcp->mb[0] = 93U; mcp->mb[1] = 0U; mcp->out_mb = 3U; mcp->in_mb = 7U; if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { mcp->in_mb = mcp->in_mb | 8U; } else { } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4359, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4360, "Done %s.\n", "qla2x00_get_data_rate"); if ((unsigned int )mcp->mb[1] != 7U) { ha->link_data_rate = mcp->mb[1]; } else { } } return (rval); } } int 
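/*
 * qla81xx_write_mpi_register above bypasses qla2x00_mailbox_command: it
 * writes mailbox registers 0-4 directly through the ISP24xx register
 * window, rings the doorbell via hccr, and polls host_status for a mailbox
 * completion status (low byte 1, 2, 0x10 or 0x11) before reading the result
 * from mailbox 0; the 16383 (0x3fff) mask strips the completion bits from
 * that status word.  The two routines that follow, qla81xx_get_port_config
 * (opcode 291 / 0x123) and qla81xx_set_port_config (opcode 290 / 0x122),
 * simply copy 8 bytes of port-configuration words between the caller's
 * buffer and the mailbox array (mb[1] onward in the original driver).
 */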
qla81xx_get_port_config(scsi_qla_host_t *vha , uint16_t *mb ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4361, "Entered %s.\n", "qla81xx_get_port_config"); if ((((ha->device_type & 8192U) == 0U && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) && (ha->device_type & 262144U) == 0U) && ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U)) { return (258); } else { } mcp->mb[0] = 291U; mcp->out_mb = 1U; mcp->in_mb = 31U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4362, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { memcpy((void *)mb, (void const *)(& mcp->mb) + 1U, 8UL); ql_dbg(536903680U, vha, 4363, "Done %s.\n", "qla81xx_get_port_config"); } return (rval); } } int qla81xx_set_port_config(scsi_qla_host_t *vha , uint16_t *mb ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4364, "Entered %s.\n", "qla81xx_set_port_config"); mcp->mb[0] = 290U; memcpy((void *)(& mcp->mb) + 1U, (void const *)mb, 8UL); mcp->out_mb = 31U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4365, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4366, "Done %s.\n", "qla81xx_set_port_config"); } return (rval); } } int qla24xx_set_fcp_prio(scsi_qla_host_t *vha , uint16_t loop_id , uint16_t priority , uint16_t *mb ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4367, "Entered %s.\n", "qla24xx_set_fcp_prio"); if (((((ha->device_type & 128U) == 0U && (ha->device_type & 256U) == 0U) && ((ha->device_type & 512U) == 0U && (ha->device_type & 1024U) == 0U)) && (ha->device_type & 4096U) == 0U) && (ha->device_type & 2048U) == 0U) { return (258); } else { } mcp->mb[0] = 26U; mcp->mb[1] = loop_id; if (*((unsigned long *)ha + 2UL) != 0UL) { mcp->mb[2] = 2U; } else { mcp->mb[2] = 4U; } mcp->mb[4] = (unsigned int )priority & 15U; mcp->mb[9] = vha->vp_idx; mcp->out_mb = 543U; mcp->in_mb = 27U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((unsigned long )mb != (unsigned long )((uint16_t *)0U)) { *mb = mcp->mb[0]; *(mb + 1UL) = mcp->mb[1]; *(mb + 3UL) = mcp->mb[3]; *(mb + 4UL) = mcp->mb[4]; } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4301, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4300, "Done %s.\n", "qla24xx_set_fcp_prio"); } return (rval); } } int qla2x00_get_thermal_temp(scsi_qla_host_t *vha , uint16_t *temp ) { int rval ; struct qla_hw_data *ha ; uint8_t byte ; int tmp ; int tmp___0 ; { rval = 258; ha = vha->hw; if (((ha->device_type & 134217728U) == 0U || ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U)) || (ha->device_type & 8192U) != 0U) { ql_dbg(536870912U, vha, 4432, "Thermal not supported by this card.\n"); return (rval); } else { } if ((ha->device_type & 2048U) != 0U) { if ((unsigned int )(ha->pdev)->subsystem_vendor == 4215U && (unsigned int )(ha->pdev)->subsystem_device == 373U) { rval = qla2x00_read_sfp(vha, 0ULL, & byte, 152, 1, 1, 8193); *temp = (uint16_t )byte; return (rval); } else { } if ((unsigned int )(ha->pdev)->subsystem_vendor == 4156U && (unsigned int )(ha->pdev)->subsystem_device == 13198U) { rval = 
qla2x00_read_sfp(vha, 0ULL, & byte, 152, 1, 1, 49153); *temp = (uint16_t )byte; return (rval); } else { } ql_dbg(536870912U, vha, 4297, "Thermal not supported by this card.\n"); return (rval); } else { } if ((ha->device_type & 16384U) != 0U) { tmp = qla82xx_read_temperature(vha); *temp = (uint16_t )tmp; rval = 0; return (rval); } else if ((ha->device_type & 262144U) != 0U) { tmp___0 = qla8044_read_temperature(vha); *temp = (uint16_t )tmp___0; rval = 0; return (rval); } else { } rval = qla2x00_read_asic_temperature(vha, temp); return (rval); } } int qla82xx_mbx_intr_enable(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; ql_dbg(536903680U, vha, 4119, "Entered %s.\n", "qla82xx_mbx_intr_enable"); if ((ha->device_type & 134217728U) == 0U) { return (258); } else { } memset((void *)mcp, 0, 96UL); mcp->mb[0] = 16U; mcp->mb[1] = 1U; mcp->out_mb = 3U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4118, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4110, "Done %s.\n", "qla82xx_mbx_intr_enable"); } return (rval); } } int qla82xx_mbx_intr_disable(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; ql_dbg(536903680U, vha, 4109, "Entered %s.\n", "qla82xx_mbx_intr_disable"); if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { return (258); } else { } memset((void *)mcp, 0, 96UL); mcp->mb[0] = 16U; mcp->mb[1] = 0U; mcp->out_mb = 3U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4108, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4107, "Done %s.\n", "qla82xx_mbx_intr_disable"); } return (rval); } } int qla82xx_md_get_template_size(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int rval ; { ha = vha->hw; mcp = & mc; rval = 258; ql_dbg(536903680U, vha, 4383, "Entered %s.\n", "qla82xx_md_get_template_size"); memset((void *)(& mcp->mb), 0, 64UL); mcp->mb[0] = 297U; mcp->mb[1] = 0U; mcp->mb[2] = 0U; mcp->mb[3] = 0U; mcp->out_mb = 15U; mcp->in_mb = 32767U; mcp->flags = 7U; mcp->tov = 30U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4384, "mailbox command FAILED=0x%x, subcode=%x.\n", ((int )mcp->mb[1] << 16) | (int )mcp->mb[0], ((int )mcp->mb[3] << 16) | (int )mcp->mb[2]); } else { ql_dbg(536903680U, vha, 4385, "Done %s.\n", "qla82xx_md_get_template_size"); ha->md_template_size = (uint32_t )(((int )mcp->mb[3] << 16) | (int )mcp->mb[2]); if (ha->md_template_size == 0U) { ql_dbg(536870912U, vha, 4386, "Null template size obtained.\n"); rval = 258; } else { } } return (rval); } } int qla82xx_md_get_template(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int rval ; { ha = vha->hw; mcp = & mc; rval = 258; ql_dbg(536903680U, vha, 4387, "Entered %s.\n", "qla82xx_md_get_template"); ha->md_tmplt_hdr = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )ha->md_template_size, & ha->md_tmplt_hdr_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )ha->md_tmplt_hdr == (unsigned long )((void *)0)) { ql_log(1U, vha, 4388, "Unable to allocate memory for Minidump template.\n"); return (rval); } else { } memset((void *)(& mcp->mb), 0, 64UL); mcp->mb[0] = 297U; mcp->mb[1] = 0U; mcp->mb[2] = 1U; mcp->mb[3] = 0U; mcp->mb[4] = 
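/*
 * The minidump helpers query the firmware for a dump template with opcode
 * 297 (0x129): sub-command 0 in mb[2] returns the template size as a 32-bit
 * value in mb[2]/mb[3], and sub-command 1 transfers the template into the
 * coherent buffer allocated into ha->md_tmplt_hdr, whose DMA address is
 * split across mb[4..7].  The qla8044 variant below fetches the template in
 * fixed 36864-byte (0x9000) chunks, passing the running byte offset in
 * mb[10] and advancing the DMA address by the same amount on each pass.
 */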
(unsigned short )ha->md_tmplt_hdr_dma; mcp->mb[5] = (unsigned short )((unsigned int )ha->md_tmplt_hdr_dma >> 16); mcp->mb[6] = (unsigned short )(ha->md_tmplt_hdr_dma >> 32ULL); mcp->mb[7] = (unsigned short )((unsigned int )(ha->md_tmplt_hdr_dma >> 32ULL) >> 16); mcp->mb[8] = (unsigned short )ha->md_template_size; mcp->mb[9] = (unsigned short )(ha->md_template_size >> 16); mcp->flags = 7U; mcp->tov = 30U; mcp->out_mb = 4095U; mcp->in_mb = 15U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4389, "mailbox command FAILED=0x%x, subcode=%x.\n", ((int )mcp->mb[1] << 16) | (int )mcp->mb[0], ((int )mcp->mb[3] << 16) | (int )mcp->mb[2]); } else { ql_dbg(536903680U, vha, 4390, "Done %s.\n", "qla82xx_md_get_template"); } return (rval); } } int qla8044_md_get_template(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int rval ; int offset ; int size ; { ha = vha->hw; mcp = & mc; rval = 258; offset = 0; size = 36864; ql_dbg(536903680U, vha, 45343, "Entered %s.\n", "qla8044_md_get_template"); ha->md_tmplt_hdr = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )ha->md_template_size, & ha->md_tmplt_hdr_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )ha->md_tmplt_hdr == (unsigned long )((void *)0)) { ql_log(1U, vha, 45339, "Unable to allocate memory for Minidump template.\n"); return (rval); } else { } memset((void *)(& mcp->mb), 0, 64UL); goto ldv_66750; ldv_66749: mcp->mb[0] = 297U; mcp->mb[1] = 0U; mcp->mb[2] = 1U; mcp->mb[3] = 0U; mcp->mb[4] = (int )((unsigned short )ha->md_tmplt_hdr_dma) + (int )((unsigned short )offset); mcp->mb[5] = (unsigned short )(((unsigned int )ha->md_tmplt_hdr_dma + (unsigned int )offset) >> 16); mcp->mb[6] = (unsigned short )((ha->md_tmplt_hdr_dma + (dma_addr_t )offset) >> 32ULL); mcp->mb[7] = (unsigned short )((unsigned int )((ha->md_tmplt_hdr_dma + (dma_addr_t )offset) >> 32ULL) >> 16); mcp->mb[8] = (unsigned short )size; mcp->mb[9] = (unsigned short )((unsigned int )size >> 16); mcp->mb[10] = (uint16_t )offset; mcp->mb[11] = 0U; mcp->flags = 7U; mcp->tov = 30U; mcp->out_mb = 4095U; mcp->in_mb = 15U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 45340, "mailbox command FAILED=0x%x, subcode=%x.\n", ((int )mcp->mb[1] << 16) | (int )mcp->mb[0], ((int )mcp->mb[3] << 16) | (int )mcp->mb[2]); return (rval); } else { ql_dbg(536903680U, vha, 45341, "Done %s.\n", "qla8044_md_get_template"); } offset = offset + size; ldv_66750: ; if ((uint32_t )offset < ha->md_template_size) { goto ldv_66749; } else { } return (rval); } } int qla81xx_set_led_config(scsi_qla_host_t *vha , uint16_t *led_cfg ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; if ((ha->device_type & 8192U) == 0U && (ha->device_type & 65536U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4403, "Entered %s.\n", "qla81xx_set_led_config"); memset((void *)mcp, 0, 96UL); mcp->mb[0] = 293U; mcp->mb[1] = *led_cfg; mcp->mb[2] = *(led_cfg + 1UL); if ((ha->device_type & 65536U) != 0U) { mcp->mb[3] = *(led_cfg + 2UL); mcp->mb[4] = *(led_cfg + 3UL); mcp->mb[5] = *(led_cfg + 4UL); mcp->mb[6] = *(led_cfg + 5UL); } else { } mcp->out_mb = 7U; if ((ha->device_type & 65536U) != 0U) { mcp->out_mb = mcp->out_mb | 120U; } else { } mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4404, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4405, "Done %s.\n", 
"qla81xx_set_led_config"); } return (rval); } } int qla81xx_get_led_config(scsi_qla_host_t *vha , uint16_t *led_cfg ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; if ((ha->device_type & 8192U) == 0U && (ha->device_type & 65536U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4406, "Entered %s.\n", "qla81xx_get_led_config"); memset((void *)mcp, 0, 96UL); mcp->mb[0] = 294U; mcp->out_mb = 1U; mcp->in_mb = 7U; if ((ha->device_type & 65536U) != 0U) { mcp->in_mb = mcp->in_mb | 120U; } else { } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4407, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { *led_cfg = mcp->mb[1]; *(led_cfg + 1UL) = mcp->mb[2]; if ((ha->device_type & 65536U) != 0U) { *(led_cfg + 2UL) = mcp->mb[3]; *(led_cfg + 3UL) = mcp->mb[4]; *(led_cfg + 4UL) = mcp->mb[5]; *(led_cfg + 5UL) = mcp->mb[6]; } else { } ql_dbg(536903680U, vha, 4408, "Done %s.\n", "qla81xx_get_led_config"); } return (rval); } } int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha , int enable ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4391, "Entered %s.\n", "qla82xx_mbx_beacon_ctl"); memset((void *)mcp, 0, 96UL); mcp->mb[0] = 293U; if (enable != 0) { mcp->mb[7] = 14U; } else { mcp->mb[7] = 13U; } mcp->out_mb = 129U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4392, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4393, "Done %s.\n", "qla82xx_mbx_beacon_ctl"); } return (rval); } } int qla83xx_wr_reg(scsi_qla_host_t *vha , uint32_t reg , uint32_t data ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; if (((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) && ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U)) { return (258); } else { } ql_dbg(536903680U, vha, 4400, "Entered %s.\n", "qla83xx_wr_reg"); mcp->mb[0] = 1U; mcp->mb[1] = (unsigned short )reg; mcp->mb[2] = (unsigned short )(reg >> 16); mcp->mb[3] = (unsigned short )data; mcp->mb[4] = (unsigned short )(data >> 16); mcp->out_mb = 31U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4401, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4402, "Done %s.\n", "qla83xx_wr_reg"); } return (rval); } } int qla2x00_port_logout(scsi_qla_host_t *vha , struct fc_port *fcport ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { ql_dbg(536903680U, vha, 4411, "Implicit LOGO Unsupported.\n"); return (258); } else { } ql_dbg(536903680U, vha, 4412, "Entering %s.\n", "qla2x00_port_logout"); mcp->mb[0] = 86U; mcp->mb[1] = fcport->loop_id; mcp->mb[10] = 32768U; mcp->out_mb = 1027U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4413, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4414, "Done %s.\n", "qla2x00_port_logout"); } return (rval); } } int qla83xx_rd_reg(scsi_qla_host_t *vha , uint32_t reg , uint32_t 
*data ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; unsigned long retry_max_time ; { mcp = & mc; ha = vha->hw; retry_max_time = (unsigned long )jiffies + 500UL; if (((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) && ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U)) { return (258); } else { } ql_dbg(536870912U, vha, 4427, "Entered %s.\n", "qla83xx_rd_reg"); retry_rd_reg: mcp->mb[0] = 9U; mcp->mb[1] = (unsigned short )reg; mcp->mb[2] = (unsigned short )(reg >> 16); mcp->out_mb = 7U; mcp->in_mb = 27U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4428, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { *data = (uint32_t )((int )mcp->mb[3] | ((int )mcp->mb[4] << 16)); if (*data == 3134241488U) { if ((long )(retry_max_time - (unsigned long )jiffies) < 0L) { ql_dbg(536870912U, vha, 4417, "Failure to read CAMRAM register. data=0x%x.\n", *data); return (258); } else { } msleep(100U); goto retry_rd_reg; } else { } ql_dbg(536870912U, vha, 4418, "Done %s.\n", "qla83xx_rd_reg"); } return (rval); } } int qla83xx_restart_nic_firmware(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; if ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) { return (258); } else { } ql_dbg(536870912U, vha, 4419, "Entered %s.\n", "qla83xx_restart_nic_firmware"); mcp->mb[0] = 61U; mcp->out_mb = 1U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4420, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); (*((ha->isp_ops)->fw_dump))(vha, 0); } else { ql_dbg(536870912U, vha, 4421, "Done %s.\n", "qla83xx_restart_nic_firmware"); } return (rval); } } int qla83xx_access_control(scsi_qla_host_t *vha , uint16_t options , uint32_t start_addr , uint32_t end_addr , uint16_t *sector_size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; uint8_t subcode ; struct qla_hw_data *ha ; { mcp = & mc; subcode = (unsigned char )options; ha = vha->hw; if ((ha->device_type & 65536U) == 0U) { return (258); } else { } ql_dbg(536870912U, vha, 4422, "Entered %s.\n", "qla83xx_access_control"); mcp->mb[0] = 62U; mcp->mb[1] = options; mcp->out_mb = 3U; if (((int )subcode & 4) != 0) { mcp->mb[2] = (unsigned short )start_addr; mcp->mb[3] = (unsigned short )(start_addr >> 16); mcp->mb[4] = (unsigned short )end_addr; mcp->mb[5] = (unsigned short )(end_addr >> 16); mcp->out_mb = mcp->out_mb | 60U; } else { } mcp->in_mb = 7U; if (((int )subcode & 36) == 0) { mcp->in_mb = mcp->in_mb | 24U; } else { } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4423, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2], (int )mcp->mb[3], (int )mcp->mb[4]); (*((ha->isp_ops)->fw_dump))(vha, 0); } else { if (((int )subcode & 32) != 0) { *sector_size = mcp->mb[1]; } else if (((int )subcode & 192) != 0) { ql_dbg(536870912U, vha, 4424, "Driver-lock id=%x%x", (int )mcp->mb[4], (int )mcp->mb[3]); } else if (((int )subcode & 24) != 0) { ql_dbg(536870912U, vha, 4425, "Flash-lock id=%x%x", (int )mcp->mb[4], (int )mcp->mb[3]); } else { } ql_dbg(536870912U, vha, 4426, "Done %s.\n", "qla83xx_access_control"); } return (rval); } } int qla2x00_dump_mctp_data(scsi_qla_host_t *vha , dma_addr_t 
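/* req_dma is the host buffer that receives the MCTP dump: its 64-bit address is split across mb[3]/mb[2] (low dword) and mb[7]/mb[6] (high dword), while addr fills mb[1]/mb[8] and size fills mb[5]/mb[4]. */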
req_dma , uint32_t addr , uint32_t size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; if (((vha->hw)->device_type & 32768U) == 0U || ((int )(vha->hw)->fw_attributes_ext[0] & 1) == 0) { return (258); } else { } ql_dbg(536903680U, vha, 4431, "Entered %s.\n", "qla2x00_dump_mctp_data"); mcp->mb[0] = 12U; mcp->mb[1] = (unsigned short )addr; mcp->mb[2] = (unsigned short )((unsigned int )req_dma >> 16); mcp->mb[3] = (unsigned short )req_dma; mcp->mb[4] = (unsigned short )(size >> 16); mcp->mb[5] = (unsigned short )size; mcp->mb[6] = (unsigned short )((unsigned int )(req_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(req_dma >> 32ULL); mcp->mb[8] = (unsigned short )(addr >> 16); mcp->mb[10] = (uint16_t )((unsigned int )mcp->mb[10] | 128U); mcp->mb[10] = (uint16_t )((unsigned int )mcp->mb[10] | 64U); mcp->out_mb = mcp->out_mb | 1535U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4430, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4429, "Done %s.\n", "qla2x00_dump_mctp_data"); } return (rval); } } void disable_suitable_timer_16(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_16) { ldv_timer_state_16 = 0; return; } else { } return; } } void activate_pending_timer_16(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_16 == (unsigned long )timer) { if (ldv_timer_state_16 == 2 || pending_flag != 0) { ldv_timer_list_16 = timer; ldv_timer_list_16->data = data; ldv_timer_state_16 = 1; } else { } return; } else { } reg_timer_16(timer); ldv_timer_list_16->data = data; return; } } int reg_timer_16(struct timer_list *timer ) { { ldv_timer_list_16 = timer; ldv_timer_state_16 = 1; return (0); } } void choose_timer_16(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_16 = 2; return; } } bool ldv_queue_work_on_79(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_80(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_81(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_82(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_83(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 
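/* LDV environment model: each ldv_queue_*() wrapper forwards to the real workqueue API and then calls activate_work_7(work, 2) so the verification harness records the item as pending for later invocation. */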
2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_84(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_95(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_97(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_96(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_99(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_98(struct workqueue_struct *ldv_func_arg1 ) ; __inline static unsigned short __readw(void const volatile *addr ) { unsigned short ret ; { __asm__ volatile ("movw %1,%0": "=r" (ret): "m" (*((unsigned short volatile *)addr))); return (ret); } } __inline static unsigned int __readl(void const volatile *addr ) { unsigned int ret ; { __asm__ volatile ("movl %1,%0": "=r" (ret): "m" (*((unsigned int volatile *)addr))); return (ret); } } __inline static void writeb(unsigned char val , void volatile *addr ) { { __asm__ volatile ("movb %0,%1": : "q" (val), "m" (*((unsigned char volatile *)addr)): "memory"); return; } } void disable_suitable_timer_17(struct timer_list *timer ) ; void choose_timer_17(struct timer_list *timer ) ; int reg_timer_17(struct timer_list *timer ) ; void activate_pending_timer_17(struct timer_list *timer , unsigned long data , int pending_flag ) ; __inline static void *lowmem_page_address(struct page const *page ) { { return ((void *)((unsigned long )((unsigned long long )(((long )page + 24189255811072L) / 64L) << 12) + 0xffff880000000000UL)); } } __inline static struct page *sg_page(struct scatterlist *sg ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (123), "i" (12UL)); ldv_26177: ; goto ldv_26177; } else { } tmp___0 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (124), "i" (12UL)); ldv_26178: ; goto ldv_26178; } else { } return ((struct page *)(sg->page_link & 0xfffffffffffffffcUL)); } } __inline static void *sg_virt(struct scatterlist *sg ) { struct page *tmp ; void *tmp___0 ; { tmp = sg_page(sg); tmp___0 = lowmem_page_address((struct page const *)tmp); return (tmp___0 + (unsigned long )sg->offset); } } extern struct scatterlist *sg_next(struct scatterlist * ) ; __inline static void kmemcheck_mark_initialized(void *address , unsigned int n ) { { return; } } extern void debug_dma_map_sg(struct device * , struct scatterlist * , int , int , int ) ; __inline static int dma_map_sg_attrs(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct 
dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int i ; int ents ; struct scatterlist *s ; void *tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; { tmp = get_dma_ops(dev); ops = tmp; i = 0; s = sg; goto ldv_26969; ldv_26968: tmp___0 = sg_virt(s); kmemcheck_mark_initialized(tmp___0, s->length); i = i + 1; s = sg_next(s); ldv_26969: ; if (i < nents) { goto ldv_26968; } else { } tmp___1 = valid_dma_direction((int )dir); tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (56), "i" (12UL)); ldv_26971: ; goto ldv_26971; } else { } ents = (*(ops->map_sg))(dev, sg, nents, dir, attrs); tmp___3 = ldv__builtin_expect(ents < 0, 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (58), "i" (12UL)); ldv_26972: ; goto ldv_26972; } else { } debug_dma_map_sg(dev, sg, nents, ents, (int )dir); return (ents); } } __inline static sector_t blk_rq_pos(struct request const *rq ) { { return ((sector_t )rq->__sector); } } int ldv_scsi_add_host_with_dma_100(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; __inline static unsigned char scsi_host_get_guard(struct Scsi_Host *shost ) { { return (shost->prot_guard_type); } } __inline static unsigned int scsi_sg_count(struct scsi_cmnd *cmd ) { { return (cmd->sdb.table.nents); } } __inline static struct scatterlist *scsi_sglist(struct scsi_cmnd *cmd ) { { return (cmd->sdb.table.sgl); } } __inline static unsigned int scsi_bufflen(struct scsi_cmnd *cmd ) { { return (cmd->sdb.length); } } __inline static unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd ) { { return (scmd->prot_type); } } __inline static sector_t scsi_get_lba(struct scsi_cmnd *scmd ) { sector_t tmp ; { tmp = blk_rq_pos((struct request const *)scmd->request); return (tmp); } } void *qla2x00_alloc_iocbs(struct scsi_qla_host *vha , srb_t *sp ) ; void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *vha , srb_t *sp ) ; int qla2x00_start_bidir(srb_t *sp , struct scsi_qla_host *vha , uint32_t tot_dsds ) ; int qla2x00_issue_marker(scsi_qla_host_t *vha , int ha_locked ) ; int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha , srb_t *sp , uint32_t *dsd , uint16_t tot_dsds , struct qla_tgt_cmd *tc ) ; int qla24xx_walk_and_build_sglist(struct qla_hw_data *ha , srb_t *sp , uint32_t *dsd , uint16_t tot_dsds , struct qla_tgt_cmd *tc ) ; int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha , srb_t *sp , uint32_t *dsd , uint16_t tot_dsds , struct qla_tgt_cmd *tc ) ; void qla2x00_process_response_queue(struct rsp_que *rsp ) ; void qla24xx_process_response_queue(struct scsi_qla_host *vha , struct rsp_que *rsp ) ; void qlafx00_tm_iocb(srb_t *sp , struct tsk_mgmt_entry_fx00 *ptm_iocb ) ; void qlafx00_abort_iocb(srb_t *sp , struct abort_iocb_entry_fx00 *pabt_iocb ) ; void qlafx00_fxdisc_iocb(srb_t *sp , struct fxdisc_entry_fx00 *pfxiocb ) ; void qla2x00_start_iocbs(struct scsi_qla_host *vha , struct req_que *req ) ; void qla82xx_start_iocbs(scsi_qla_host_t *vha ) ; __inline static uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *vha , uint16_t dsds ) { uint16_t iocbs ; { iocbs = 1U; if ((unsigned int )dsds > 1U) { iocbs = (int )((uint16_t )(((int )dsds + -1) / 5)) + (int 
)iocbs; if (((int )dsds + -1) % 5 != 0) { iocbs = (uint16_t )((int )iocbs + 1); } else { } } else { } return (iocbs); } } __inline static uint16_t qla2x00_debounce_register___0(uint16_t volatile *addr ) { uint16_t volatile first ; uint16_t volatile second ; unsigned short tmp ; unsigned short tmp___0 ; { ldv_65629: tmp = readw((void const volatile *)addr); first = tmp; __asm__ volatile ("": : : "memory"); cpu_relax(); tmp___0 = readw((void const volatile *)addr); second = tmp___0; if ((int )((unsigned short )first) != (int )((unsigned short )second)) { goto ldv_65629; } else { } return ((uint16_t )first); } } __inline static int qla2x00_hba_err_chk_enabled(srb_t *sp ) { unsigned char tmp ; { tmp = scsi_get_prot_op(sp->u.scmd.cmd); switch ((int )tmp) { case 3: ; case 4: ; if (ql2xenablehba_err_chk > 0) { return (1); } else { } goto ldv_65714; case 5: ; case 6: ; if (ql2xenablehba_err_chk > 1) { return (1); } else { } goto ldv_65714; case 1: ; case 2: ; return (1); } ldv_65714: ; return (0); } } static void qla25xx_set_que(srb_t *sp , struct rsp_que **rsp ) ; __inline static uint16_t qla2x00_get_cmd_direction(srb_t *sp ) { uint16_t cflags ; struct scsi_cmnd *cmd ; struct scsi_qla_host *vha ; unsigned int tmp ; unsigned int tmp___0 ; { cmd = sp->u.scmd.cmd; vha = (sp->fcport)->vha; cflags = 0U; if ((unsigned int )cmd->sc_data_direction == 1U) { cflags = 64U; tmp = scsi_bufflen(cmd); vha->qla_stats.output_bytes = vha->qla_stats.output_bytes + (uint64_t )tmp; vha->qla_stats.output_requests = vha->qla_stats.output_requests + 1ULL; } else if ((unsigned int )cmd->sc_data_direction == 2U) { cflags = 32U; tmp___0 = scsi_bufflen(cmd); vha->qla_stats.input_bytes = vha->qla_stats.input_bytes + (uint64_t )tmp___0; vha->qla_stats.input_requests = vha->qla_stats.input_requests + 1ULL; } else { } return (cflags); } } uint16_t qla2x00_calc_iocbs_32(uint16_t dsds ) { uint16_t iocbs ; { iocbs = 1U; if ((unsigned int )dsds > 3U) { iocbs = (int )((uint16_t )(((int )dsds + -3) / 7)) + (int )iocbs; if (((int )dsds + -3) % 7 != 0) { iocbs = (uint16_t )((int )iocbs + 1); } else { } } else { } return (iocbs); } } uint16_t qla2x00_calc_iocbs_64(uint16_t dsds ) { uint16_t iocbs ; { iocbs = 1U; if ((unsigned int )dsds > 2U) { iocbs = (int )((uint16_t )(((int )dsds + -2) / 5)) + (int )iocbs; if (((int )dsds + -2) % 5 != 0) { iocbs = (uint16_t )((int )iocbs + 1); } else { } } else { } return (iocbs); } } __inline static cont_entry_t *qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha ) { cont_entry_t *cont_pkt ; struct req_que *req ; { req = vha->req; req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } cont_pkt = (cont_entry_t *)req->ring_ptr; *((uint32_t *)(& cont_pkt->entry_type)) = 2U; return (cont_pkt); } } __inline static cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha , struct req_que *req ) { cont_a64_entry_t *cont_pkt ; { req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } cont_pkt = (cont_a64_entry_t *)req->ring_ptr; *((uint32_t *)(& cont_pkt->entry_type)) = ((vha->hw)->device_type & 131072U) != 0U ? 
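/* Continuation entry type selection: device_type bit 0x20000 (the ISPFX00 family) uses type code 3, every other adapter gets 10 (0x0A, the standard 64-bit continuation entry). */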
3U : 10U; return (cont_pkt); } } __inline static int qla24xx_configure_prot_mode(srb_t *sp , uint16_t *fw_prot_opts ) { struct scsi_cmnd *cmd ; uint8_t guard ; unsigned char tmp ; unsigned char tmp___0 ; unsigned int tmp___1 ; { cmd = sp->u.scmd.cmd; tmp = scsi_host_get_guard((cmd->device)->host); guard = tmp; *fw_prot_opts = 0U; tmp___0 = scsi_get_prot_op(cmd); switch ((int )tmp___0) { case 3: *fw_prot_opts = (uint16_t )((unsigned int )*fw_prot_opts | 1U); goto ldv_65805; case 4: *fw_prot_opts = *fw_prot_opts; goto ldv_65805; case 1: *fw_prot_opts = *fw_prot_opts; goto ldv_65805; case 2: *fw_prot_opts = (uint16_t )((unsigned int )*fw_prot_opts | 1U); goto ldv_65805; case 5: ; case 6: ; if (((int )guard & 2) != 0) { *fw_prot_opts = (uint16_t )((unsigned int )*fw_prot_opts | 6U); } else { *fw_prot_opts = (uint16_t )((unsigned int )*fw_prot_opts | 2U); } goto ldv_65805; default: *fw_prot_opts = (uint16_t )((unsigned int )*fw_prot_opts | 2U); goto ldv_65805; } ldv_65805: tmp___1 = scsi_prot_sg_count(cmd); return ((int )tmp___1); } } void qla2x00_build_scsi_iocbs_32(srb_t *sp , cmd_entry_t *cmd_pkt , uint16_t tot_dsds ) { uint16_t avail_dsds ; uint32_t *cur_dsd ; scsi_qla_host_t *vha ; struct scsi_cmnd *cmd ; struct scatterlist *sg ; int i ; unsigned int tmp ; uint16_t tmp___0 ; cont_entry_t *cont_pkt ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; { cmd = sp->u.scmd.cmd; *((uint32_t *)(& cmd_pkt->entry_type)) = 17U; tmp = scsi_bufflen(cmd); if (tmp == 0U || (unsigned int )cmd->sc_data_direction == 3U) { cmd_pkt->byte_count = 0U; return; } else { } vha = (sp->fcport)->vha; tmp___0 = qla2x00_get_cmd_direction(sp); cmd_pkt->control_flags = (uint16_t )((int )cmd_pkt->control_flags | (int )tmp___0); avail_dsds = 3U; cur_dsd = & cmd_pkt->dseg_0_address; i = 0; sg = scsi_sglist(cmd); goto ldv_65825; ldv_65824: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt = qla2x00_prep_cont_type0_iocb(vha); cur_dsd = & cont_pkt->dseg_0_address; avail_dsds = 7U; } else { } tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = (unsigned int )sg->dma_address; tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = sg->dma_length; avail_dsds = (uint16_t )((int )avail_dsds - 1); i = i + 1; sg = sg_next(sg); ldv_65825: ; if ((int )tot_dsds > i) { goto ldv_65824; } else { } return; } } void qla2x00_build_scsi_iocbs_64(srb_t *sp , cmd_entry_t *cmd_pkt , uint16_t tot_dsds ) { uint16_t avail_dsds ; uint32_t *cur_dsd ; scsi_qla_host_t *vha ; struct scsi_cmnd *cmd ; struct scatterlist *sg ; int i ; unsigned int tmp ; uint16_t tmp___0 ; dma_addr_t sle_dma ; cont_a64_entry_t *cont_pkt ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; uint32_t *tmp___3 ; { cmd = sp->u.scmd.cmd; *((uint32_t *)(& cmd_pkt->entry_type)) = 25U; tmp = scsi_bufflen(cmd); if (tmp == 0U || (unsigned int )cmd->sc_data_direction == 3U) { cmd_pkt->byte_count = 0U; return; } else { } vha = (sp->fcport)->vha; tmp___0 = qla2x00_get_cmd_direction(sp); cmd_pkt->control_flags = (uint16_t )((int )cmd_pkt->control_flags | (int )tmp___0); avail_dsds = 2U; cur_dsd = & cmd_pkt->dseg_0_address; i = 0; sg = scsi_sglist(cmd); goto ldv_65841; ldv_65840: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); cur_dsd = (uint32_t *)(& cont_pkt->dseg_0_address); avail_dsds = 5U; } else { } sle_dma = sg->dma_address; tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = (unsigned int )sle_dma; tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = (unsigned int )(sle_dma >> 32ULL); tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = sg->dma_length; 
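/* One 64-bit data segment descriptor has been emitted: address low dword, address high dword, then segment length. */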
avail_dsds = (uint16_t )((int )avail_dsds - 1); i = i + 1; sg = sg_next(sg); ldv_65841: ; if ((int )tot_dsds > i) { goto ldv_65840; } else { } return; } } int qla2x00_start_scsi(srb_t *sp ) { int ret ; int nseg ; unsigned long flags ; scsi_qla_host_t *vha ; struct scsi_cmnd *cmd ; uint32_t *clr_ptr ; uint32_t index ; uint32_t handle ; cmd_entry_t *cmd_pkt ; uint16_t cnt ; uint16_t req_cnt ; uint16_t tot_dsds ; struct device_reg_2xxx *reg ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; int tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; struct scatterlist *tmp___2 ; long tmp___3 ; unsigned int tmp___4 ; { ret = 0; vha = (sp->fcport)->vha; ha = vha->hw; reg = & (ha->iobase)->isp; cmd = sp->u.scmd.cmd; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); tot_dsds = 0U; if ((unsigned int )vha->marker_needed != 0U) { tmp = qla2x00_marker(vha, req, rsp, 0, 0ULL, 2); if (tmp != 0) { return (258); } else { } vha->marker_needed = 0U; } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); handle = req->current_outstanding_cmd; index = 1U; goto ldv_65867; ldv_65866: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_65865; } else { } index = index + 1U; ldv_65867: ; if ((uint32_t )req->num_outstanding_cmds > index) { goto ldv_65866; } else { } ldv_65865: ; if ((uint32_t )req->num_outstanding_cmds == index) { goto queuing_error; } else { } tmp___4 = scsi_sg_count(cmd); if (tmp___4 != 0U) { tmp___1 = scsi_sg_count(cmd); tmp___2 = scsi_sglist(cmd); nseg = dma_map_sg_attrs(& (ha->pdev)->dev, tmp___2, (int )tmp___1, cmd->sc_data_direction, (struct dma_attrs *)0); tmp___3 = ldv__builtin_expect(nseg == 0, 0L); if (tmp___3 != 0L) { goto queuing_error; } else { } } else { nseg = 0; } tot_dsds = (uint16_t )nseg; req_cnt = (*((ha->isp_ops)->calc_req_entries))((int )tot_dsds); if ((int )req->cnt < (int )req_cnt + 2) { cnt = __readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
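/* ISP2100/ISP2200 (device_type bits 0 and 1) expose the request-queue out pointer in mailbox register 4; ISP2300 and later provide a dedicated req_q_out register. */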
& reg->u.isp2100.mailbox4 : & reg->u.isp2300.req_q_out)); if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } if ((int )req->cnt < (int )req_cnt + 2) { goto queuing_error; } else { } } else { } req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)((unsigned long )handle); req->cnt = (int )req->cnt - (int )req_cnt; cmd_pkt = (cmd_entry_t *)req->ring_ptr; cmd_pkt->handle = handle; clr_ptr = (uint32_t *)cmd_pkt + 2UL; memset((void *)clr_ptr, 0, 56UL); cmd_pkt->dseg_count = tot_dsds; if ((int )ha->device_type < 0) { cmd_pkt->target.extended = (sp->fcport)->loop_id; } else { cmd_pkt->target.id.standard = (unsigned char )(sp->fcport)->loop_id; } cmd_pkt->lun = (unsigned short )(cmd->device)->lun; cmd_pkt->control_flags = 8U; memcpy((void *)(& cmd_pkt->scsi_cdb), (void const *)cmd->cmnd, (size_t )cmd->cmd_len); cmd_pkt->byte_count = scsi_bufflen(cmd); (*((ha->isp_ops)->build_iocbs))(sp, cmd_pkt, (int )tot_dsds); cmd_pkt->entry_count = (unsigned char )req_cnt; __asm__ volatile ("sfence": : : "memory"); req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } sp->flags = (uint16_t )((unsigned int )sp->flags | 1U); writew((int )req->ring_index, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox4 : & reg->u.isp2300.req_q_in)); __readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox4 : & reg->u.isp2300.req_q_in)); if (*((unsigned long *)vha + 19UL) != 0UL && (rsp->ring_ptr)->signature != 3735936685U) { qla2x00_process_response_queue(rsp); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); queuing_error: ; if ((unsigned int )tot_dsds != 0U) { scsi_dma_unmap(cmd); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (258); } } void qla2x00_start_iocbs(struct scsi_qla_host *vha , struct req_que *req ) { struct qla_hw_data *ha ; device_reg_t *reg ; { ha = vha->hw; reg = ((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) ? 
ha->mqiobase + (unsigned long )((int )req->id * 4096) : ha->iobase; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { qla82xx_start_iocbs(vha); } else { req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } if (((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { writel((unsigned int )req->ring_index, (void volatile *)req->req_q_in); __readl((void const volatile *)(& (ha->iobase)->isp24.hccr)); } else if ((ha->device_type & 131072U) != 0U) { writel((unsigned int )req->ring_index, (void volatile *)(& reg->ispfx00.req_q_in)); __readl((void const volatile *)(& reg->ispfx00.req_q_in)); writel(ha->rqstq_intr_code, (void volatile *)ha->cregbase + 133636U); } else if ((ha->device_type & 134217728U) != 0U) { writel((unsigned int )req->ring_index, (void volatile *)(& reg->isp24.req_q_in)); __readl((void const volatile *)(& reg->isp24.req_q_in)); } else { writew((int )req->ring_index, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->isp.u.isp2100.mailbox4 : & reg->isp.u.isp2300.req_q_in)); __readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->isp.u.isp2100.mailbox4 : & reg->isp.u.isp2300.req_q_in)); } } return; } } static int __qla2x00_marker(struct scsi_qla_host *vha , struct req_que *req , struct rsp_que *rsp , uint16_t loop_id , uint64_t lun , uint8_t type ) { mrk_entry_t *mrk ; struct mrk_entry_24xx *mrk24 ; struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; void *tmp___0 ; { mrk24 = (struct mrk_entry_24xx *)0; ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; req = *(ha->req_q_map); tmp___0 = qla2x00_alloc_iocbs(vha, (srb_t *)0); mrk = (mrk_entry_t *)tmp___0; if ((unsigned long )mrk == (unsigned long )((mrk_entry_t *)0)) { ql_log(1U, base_vha, 12326, "Failed to allocate Marker IOCB.\n"); return (258); } else { } mrk->entry_type = 4U; mrk->modifier = type; if ((unsigned int )type != 2U) { if ((ha->device_type & 134217728U) != 0U) { mrk24 = (struct mrk_entry_24xx *)mrk; mrk24->nport_handle = loop_id; int_to_scsilun(lun, (struct scsi_lun *)(& mrk24->lun)); host_to_fcp_swap((uint8_t *)(& mrk24->lun), 8U); mrk24->vp_index = (uint8_t )vha->vp_idx; mrk24->handle = ((unsigned int )req->id << 16) | mrk24->handle; } else { if ((int )ha->device_type < 0) { mrk->target.extended = loop_id; } else { mrk->target.id.standard = (unsigned char )loop_id; } mrk->lun = (unsigned short )lun; } } else { } __asm__ volatile ("sfence": : : "memory"); qla2x00_start_iocbs(vha, req); return (0); } } int qla2x00_marker(struct scsi_qla_host *vha , struct req_que *req , struct rsp_que *rsp , uint16_t loop_id , uint64_t lun , uint8_t type ) { int ret ; unsigned long flags ; raw_spinlock_t *tmp ; { flags = 0UL; tmp = spinlock_check(& (vha->hw)->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ret = __qla2x00_marker(vha, req, rsp, (int )loop_id, lun, (int )type); spin_unlock_irqrestore(& (vha->hw)->hardware_lock, flags); return (ret); } } int qla2x00_issue_marker(scsi_qla_host_t *vha , int ha_locked ) { int tmp ; int tmp___0 ; { if (ha_locked != 0) { tmp = __qla2x00_marker(vha, vha->req, (vha->req)->rsp, 0, 0ULL, 2); if (tmp != 0) { return (258); } else { } } else { tmp___0 = 
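/* Unlocked path: qla2x00_marker() acquires hw->hardware_lock itself before delegating to __qla2x00_marker(), whereas the ha_locked branch above calls __qla2x00_marker() directly. */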
qla2x00_marker(vha, vha->req, (vha->req)->rsp, 0, 0ULL, 2); if (tmp___0 != 0) { return (258); } else { } } vha->marker_needed = 0U; return (0); } } __inline static int qla24xx_build_scsi_type_6_iocbs(srb_t *sp , struct cmd_type_6 *cmd_pkt , uint16_t tot_dsds ) { uint32_t *cur_dsd ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct scsi_cmnd *cmd ; struct scatterlist *cur_seg ; uint32_t *dsd_seg ; void *next_dsd ; uint8_t avail_dsds ; uint8_t first_iocb ; uint32_t dsd_list_len ; struct dsd_dma *dsd_ptr ; struct ct6_dsd *ctx ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; struct list_head const *__mptr ; uint32_t *tmp___2 ; uint32_t *tmp___3 ; uint32_t *tmp___4 ; uint32_t *tmp___5 ; uint32_t *tmp___6 ; dma_addr_t sle_dma ; uint32_t *tmp___7 ; uint32_t *tmp___8 ; uint32_t *tmp___9 ; uint32_t *tmp___10 ; uint32_t *tmp___11 ; uint32_t *tmp___12 ; { cur_dsd = (uint32_t *)0U; first_iocb = 1U; cmd = sp->u.scmd.cmd; *((uint32_t *)(& cmd_pkt->entry_type)) = 72U; tmp = scsi_bufflen(cmd); if (tmp == 0U || (unsigned int )cmd->sc_data_direction == 3U) { cmd_pkt->byte_count = 0U; return (0); } else { } vha = (sp->fcport)->vha; ha = vha->hw; if ((unsigned int )cmd->sc_data_direction == 1U) { cmd_pkt->control_flags = 1U; tmp___0 = scsi_bufflen(cmd); vha->qla_stats.output_bytes = vha->qla_stats.output_bytes + (uint64_t )tmp___0; vha->qla_stats.output_requests = vha->qla_stats.output_requests + 1ULL; } else if ((unsigned int )cmd->sc_data_direction == 2U) { cmd_pkt->control_flags = 2U; tmp___1 = scsi_bufflen(cmd); vha->qla_stats.input_bytes = vha->qla_stats.input_bytes + (uint64_t )tmp___1; vha->qla_stats.input_requests = vha->qla_stats.input_requests + 1ULL; } else { } cur_seg = scsi_sglist(cmd); ctx = (struct ct6_dsd *)sp->u.scmd.ctx; goto ldv_65928; ldv_65927: avail_dsds = (unsigned int )tot_dsds <= 37U ? 
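/* A single DSD list holds at most 37 data segment descriptors, so tot_dsds is consumed in chunks of up to 37; dsd_list_len reserves one extra 12-byte entry per list, used either to link to the next list or to hold the zero terminator. */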
(uint8_t )tot_dsds : 37U; tot_dsds = (int )tot_dsds - (int )((uint16_t )avail_dsds); dsd_list_len = (uint32_t )(((int )avail_dsds + 1) * 12); __mptr = (struct list_head const *)ha->gbl_dsd_list.next; dsd_ptr = (struct dsd_dma *)__mptr; next_dsd = dsd_ptr->dsd_addr; list_del(& dsd_ptr->list); ha->gbl_dsd_avail = (uint16_t )((int )ha->gbl_dsd_avail - 1); list_add_tail(& dsd_ptr->list, & ctx->dsd_list); ctx->dsd_use_cnt = ctx->dsd_use_cnt + 1; ha->gbl_dsd_inuse = (uint16_t )((int )ha->gbl_dsd_inuse + 1); if ((unsigned int )first_iocb != 0U) { first_iocb = 0U; dsd_seg = (uint32_t *)(& cmd_pkt->fcp_data_dseg_address); tmp___2 = dsd_seg; dsd_seg = dsd_seg + 1; *tmp___2 = (unsigned int )dsd_ptr->dsd_list_dma; tmp___3 = dsd_seg; dsd_seg = dsd_seg + 1; *tmp___3 = (unsigned int )(dsd_ptr->dsd_list_dma >> 32ULL); cmd_pkt->fcp_data_dseg_len = dsd_list_len; } else { tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = (unsigned int )dsd_ptr->dsd_list_dma; tmp___5 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___5 = (unsigned int )(dsd_ptr->dsd_list_dma >> 32ULL); tmp___6 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___6 = dsd_list_len; } cur_dsd = (uint32_t *)next_dsd; goto ldv_65925; ldv_65924: sle_dma = cur_seg->dma_address; tmp___7 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___7 = (unsigned int )sle_dma; tmp___8 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___8 = (unsigned int )(sle_dma >> 32ULL); tmp___9 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___9 = cur_seg->dma_length; cur_seg = sg_next(cur_seg); avail_dsds = (uint8_t )((int )avail_dsds - 1); ldv_65925: ; if ((unsigned int )avail_dsds != 0U) { goto ldv_65924; } else { } ldv_65928: ; if ((unsigned int )tot_dsds != 0U) { goto ldv_65927; } else { } tmp___10 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___10 = 0U; tmp___11 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___11 = 0U; tmp___12 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___12 = 0U; cmd_pkt->control_flags = (uint16_t )((unsigned int )cmd_pkt->control_flags | 4U); return (0); } } __inline uint16_t qla24xx_calc_dsd_lists(uint16_t dsds ) { uint16_t dsd_lists ; { dsd_lists = 0U; dsd_lists = (uint16_t )((unsigned int )dsds / 37U); if ((unsigned int )dsds % 37U != 0U) { dsd_lists = (uint16_t )((int )dsd_lists + 1); } else { } return (dsd_lists); } } __inline void qla24xx_build_scsi_iocbs(srb_t *sp , struct cmd_type_7 *cmd_pkt , uint16_t tot_dsds ) { uint16_t avail_dsds ; uint32_t *cur_dsd ; scsi_qla_host_t *vha ; struct scsi_cmnd *cmd ; struct scatterlist *sg ; int i ; struct req_que *req ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; dma_addr_t sle_dma ; cont_a64_entry_t *cont_pkt ; uint32_t *tmp___2 ; uint32_t *tmp___3 ; uint32_t *tmp___4 ; { cmd = sp->u.scmd.cmd; *((uint32_t *)(& cmd_pkt->entry_type)) = 24U; tmp = scsi_bufflen(cmd); if (tmp == 0U || (unsigned int )cmd->sc_data_direction == 3U) { cmd_pkt->byte_count = 0U; return; } else { } vha = (sp->fcport)->vha; req = vha->req; if ((unsigned int )cmd->sc_data_direction == 1U) { cmd_pkt->task_mgmt_flags = 1U; tmp___0 = scsi_bufflen(cmd); vha->qla_stats.output_bytes = vha->qla_stats.output_bytes + (uint64_t )tmp___0; vha->qla_stats.output_requests = vha->qla_stats.output_requests + 1ULL; } else if ((unsigned int )cmd->sc_data_direction == 2U) { cmd_pkt->task_mgmt_flags = 2U; tmp___1 = scsi_bufflen(cmd); vha->qla_stats.input_bytes = vha->qla_stats.input_bytes + (uint64_t )tmp___1; vha->qla_stats.input_requests = vha->qla_stats.input_requests + 1ULL; } else { } avail_dsds = 1U; cur_dsd = (uint32_t *)(& cmd_pkt->dseg_0_address); i = 0; sg = scsi_sglist(cmd); goto ldv_65949; 
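/* Per-segment loop for the Type 7 command: the command IOCB itself carries one DSD; whenever avail_dsds reaches zero a 64-bit continuation IOCB is chained, providing room for five more descriptors. */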
ldv_65948: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); cur_dsd = (uint32_t *)(& cont_pkt->dseg_0_address); avail_dsds = 5U; } else { } sle_dma = sg->dma_address; tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = (unsigned int )sle_dma; tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = (unsigned int )(sle_dma >> 32ULL); tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = sg->dma_length; avail_dsds = (uint16_t )((int )avail_dsds - 1); i = i + 1; sg = sg_next(sg); ldv_65949: ; if ((int )tot_dsds > i) { goto ldv_65948; } else { } return; } } __inline static void qla24xx_set_t10dif_tags(srb_t *sp , struct fw_dif_context *pkt , unsigned int protcnt ) { struct scsi_cmnd *cmd ; unsigned char tmp ; sector_t tmp___0 ; int tmp___1 ; sector_t tmp___2 ; int tmp___3 ; uint8_t tmp___4 ; uint8_t tmp___5 ; uint8_t tmp___6 ; sector_t tmp___7 ; int tmp___8 ; { cmd = sp->u.scmd.cmd; tmp = scsi_get_prot_type(cmd); switch ((int )tmp) { case 0: tmp___0 = scsi_get_lba(cmd); pkt->ref_tag = (unsigned int )tmp___0; tmp___1 = qla2x00_hba_err_chk_enabled(sp); if (tmp___1 == 0) { goto ldv_65963; } else { } pkt->ref_tag_mask[0] = 255U; pkt->ref_tag_mask[1] = 255U; pkt->ref_tag_mask[2] = 255U; pkt->ref_tag_mask[3] = 255U; goto ldv_65963; case 2: pkt->app_tag = 0U; pkt->app_tag_mask[0] = 0U; pkt->app_tag_mask[1] = 0U; tmp___2 = scsi_get_lba(cmd); pkt->ref_tag = (unsigned int )tmp___2; tmp___3 = qla2x00_hba_err_chk_enabled(sp); if (tmp___3 == 0) { goto ldv_65963; } else { } pkt->ref_tag_mask[0] = 255U; pkt->ref_tag_mask[1] = 255U; pkt->ref_tag_mask[2] = 255U; pkt->ref_tag_mask[3] = 255U; goto ldv_65963; case 3: tmp___6 = 0U; pkt->ref_tag_mask[3] = tmp___6; tmp___5 = tmp___6; pkt->ref_tag_mask[2] = tmp___5; tmp___4 = tmp___5; pkt->ref_tag_mask[1] = tmp___4; pkt->ref_tag_mask[0] = tmp___4; goto ldv_65963; case 1: tmp___7 = scsi_get_lba(cmd); pkt->ref_tag = (unsigned int )tmp___7; pkt->app_tag = 0U; pkt->app_tag_mask[0] = 0U; pkt->app_tag_mask[1] = 0U; tmp___8 = qla2x00_hba_err_chk_enabled(sp); if (tmp___8 == 0) { goto ldv_65963; } else { } pkt->ref_tag_mask[0] = 255U; pkt->ref_tag_mask[1] = 255U; pkt->ref_tag_mask[2] = 255U; pkt->ref_tag_mask[3] = 255U; goto ldv_65963; } ldv_65963: ; return; } } static int qla24xx_get_one_block_sg(uint32_t blk_sz , struct qla2_sgx *sgx , uint32_t *partial ) { struct scatterlist *sg ; uint32_t cumulative_partial ; uint32_t sg_len ; dma_addr_t sg_dma_addr ; { if (sgx->num_bytes == sgx->tot_bytes) { return (0); } else { } sg = sgx->cur_sg; cumulative_partial = sgx->tot_partial; sg_dma_addr = sg->dma_address; sg_len = sg->dma_length; sgx->dma_addr = (dma_addr_t )sgx->bytes_consumed + sg_dma_addr; if ((sg_len - sgx->bytes_consumed) + cumulative_partial >= blk_sz) { sgx->dma_len = blk_sz - cumulative_partial; sgx->tot_partial = 0U; sgx->num_bytes = sgx->num_bytes + blk_sz; *partial = 0U; } else { sgx->dma_len = sg_len - sgx->bytes_consumed; sgx->tot_partial = sgx->tot_partial + sgx->dma_len; *partial = 1U; } sgx->bytes_consumed = sgx->bytes_consumed + sgx->dma_len; if (sgx->bytes_consumed == sg_len) { sg = sg_next(sg); sgx->num_sg = sgx->num_sg + 1U; sgx->cur_sg = sg; sgx->bytes_consumed = 0U; } else { } return (1); } } int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha , srb_t *sp , uint32_t *dsd , uint16_t tot_dsds , struct qla_tgt_cmd *tc ) { void *next_dsd ; uint8_t avail_dsds ; uint32_t dsd_list_len ; struct dsd_dma *dsd_ptr ; struct scatterlist *sg_prot ; uint32_t *cur_dsd ; uint16_t used_dsds ; uint32_t prot_int ; 
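/* Locals for the non-bundled DIF walk below: the data scatterlist is consumed in prot_int-sized blocks (the device sector size, or tc->blk_sz for target-mode commands) and an 8-byte protection segment is interleaved after each completed block. */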
uint32_t partial ; struct qla2_sgx sgx ; dma_addr_t sle_dma ; uint32_t sle_dma_len ; uint32_t tot_prot_dma_len ; struct scsi_cmnd *cmd ; struct scsi_qla_host *vha ; void *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; uint32_t *tmp___3 ; uint32_t *tmp___4 ; uint32_t *tmp___5 ; int tmp___6 ; uint32_t *tmp___7 ; uint32_t *tmp___8 ; uint32_t *tmp___9 ; { avail_dsds = 0U; cur_dsd = dsd; used_dsds = tot_dsds; tot_prot_dma_len = 0U; memset((void *)(& sgx), 0, 48UL); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { vha = (sp->fcport)->vha; cmd = sp->u.scmd.cmd; prot_int = (cmd->device)->sector_size; sgx.tot_bytes = scsi_bufflen(cmd); sgx.cur_sg = scsi_sglist(cmd); sgx.sp = sp; sg_prot = scsi_prot_sglist(cmd); } else if ((unsigned long )tc != (unsigned long )((struct qla_tgt_cmd *)0)) { vha = tc->vha; prot_int = tc->blk_sz; sgx.tot_bytes = (uint32_t )tc->bufflen; sgx.cur_sg = tc->sg; sg_prot = tc->prot_sg; } else { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_iocb.c"), "i" (956), "i" (12UL)); ldv_66008: ; goto ldv_66008; return (1); } goto ldv_66011; ldv_66010: sle_dma = sgx.dma_addr; sle_dma_len = sgx.dma_len; alloc_and_fill: ; if ((unsigned int )avail_dsds == 0U) { avail_dsds = (unsigned int )used_dsds <= 37U ? (uint8_t )used_dsds : 37U; dsd_list_len = (uint32_t )(((int )avail_dsds + 1) * 12); used_dsds = (int )used_dsds - (int )((uint16_t )avail_dsds); tmp = kzalloc(32UL, 32U); dsd_ptr = (struct dsd_dma *)tmp; if ((unsigned long )dsd_ptr == (unsigned long )((struct dsd_dma *)0)) { return (1); } else { } next_dsd = dma_pool_alloc(ha->dl_dma_pool, 32U, & dsd_ptr->dsd_list_dma); dsd_ptr->dsd_addr = next_dsd; if ((unsigned long )next_dsd == (unsigned long )((void *)0)) { kfree((void const *)dsd_ptr); return (1); } else { } if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { list_add_tail(& dsd_ptr->list, & ((struct crc_context *)sp->u.scmd.ctx)->dsd_list); sp->flags = (uint16_t )((unsigned int )sp->flags | 32U); } else { list_add_tail(& dsd_ptr->list, & (tc->ctx)->dsd_list); tc->ctx_dsd_alloced = 1U; } tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )dsd_ptr->dsd_list_dma; tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = (unsigned int )(dsd_ptr->dsd_list_dma >> 32ULL); tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = dsd_list_len; cur_dsd = (uint32_t *)next_dsd; } else { } tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = (unsigned int )sle_dma; tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = (unsigned int )(sle_dma >> 32ULL); tmp___5 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___5 = sle_dma_len; avail_dsds = (uint8_t )((int )avail_dsds - 1); if (partial == 0U) { sle_dma = sg_prot->dma_address + (dma_addr_t )tot_prot_dma_len; sle_dma_len = 8U; tot_prot_dma_len = tot_prot_dma_len + sle_dma_len; if (sg_prot->dma_length == tot_prot_dma_len) { tot_prot_dma_len = 0U; sg_prot = sg_next(sg_prot); } else { } partial = 1U; goto alloc_and_fill; } else { } ldv_66011: tmp___6 = qla24xx_get_one_block_sg(prot_int, & sgx, & partial); if (tmp___6 != 0) { goto ldv_66010; } else { } tmp___7 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___7 = 0U; tmp___8 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___8 = 0U; tmp___9 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___9 = 0U; 
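/* The DSD chain is closed with an all-zero terminator descriptor (address 0/0, length 0) before returning success. */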
return (0); } } int qla24xx_walk_and_build_sglist(struct qla_hw_data *ha , srb_t *sp , uint32_t *dsd , uint16_t tot_dsds , struct qla_tgt_cmd *tc ) { void *next_dsd ; uint8_t avail_dsds ; uint32_t dsd_list_len ; struct dsd_dma *dsd_ptr ; struct scatterlist *sg ; struct scatterlist *sgl ; uint32_t *cur_dsd ; int i ; uint16_t used_dsds ; struct scsi_cmnd *cmd ; struct scsi_qla_host *vha ; dma_addr_t sle_dma ; void *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; uint32_t *tmp___3 ; uint32_t *tmp___4 ; uint32_t *tmp___5 ; uint32_t *tmp___6 ; uint32_t *tmp___7 ; uint32_t *tmp___8 ; { avail_dsds = 0U; cur_dsd = dsd; used_dsds = tot_dsds; if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { cmd = sp->u.scmd.cmd; sgl = scsi_sglist(cmd); vha = (sp->fcport)->vha; } else if ((unsigned long )tc != (unsigned long )((struct qla_tgt_cmd *)0)) { sgl = tc->sg; vha = tc->vha; } else { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_iocb.c"), "i" (1060), "i" (12UL)); ldv_66031: ; goto ldv_66031; return (1); } i = 0; sg = sgl; goto ldv_66034; ldv_66033: ; if ((unsigned int )avail_dsds == 0U) { avail_dsds = (unsigned int )used_dsds <= 37U ? (uint8_t )used_dsds : 37U; dsd_list_len = (uint32_t )(((int )avail_dsds + 1) * 12); used_dsds = (int )used_dsds - (int )((uint16_t )avail_dsds); tmp = kzalloc(32UL, 32U); dsd_ptr = (struct dsd_dma *)tmp; if ((unsigned long )dsd_ptr == (unsigned long )((struct dsd_dma *)0)) { return (1); } else { } next_dsd = dma_pool_alloc(ha->dl_dma_pool, 32U, & dsd_ptr->dsd_list_dma); dsd_ptr->dsd_addr = next_dsd; if ((unsigned long )next_dsd == (unsigned long )((void *)0)) { kfree((void const *)dsd_ptr); return (1); } else { } if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { list_add_tail(& dsd_ptr->list, & ((struct crc_context *)sp->u.scmd.ctx)->dsd_list); sp->flags = (uint16_t )((unsigned int )sp->flags | 32U); } else { list_add_tail(& dsd_ptr->list, & (tc->ctx)->dsd_list); tc->ctx_dsd_alloced = 1U; } tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )dsd_ptr->dsd_list_dma; tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = (unsigned int )(dsd_ptr->dsd_list_dma >> 32ULL); tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = dsd_list_len; cur_dsd = (uint32_t *)next_dsd; } else { } sle_dma = sg->dma_address; tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = (unsigned int )sle_dma; tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = (unsigned int )(sle_dma >> 32ULL); tmp___5 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___5 = sg->dma_length; avail_dsds = (uint8_t )((int )avail_dsds - 1); i = i + 1; sg = sg_next(sg); ldv_66034: ; if ((int )tot_dsds > i) { goto ldv_66033; } else { } tmp___6 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___6 = 0U; tmp___7 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___7 = 0U; tmp___8 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___8 = 0U; return (0); } } int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha , srb_t *sp , uint32_t *dsd , uint16_t tot_dsds , struct qla_tgt_cmd *tc ) { void *next_dsd ; uint8_t avail_dsds ; uint32_t dsd_list_len ; struct dsd_dma *dsd_ptr ; struct scatterlist *sg ; struct scatterlist *sgl ; int i ; struct scsi_cmnd *cmd ; uint32_t *cur_dsd ; uint16_t used_dsds ; struct scsi_qla_host *vha ; 
dma_addr_t sle_dma ; void *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; uint32_t *tmp___3 ; uint32_t *tmp___4 ; uint32_t *tmp___5 ; uint32_t *tmp___6 ; uint32_t *tmp___7 ; uint32_t *tmp___8 ; { avail_dsds = 0U; cur_dsd = dsd; used_dsds = tot_dsds; if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { cmd = sp->u.scmd.cmd; sgl = scsi_prot_sglist(cmd); vha = (sp->fcport)->vha; } else if ((unsigned long )tc != (unsigned long )((struct qla_tgt_cmd *)0)) { vha = tc->vha; sgl = tc->prot_sg; } else { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_iocb.c"), "i" (1150), "i" (12UL)); ldv_66054: ; goto ldv_66054; return (1); } ql_dbg(16384U, vha, 57377, "%s: enter\n", "qla24xx_walk_and_build_prot_sglist"); i = 0; sg = sgl; goto ldv_66058; ldv_66057: ; if ((unsigned int )avail_dsds == 0U) { avail_dsds = (unsigned int )used_dsds <= 37U ? (uint8_t )used_dsds : 37U; dsd_list_len = (uint32_t )(((int )avail_dsds + 1) * 12); used_dsds = (int )used_dsds - (int )((uint16_t )avail_dsds); tmp = kzalloc(32UL, 32U); dsd_ptr = (struct dsd_dma *)tmp; if ((unsigned long )dsd_ptr == (unsigned long )((struct dsd_dma *)0)) { return (1); } else { } next_dsd = dma_pool_alloc(ha->dl_dma_pool, 32U, & dsd_ptr->dsd_list_dma); dsd_ptr->dsd_addr = next_dsd; if ((unsigned long )next_dsd == (unsigned long )((void *)0)) { kfree((void const *)dsd_ptr); return (1); } else { } if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { list_add_tail(& dsd_ptr->list, & ((struct crc_context *)sp->u.scmd.ctx)->dsd_list); sp->flags = (uint16_t )((unsigned int )sp->flags | 32U); } else { list_add_tail(& dsd_ptr->list, & (tc->ctx)->dsd_list); tc->ctx_dsd_alloced = 1U; } tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )dsd_ptr->dsd_list_dma; tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = (unsigned int )(dsd_ptr->dsd_list_dma >> 32ULL); tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = dsd_list_len; cur_dsd = (uint32_t *)next_dsd; } else { } sle_dma = sg->dma_address; tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = (unsigned int )sle_dma; tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = (unsigned int )(sle_dma >> 32ULL); tmp___5 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___5 = sg->dma_length; avail_dsds = (uint8_t )((int )avail_dsds - 1); i = i + 1; sg = sg_next(sg); ldv_66058: ; if ((int )tot_dsds > i) { goto ldv_66057; } else { } tmp___6 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___6 = 0U; tmp___7 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___7 = 0U; tmp___8 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___8 = 0U; return (0); } } __inline static int qla24xx_build_scsi_crc_2_iocbs(srb_t *sp , struct cmd_type_crc_2 *cmd_pkt , uint16_t tot_dsds , uint16_t tot_prot_dsds , uint16_t fw_prot_opts ) { uint32_t *cur_dsd ; uint32_t *fcp_dl ; scsi_qla_host_t *vha ; struct scsi_cmnd *cmd ; int sgc ; uint32_t total_bytes ; uint32_t data_bytes ; uint32_t dif_bytes ; uint8_t bundling ; uint16_t blk_size ; uint8_t *clr_ptr ; struct crc_context *crc_ctx_pkt ; struct qla_hw_data *ha ; uint8_t additional_fcpcdb_len ; uint16_t fcp_cmnd_len ; struct fcp_cmnd *fcp_cmnd ; dma_addr_t crc_ctx_dma ; unsigned char tmp ; unsigned char tmp___0 ; unsigned char tmp___1 ; unsigned char tmp___2 ; void *tmp___3 ; unsigned char 
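/* Remaining temporaries for qla24xx_build_scsi_crc_2_iocbs(): the builder allocates a crc_context from ha->dl_dma_pool, fills in the FCP command and T10-PI tag fields, stores the byte-swapped transfer length into the fcp_dl slot following the CDB, and then hands the data and protection scatterlists to the walk_and_build helpers. */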
tmp___4 ; unsigned char tmp___5 ; unsigned char tmp___6 ; unsigned char tmp___7 ; int tmp___8 ; __u32 tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; { total_bytes = 0U; bundling = 1U; crc_ctx_pkt = (struct crc_context *)0; cmd = sp->u.scmd.cmd; sgc = 0; *((uint32_t *)(& cmd_pkt->entry_type)) = 106U; vha = (sp->fcport)->vha; ha = vha->hw; data_bytes = scsi_bufflen(cmd); if (data_bytes == 0U || (unsigned int )cmd->sc_data_direction == 3U) { cmd_pkt->byte_count = 0U; return (0); } else { } cmd_pkt->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; if ((unsigned int )cmd->sc_data_direction == 1U) { cmd_pkt->control_flags = 1U; } else if ((unsigned int )cmd->sc_data_direction == 2U) { cmd_pkt->control_flags = 2U; } else { } tmp = scsi_get_prot_op(cmd); if ((unsigned int )tmp == 1U) { bundling = 0U; } else { tmp___0 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___0 == 2U) { bundling = 0U; } else { tmp___1 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___1 == 3U) { bundling = 0U; } else { tmp___2 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___2 == 4U) { bundling = 0U; } else { } } } } tmp___3 = dma_pool_alloc(ha->dl_dma_pool, 32U, & crc_ctx_dma); sp->u.scmd.ctx = tmp___3; crc_ctx_pkt = (struct crc_context *)tmp___3; if ((unsigned long )crc_ctx_pkt == (unsigned long )((struct crc_context *)0)) { goto crc_queuing_error; } else { } clr_ptr = (uint8_t *)crc_ctx_pkt; memset((void *)clr_ptr, 0, 360UL); crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; sp->flags = (uint16_t )((unsigned int )sp->flags | 4U); crc_ctx_pkt->handle = cmd_pkt->handle; INIT_LIST_HEAD(& crc_ctx_pkt->dsd_list); qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)(& crc_ctx_pkt->ref_tag), (unsigned int )tot_prot_dsds); cmd_pkt->crc_context_address[0] = (unsigned int )crc_ctx_dma; cmd_pkt->crc_context_address[1] = (unsigned int )(crc_ctx_dma >> 32ULL); cmd_pkt->crc_context_len = 64U; if ((unsigned int )cmd->cmd_len > 16U) { additional_fcpcdb_len = (unsigned int )((uint8_t )cmd->cmd_len) + 240U; if (((unsigned int )cmd->cmd_len & 3U) != 0U) { goto crc_queuing_error; } else { } fcp_cmnd_len = (unsigned int )cmd->cmd_len + 16U; } else { additional_fcpcdb_len = 0U; fcp_cmnd_len = 32U; } fcp_cmnd = & crc_ctx_pkt->fcp_cmnd; fcp_cmnd->additional_cdb_len = additional_fcpcdb_len; if ((unsigned int )cmd->sc_data_direction == 1U) { fcp_cmnd->additional_cdb_len = (uint8_t )((unsigned int )fcp_cmnd->additional_cdb_len | 1U); } else if ((unsigned int )cmd->sc_data_direction == 2U) { fcp_cmnd->additional_cdb_len = (uint8_t )((unsigned int )fcp_cmnd->additional_cdb_len | 2U); } else { } int_to_scsilun((cmd->device)->lun, & fcp_cmnd->lun); memcpy((void *)(& fcp_cmnd->cdb), (void const *)cmd->cmnd, (size_t )cmd->cmd_len); cmd_pkt->fcp_cmnd_dseg_len = fcp_cmnd_len; cmd_pkt->fcp_cmnd_dseg_address[0] = (unsigned int )crc_ctx_dma + 64U; cmd_pkt->fcp_cmnd_dseg_address[1] = (unsigned int )((crc_ctx_dma + 64ULL) >> 32ULL); fcp_cmnd->task_management = 0U; fcp_cmnd->task_attribute = 0U; cmd_pkt->fcp_rsp_dseg_len = 0U; dif_bytes = 0U; blk_size = (uint16_t )(cmd->device)->sector_size; dif_bytes = (data_bytes / (uint32_t )blk_size) * 8U; tmp___4 = scsi_get_prot_op(sp->u.scmd.cmd); switch ((int )tmp___4) { case 1: ; case 2: total_bytes = data_bytes; data_bytes = data_bytes + dif_bytes; goto ldv_66087; case 3: ; case 4: ; case 5: ; case 6: total_bytes = data_bytes + dif_bytes; goto ldv_66087; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_iocb.c"), "i" (1361), "i" (12UL)); ldv_66093: ; goto ldv_66093; } ldv_66087: tmp___8 = qla2x00_hba_err_chk_enabled(sp); if (tmp___8 == 0) { fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 16U); } else if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { tmp___6 = scsi_get_prot_type(sp->u.scmd.cmd); if ((unsigned int )tmp___6 == 1U) { fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 1024U); } else { tmp___7 = scsi_get_prot_type(sp->u.scmd.cmd); if ((unsigned int )tmp___7 == 2U) { fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 1024U); } else { tmp___5 = scsi_get_prot_type(sp->u.scmd.cmd); if ((unsigned int )tmp___5 == 3U) { fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 2048U); } else { } } } } else { } if ((unsigned int )bundling == 0U) { cur_dsd = (uint32_t *)(& crc_ctx_pkt->u.nobundling.data_address); } else { fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 256U); crc_ctx_pkt->u.bundling.dif_byte_count = dif_bytes; crc_ctx_pkt->u.bundling.dseg_count = (int )tot_dsds - (int )tot_prot_dsds; cur_dsd = (uint32_t *)(& crc_ctx_pkt->u.bundling.data_address); } crc_ctx_pkt->blk_size = blk_size; crc_ctx_pkt->prot_opts = fw_prot_opts; crc_ctx_pkt->byte_count = data_bytes; crc_ctx_pkt->guard_seed = 0U; cmd_pkt->byte_count = total_bytes; fcp_dl = (uint32_t *)(& crc_ctx_pkt->fcp_cmnd.cdb) + ((unsigned long )additional_fcpcdb_len + 16UL); tmp___9 = __fswab32(total_bytes); *fcp_dl = tmp___9; if (data_bytes == 0U || (unsigned int )cmd->sc_data_direction == 3U) { cmd_pkt->byte_count = 0U; return (0); } else { } cmd_pkt->control_flags = (uint16_t )((unsigned int )cmd_pkt->control_flags | 4U); if ((unsigned int )bundling == 0U && (unsigned int )tot_prot_dsds != 0U) { tmp___10 = qla24xx_walk_and_build_sglist_no_difb(ha, sp, cur_dsd, (int )tot_dsds, (struct qla_tgt_cmd *)0); if (tmp___10 != 0) { goto crc_queuing_error; } else { } } else { tmp___11 = qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, (int )tot_dsds - (int )tot_prot_dsds, (struct qla_tgt_cmd *)0); if (tmp___11 != 0) { goto crc_queuing_error; } else { } } if ((unsigned int )bundling != 0U && (unsigned int )tot_prot_dsds != 0U) { cmd_pkt->control_flags = (uint16_t )((unsigned int )cmd_pkt->control_flags | 8U); cur_dsd = (uint32_t *)(& crc_ctx_pkt->u.bundling.dif_address); tmp___12 = qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, (int )tot_prot_dsds, (struct qla_tgt_cmd *)0); if (tmp___12 != 0) { goto crc_queuing_error; } else { } } else { } return (0); crc_queuing_error: ; return (258); } } int qla24xx_start_scsi(srb_t *sp ) { int ret ; int nseg ; unsigned long flags ; uint32_t *clr_ptr ; uint32_t index ; uint32_t handle ; struct cmd_type_7 *cmd_pkt ; uint16_t cnt ; uint16_t req_cnt ; uint16_t tot_dsds ; struct req_que *req ; struct rsp_que *rsp ; struct scsi_cmnd *cmd ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; int tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; struct scatterlist *tmp___2 ; long tmp___3 ; unsigned int tmp___4 ; unsigned int tmp___5 ; { req = (struct req_que *)0; rsp = (struct rsp_que *)0; cmd = sp->u.scmd.cmd; vha = (sp->fcport)->vha; ha = vha->hw; ret = 0; qla25xx_set_que(sp, & rsp); req = vha->req; tot_dsds = 0U; if ((unsigned int )vha->marker_needed != 0U) { tmp = qla2x00_marker(vha, req, rsp, 0, 0ULL, 2); if (tmp != 0) { return (258); 
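/* 258 is 0x102 (QLA_FUNCTION_FAILED in the original driver): a marker IOCB that cannot be issued aborts command submission immediately. */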
} else { } vha->marker_needed = 0U; } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); handle = req->current_outstanding_cmd; index = 1U; goto ldv_66117; ldv_66116: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_66115; } else { } index = index + 1U; ldv_66117: ; if ((uint32_t )req->num_outstanding_cmds > index) { goto ldv_66116; } else { } ldv_66115: ; if ((uint32_t )req->num_outstanding_cmds == index) { goto queuing_error; } else { } tmp___4 = scsi_sg_count(cmd); if (tmp___4 != 0U) { tmp___1 = scsi_sg_count(cmd); tmp___2 = scsi_sglist(cmd); nseg = dma_map_sg_attrs(& (ha->pdev)->dev, tmp___2, (int )tmp___1, cmd->sc_data_direction, (struct dma_attrs *)0); tmp___3 = ldv__builtin_expect(nseg == 0, 0L); if (tmp___3 != 0L) { goto queuing_error; } else { } } else { nseg = 0; } tot_dsds = (uint16_t )nseg; req_cnt = qla24xx_calc_iocbs(vha, (int )tot_dsds); if ((int )req->cnt < (int )req_cnt + 2) { if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { cnt = *(req->out_ptr); } else { tmp___5 = __readl((void const volatile *)req->req_q_out); cnt = (uint16_t )tmp___5; } if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } if ((int )req->cnt < (int )req_cnt + 2) { goto queuing_error; } else { } } else { } req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)((unsigned long )handle); req->cnt = (int )req->cnt - (int )req_cnt; cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; cmd_pkt->handle = ((unsigned int )req->id << 16) | handle; clr_ptr = (uint32_t *)cmd_pkt + 2UL; memset((void *)clr_ptr, 0, 56UL); cmd_pkt->dseg_count = tot_dsds; cmd_pkt->nport_handle = (sp->fcport)->loop_id; cmd_pkt->port_id[0] = (sp->fcport)->d_id.b.al_pa; cmd_pkt->port_id[1] = (sp->fcport)->d_id.b.area; cmd_pkt->port_id[2] = (sp->fcport)->d_id.b.domain; cmd_pkt->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; int_to_scsilun((cmd->device)->lun, & cmd_pkt->lun); host_to_fcp_swap((uint8_t *)(& cmd_pkt->lun), 8U); cmd_pkt->task = 0U; memcpy((void *)(& cmd_pkt->fcp_cdb), (void const *)cmd->cmnd, (size_t )cmd->cmd_len); host_to_fcp_swap((uint8_t *)(& cmd_pkt->fcp_cdb), 16U); cmd_pkt->byte_count = scsi_bufflen(cmd); qla24xx_build_scsi_iocbs(sp, cmd_pkt, (int )tot_dsds); cmd_pkt->entry_count = (unsigned char )req_cnt; cmd_pkt->entry_status = (unsigned char )rsp->id; __asm__ volatile ("sfence": : : "memory"); req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } sp->flags = (uint16_t )((unsigned int )sp->flags | 1U); writel((unsigned int )req->ring_index, (void volatile *)req->req_q_in); __readl((void const volatile *)(& (ha->iobase)->isp24.hccr)); if (*((unsigned long *)vha + 19UL) != 0UL && (rsp->ring_ptr)->signature != 3735936685U) { qla24xx_process_response_queue(vha, rsp); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); queuing_error: ; if ((unsigned int )tot_dsds != 0U) { scsi_dma_unmap(cmd); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (258); } } int qla24xx_dif_start_scsi(srb_t *sp ) { int 
nseg ; unsigned long flags ; uint32_t *clr_ptr ; uint32_t index ; uint32_t handle ; uint16_t cnt ; uint16_t req_cnt ; uint16_t tot_dsds ; uint16_t tot_prot_dsds ; uint16_t fw_prot_opts ; struct req_que *req ; struct rsp_que *rsp ; struct scsi_cmnd *cmd ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct cmd_type_crc_2 *cmd_pkt ; uint32_t status ; int tmp ; unsigned char tmp___0 ; int tmp___1 ; raw_spinlock_t *tmp___2 ; unsigned int tmp___3 ; struct scatterlist *tmp___4 ; long tmp___5 ; struct qla2_sgx sgx ; uint32_t partial ; int tmp___6 ; unsigned char tmp___7 ; unsigned char tmp___8 ; unsigned int tmp___9 ; unsigned int tmp___10 ; struct scatterlist *tmp___11 ; long tmp___12 ; unsigned int tmp___13 ; unsigned char tmp___14 ; unsigned char tmp___15 ; int tmp___16 ; unsigned int tmp___17 ; int tmp___18 ; { req_cnt = 0U; fw_prot_opts = 0U; req = (struct req_que *)0; rsp = (struct rsp_que *)0; cmd = sp->u.scmd.cmd; vha = (sp->fcport)->vha; ha = vha->hw; status = 0U; tmp___0 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___0 == 0U) { if ((unsigned int )cmd->cmd_len <= 16U) { tmp = qla24xx_start_scsi(sp); return (tmp); } else { } } else { } qla25xx_set_que(sp, & rsp); req = vha->req; tot_dsds = 0U; if ((unsigned int )vha->marker_needed != 0U) { tmp___1 = qla2x00_marker(vha, req, rsp, 0, 0ULL, 2); if (tmp___1 != 0) { return (258); } else { } vha->marker_needed = 0U; } else { } tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); handle = req->current_outstanding_cmd; index = 1U; goto ldv_66144; ldv_66143: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_66142; } else { } index = index + 1U; ldv_66144: ; if ((uint32_t )req->num_outstanding_cmds > index) { goto ldv_66143; } else { } ldv_66142: ; if ((uint32_t )req->num_outstanding_cmds == index) { goto queuing_error; } else { } tmp___9 = scsi_sg_count(cmd); if (tmp___9 != 0U) { tmp___3 = scsi_sg_count(cmd); tmp___4 = scsi_sglist(cmd); nseg = dma_map_sg_attrs(& (ha->pdev)->dev, tmp___4, (int )tmp___3, cmd->sc_data_direction, (struct dma_attrs *)0); tmp___5 = ldv__builtin_expect(nseg == 0, 0L); if (tmp___5 != 0L) { goto queuing_error; } else { sp->flags = (uint16_t )((unsigned int )sp->flags | 1U); } tmp___7 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___7 == 1U) { goto _L; } else { tmp___8 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___8 == 2U) { _L: /* CIL Label */ memset((void *)(& sgx), 0, 48UL); sgx.tot_bytes = scsi_bufflen(cmd); sgx.cur_sg = scsi_sglist(cmd); sgx.sp = sp; nseg = 0; goto ldv_66149; ldv_66148: nseg = nseg + 1; ldv_66149: tmp___6 = qla24xx_get_one_block_sg((cmd->device)->sector_size, & sgx, & partial); if (tmp___6 != 0) { goto ldv_66148; } else { } } else { } } } else { nseg = 0; } tot_dsds = (uint16_t )nseg; tmp___16 = qla24xx_configure_prot_mode(sp, & fw_prot_opts); if (tmp___16 != 0) { tmp___10 = scsi_prot_sg_count(cmd); tmp___11 = scsi_prot_sglist(cmd); nseg = dma_map_sg_attrs(& (ha->pdev)->dev, tmp___11, (int )tmp___10, cmd->sc_data_direction, (struct dma_attrs *)0); tmp___12 = ldv__builtin_expect(nseg == 0, 0L); if (tmp___12 != 0L) { goto queuing_error; } else { sp->flags = (uint16_t )((unsigned int )sp->flags | 16U); } tmp___14 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___14 == 1U) { tmp___13 = scsi_bufflen(cmd); nseg = (int )(tmp___13 / (cmd->device)->sector_size); } else { tmp___15 = 
scsi_get_prot_op(cmd); if ((unsigned int )tmp___15 == 2U) { tmp___13 = scsi_bufflen(cmd); nseg = (int )(tmp___13 / (cmd->device)->sector_size); } else { } } } else { nseg = 0; } req_cnt = 1U; tot_prot_dsds = (uint16_t )nseg; tot_dsds = (int )((uint16_t )nseg) + (int )tot_dsds; if ((int )req->cnt < (int )req_cnt + 2) { if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { cnt = *(req->out_ptr); } else { tmp___17 = __readl((void const volatile *)req->req_q_out); cnt = (uint16_t )tmp___17; } if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } if ((int )req->cnt < (int )req_cnt + 2) { goto queuing_error; } else { } } else { } status = status | 1U; req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)((unsigned long )handle); req->cnt = (int )req->cnt - (int )req_cnt; cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; cmd_pkt->handle = ((unsigned int )req->id << 16) | handle; clr_ptr = (uint32_t *)cmd_pkt + 2UL; memset((void *)clr_ptr, 0, 56UL); cmd_pkt->nport_handle = (sp->fcport)->loop_id; cmd_pkt->port_id[0] = (sp->fcport)->d_id.b.al_pa; cmd_pkt->port_id[1] = (sp->fcport)->d_id.b.area; cmd_pkt->port_id[2] = (sp->fcport)->d_id.b.domain; int_to_scsilun((cmd->device)->lun, & cmd_pkt->lun); host_to_fcp_swap((uint8_t *)(& cmd_pkt->lun), 8U); cmd_pkt->dseg_count = tot_dsds; tmp___18 = qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)req->ring_ptr, (int )tot_dsds, (int )tot_prot_dsds, (int )fw_prot_opts); if (tmp___18 != 0) { goto queuing_error; } else { } cmd_pkt->entry_count = (unsigned char )req_cnt; cmd_pkt->entry_status = (unsigned char )rsp->id; cmd_pkt->timeout = 0U; __asm__ volatile ("sfence": : : "memory"); req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } writel((unsigned int )req->ring_index, (void volatile *)req->req_q_in); __readl((void const volatile *)(& (ha->iobase)->isp24.hccr)); if (*((unsigned long *)vha + 19UL) != 0UL && (rsp->ring_ptr)->signature != 3735936685U) { qla24xx_process_response_queue(vha, rsp); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); queuing_error: ; if ((int )status & 1) { *(req->outstanding_cmds + (unsigned long )handle) = (srb_t *)0; req->cnt = (int )req->cnt + (int )req_cnt; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (258); } } static void qla25xx_set_que(srb_t *sp , struct rsp_que **rsp ) { struct scsi_cmnd *cmd ; struct qla_hw_data *ha ; int affinity ; { cmd = sp->u.scmd.cmd; ha = ((sp->fcport)->vha)->hw; affinity = (cmd->request)->cpu; if ((*((unsigned long *)ha + 2UL) != 0UL && affinity >= 0) && (int )ha->max_rsp_queues + -1 > affinity) { *rsp = *(ha->rsp_q_map + ((unsigned long )affinity + 1UL)); } else { *rsp = *(ha->rsp_q_map); } return; } } void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *vha , srb_t *sp ) { int tmp ; void *tmp___0 ; { tmp = qla2x00_reset_active(vha); if (tmp != 0) { return ((void *)0); } else { } tmp___0 = qla2x00_alloc_iocbs(vha, sp); return (tmp___0); } } void *qla2x00_alloc_iocbs(struct scsi_qla_host *vha , srb_t *sp ) { struct qla_hw_data *ha ; struct req_que *req ; device_reg_t *reg ; uint32_t index ; uint32_t handle ; request_t *pkt ; uint16_t cnt ; uint16_t req_cnt ; 
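/* Summary of qla2x00_alloc_iocbs() below (editorial comment based on the visible
 * code): generic IOCB allocator used, e.g., by qla2x00_start_sp(). When sp is
 * non-NULL it first claims a free slot in req->outstanding_cmds; it then checks
 * ring space, reading the queue-out pointer from whichever register block
 * matches the ISP family, zeroes a 64-byte packet at req->ring_ptr and returns
 * it. If no handle or no ring space is available, pkt is still NULL at
 * queuing_error and NULL is returned. */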
unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; unsigned int tmp___2 ; { ha = vha->hw; req = *(ha->req_q_map); reg = ((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) ? ha->mqiobase + (unsigned long )((int )req->id * 4096) : ha->iobase; pkt = (request_t *)0; req_cnt = 1U; handle = 0U; if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto skip_cmd_array; } else { } handle = req->current_outstanding_cmd; index = 1U; goto ldv_66177; ldv_66176: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_66175; } else { } index = index + 1U; ldv_66177: ; if ((uint32_t )req->num_outstanding_cmds > index) { goto ldv_66176; } else { } ldv_66175: ; if ((uint32_t )req->num_outstanding_cmds == index) { ql_log(1U, vha, 28683, "No room on outstanding cmd array.\n"); goto queuing_error; } else { } req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = sp; sp->handle = handle; if ((unsigned int )sp->type != 8U) { req_cnt = (uint16_t )sp->iocbs; } else { } skip_cmd_array: ; if ((int )req->cnt < (int )req_cnt + 2) { if (((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { tmp = readl((void const volatile *)(& reg->isp25mq.req_q_out)); cnt = (uint16_t )tmp; } else if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { tmp___0 = readl((void const volatile *)(& reg->isp82.req_q_out)); cnt = (uint16_t )tmp___0; } else if ((ha->device_type & 134217728U) != 0U) { tmp___1 = readl((void const volatile *)(& reg->isp24.req_q_out)); cnt = (uint16_t )tmp___1; } else if ((ha->device_type & 131072U) != 0U) { tmp___2 = readl((void const volatile *)(& reg->ispfx00.req_q_out)); cnt = (uint16_t )tmp___2; } else { cnt = qla2x00_debounce_register___0((uint16_t volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->isp.u.isp2100.mailbox4 : & reg->isp.u.isp2300.req_q_out)); } if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } } else { } if ((int )req->cnt < (int )req_cnt + 2) { goto queuing_error; } else { } req->cnt = (int )req->cnt - (int )req_cnt; pkt = req->ring_ptr; memset((void *)pkt, 0, 64UL); if ((ha->device_type & 131072U) != 0U) { writeb((int )((unsigned char )req_cnt), (void volatile *)(& pkt->entry_count)); writew((int )((unsigned short )handle), (void volatile *)(& pkt->handle)); } else { pkt->entry_count = (uint8_t )req_cnt; pkt->handle = handle; } queuing_error: ; return ((void *)pkt); } } static void qla24xx_login_iocb(srb_t *sp , struct logio_entry_24xx *logio ) { struct srb_iocb *lio ; { lio = & sp->u.iocb_cmd; logio->entry_type = 82U; logio->control_flags = 0U; if (((int )lio->u.logio.flags & 2) != 0) { logio->control_flags = (uint16_t )((unsigned int )logio->control_flags | 16U); } else { } if (((int )lio->u.logio.flags & 4) != 0) { logio->control_flags = (uint16_t )((unsigned int )logio->control_flags | 32U); } else { } logio->nport_handle = (sp->fcport)->loop_id; logio->port_id[0] = (sp->fcport)->d_id.b.al_pa; logio->port_id[1] = (sp->fcport)->d_id.b.area; logio->port_id[2] = (sp->fcport)->d_id.b.domain; logio->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; return; } } static void qla2x00_login_iocb(srb_t *sp , struct mbx_entry *mbx ) { struct qla_hw_data *ha ; struct srb_iocb *lio ; uint16_t opts ; { ha = ((sp->fcport)->vha)->hw; lio = & sp->u.iocb_cmd; mbx->entry_type = 57U; if ((int )ha->device_type < 0) { mbx->loop_id.extended = (sp->fcport)->loop_id; } else { mbx->loop_id.id.standard = (unsigned char )(sp->fcport)->loop_id; } mbx->mb0 = 111U; opts = ((int )lio->u.logio.flags & 2) != 0; opts = (uint16_t )((((int )lio->u.logio.flags & 4) != 0 ? 2 : 0) | (int )((short )opts)); if ((int )ha->device_type < 0) { mbx->mb1 = (sp->fcport)->loop_id; mbx->mb10 = opts; } else { mbx->mb1 = (unsigned short )((int )((short )((int )(sp->fcport)->loop_id << 8)) | (int )((short )opts)); } mbx->mb2 = (unsigned short )(sp->fcport)->d_id.b.domain; mbx->mb3 = (unsigned short )((int )((short )((int )(sp->fcport)->d_id.b.area << 8)) | (int )((short )(sp->fcport)->d_id.b.al_pa)); mbx->mb9 = ((sp->fcport)->vha)->vp_idx; return; } } static void qla24xx_logout_iocb(srb_t *sp , struct logio_entry_24xx *logio ) { { logio->entry_type = 82U; logio->control_flags = 24U; logio->nport_handle = (sp->fcport)->loop_id; logio->port_id[0] = (sp->fcport)->d_id.b.al_pa; logio->port_id[1] = (sp->fcport)->d_id.b.area; logio->port_id[2] = (sp->fcport)->d_id.b.domain; logio->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; return; } } static void qla2x00_logout_iocb(srb_t *sp , struct mbx_entry *mbx ) { struct qla_hw_data *ha ; { ha = ((sp->fcport)->vha)->hw; mbx->entry_type = 57U; if ((int )ha->device_type < 0) { mbx->loop_id.extended = (sp->fcport)->loop_id; } else { mbx->loop_id.id.standard = (unsigned char )(sp->fcport)->loop_id; } mbx->mb0 = 113U; mbx->mb1 = (int )ha->device_type < 0 ? 
(sp->fcport)->loop_id : (uint16_t )((int )(sp->fcport)->loop_id << 8U); mbx->mb2 = (unsigned short )(sp->fcport)->d_id.b.domain; mbx->mb3 = (unsigned short )((int )((short )((int )(sp->fcport)->d_id.b.area << 8)) | (int )((short )(sp->fcport)->d_id.b.al_pa)); mbx->mb9 = ((sp->fcport)->vha)->vp_idx; return; } } static void qla24xx_adisc_iocb(srb_t *sp , struct logio_entry_24xx *logio ) { { logio->entry_type = 82U; logio->control_flags = 3U; logio->nport_handle = (sp->fcport)->loop_id; logio->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; return; } } static void qla2x00_adisc_iocb(srb_t *sp , struct mbx_entry *mbx ) { struct qla_hw_data *ha ; { ha = ((sp->fcport)->vha)->hw; mbx->entry_type = 57U; if ((int )ha->device_type < 0) { mbx->loop_id.extended = (sp->fcport)->loop_id; } else { mbx->loop_id.id.standard = (unsigned char )(sp->fcport)->loop_id; } mbx->mb0 = 100U; if ((int )ha->device_type < 0) { mbx->mb1 = (sp->fcport)->loop_id; mbx->mb10 = 1U; } else { mbx->mb1 = (unsigned short )((int )((short )((int )(sp->fcport)->loop_id << 8)) | 1); } mbx->mb2 = (unsigned short )((unsigned int )ha->async_pd_dma >> 16); mbx->mb3 = (unsigned short )ha->async_pd_dma; mbx->mb6 = (unsigned short )((unsigned int )(ha->async_pd_dma >> 32ULL) >> 16); mbx->mb7 = (unsigned short )(ha->async_pd_dma >> 32ULL); mbx->mb9 = ((sp->fcport)->vha)->vp_idx; return; } } static void qla24xx_tm_iocb(srb_t *sp , struct tsk_mgmt_entry *tsk ) { uint32_t flags ; uint64_t lun ; struct fc_port *fcport ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct srb_iocb *iocb ; struct req_que *req ; { fcport = sp->fcport; vha = fcport->vha; ha = vha->hw; iocb = & sp->u.iocb_cmd; req = vha->req; flags = iocb->u.tmf.flags; lun = iocb->u.tmf.lun; tsk->entry_type = 20U; tsk->entry_count = 1U; tsk->handle = ((unsigned int )req->id << 16) | tsk->handle; tsk->nport_handle = fcport->loop_id; tsk->timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; tsk->control_flags = flags; tsk->port_id[0] = fcport->d_id.b.al_pa; tsk->port_id[1] = fcport->d_id.b.area; tsk->port_id[2] = fcport->d_id.b.domain; tsk->vp_index = (uint8_t )(fcport->vha)->vp_idx; if (flags == 16U) { int_to_scsilun(lun, & tsk->lun); host_to_fcp_swap((uint8_t *)(& tsk->lun), 8U); } else { } return; } } static void qla24xx_els_iocb(srb_t *sp , struct els_entry_24xx *els_iocb ) { struct fc_bsg_job *bsg_job ; { bsg_job = sp->u.bsg_job; els_iocb->entry_type = 83U; els_iocb->entry_count = 1U; els_iocb->sys_define = 0U; els_iocb->entry_status = 0U; els_iocb->handle = sp->handle; els_iocb->nport_handle = (sp->fcport)->loop_id; els_iocb->tx_dsd_count = (unsigned short )bsg_job->request_payload.sg_cnt; els_iocb->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; els_iocb->sof_type = 16U; els_iocb->rx_dsd_count = (unsigned short )bsg_job->reply_payload.sg_cnt; els_iocb->opcode = (unsigned int )sp->type == 3U ? 
(bsg_job->request)->rqst_data.r_els.els_code : (bsg_job->request)->rqst_data.h_els.command_code; els_iocb->port_id[0] = (sp->fcport)->d_id.b.al_pa; els_iocb->port_id[1] = (sp->fcport)->d_id.b.area; els_iocb->port_id[2] = (sp->fcport)->d_id.b.domain; els_iocb->control_flags = 0U; els_iocb->rx_byte_count = bsg_job->reply_payload.payload_len; els_iocb->tx_byte_count = bsg_job->request_payload.payload_len; els_iocb->tx_address[0] = (unsigned int )(bsg_job->request_payload.sg_list)->dma_address; els_iocb->tx_address[1] = (unsigned int )((bsg_job->request_payload.sg_list)->dma_address >> 32ULL); els_iocb->tx_len = (bsg_job->request_payload.sg_list)->dma_length; els_iocb->rx_address[0] = (unsigned int )(bsg_job->reply_payload.sg_list)->dma_address; els_iocb->rx_address[1] = (unsigned int )((bsg_job->reply_payload.sg_list)->dma_address >> 32ULL); els_iocb->rx_len = (bsg_job->reply_payload.sg_list)->dma_length; ((sp->fcport)->vha)->qla_stats.control_requests = ((sp->fcport)->vha)->qla_stats.control_requests + 1U; return; } } static void qla2x00_ct_iocb(srb_t *sp , ms_iocb_entry_t *ct_iocb ) { uint16_t avail_dsds ; uint32_t *cur_dsd ; struct scatterlist *sg ; int index ; uint16_t tot_dsds ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct fc_bsg_job *bsg_job ; int loop_iterartion ; int cont_iocb_prsnt ; int entry_count ; dma_addr_t sle_dma ; cont_a64_entry_t *cont_pkt ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; { vha = (sp->fcport)->vha; ha = vha->hw; bsg_job = sp->u.bsg_job; loop_iterartion = 0; cont_iocb_prsnt = 0; entry_count = 1; memset((void *)ct_iocb, 0, 64UL); ct_iocb->entry_type = 41U; ct_iocb->entry_status = 0U; ct_iocb->handle1 = sp->handle; if ((int )ha->device_type < 0) { ct_iocb->loop_id.extended = (sp->fcport)->loop_id; } else { ct_iocb->loop_id.id.standard = (unsigned char )(sp->fcport)->loop_id; } ct_iocb->status = 0U; ct_iocb->control_flags = 0U; ct_iocb->timeout = 0U; ct_iocb->cmd_dsd_count = (unsigned short )bsg_job->request_payload.sg_cnt; ct_iocb->total_dsd_count = (unsigned int )((unsigned short )bsg_job->request_payload.sg_cnt) + 1U; ct_iocb->req_bytecount = bsg_job->request_payload.payload_len; ct_iocb->rsp_bytecount = bsg_job->reply_payload.payload_len; ct_iocb->dseg_req_address[0] = (unsigned int )(bsg_job->request_payload.sg_list)->dma_address; ct_iocb->dseg_req_address[1] = (unsigned int )((bsg_job->request_payload.sg_list)->dma_address >> 32ULL); ct_iocb->dseg_req_length = ct_iocb->req_bytecount; ct_iocb->dseg_rsp_address[0] = (unsigned int )(bsg_job->reply_payload.sg_list)->dma_address; ct_iocb->dseg_rsp_address[1] = (unsigned int )((bsg_job->reply_payload.sg_list)->dma_address >> 32ULL); ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount; avail_dsds = 1U; cur_dsd = (uint32_t *)(& ct_iocb->dseg_rsp_address); index = 0; tot_dsds = (uint16_t )bsg_job->reply_payload.sg_cnt; index = 0; sg = bsg_job->reply_payload.sg_list; goto ldv_66243; ldv_66242: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt = qla2x00_prep_cont_type1_iocb(vha, *((vha->hw)->req_q_map)); cur_dsd = (uint32_t *)(& cont_pkt->dseg_0_address); avail_dsds = 5U; cont_iocb_prsnt = 1; entry_count = entry_count + 1; } else { } sle_dma = sg->dma_address; tmp = cur_dsd; cur_dsd = cur_dsd + 1; *tmp = (unsigned int )sle_dma; tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )(sle_dma >> 32ULL); tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = sg->dma_length; loop_iterartion = loop_iterartion + 1; avail_dsds = (uint16_t )((int )avail_dsds - 1); index = index + 1; sg = 
sg_next(sg); ldv_66243: ; if ((int )tot_dsds > index) { goto ldv_66242; } else { } ct_iocb->entry_count = (uint8_t )entry_count; ((sp->fcport)->vha)->qla_stats.control_requests = ((sp->fcport)->vha)->qla_stats.control_requests + 1U; return; } } static void qla24xx_ct_iocb(srb_t *sp , struct ct_entry_24xx *ct_iocb ) { uint16_t avail_dsds ; uint32_t *cur_dsd ; struct scatterlist *sg ; int index ; uint16_t tot_dsds ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct fc_bsg_job *bsg_job ; int loop_iterartion ; int cont_iocb_prsnt ; int entry_count ; dma_addr_t sle_dma ; cont_a64_entry_t *cont_pkt ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; { vha = (sp->fcport)->vha; ha = vha->hw; bsg_job = sp->u.bsg_job; loop_iterartion = 0; cont_iocb_prsnt = 0; entry_count = 1; ct_iocb->entry_type = 41U; ct_iocb->entry_status = 0U; ct_iocb->sys_define = 0U; ct_iocb->handle = sp->handle; ct_iocb->nport_handle = (sp->fcport)->loop_id; ct_iocb->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; ct_iocb->comp_status = 0U; ct_iocb->cmd_dsd_count = (unsigned short )bsg_job->request_payload.sg_cnt; ct_iocb->timeout = 0U; ct_iocb->rsp_dsd_count = (unsigned short )bsg_job->reply_payload.sg_cnt; ct_iocb->rsp_byte_count = bsg_job->reply_payload.payload_len; ct_iocb->cmd_byte_count = bsg_job->request_payload.payload_len; ct_iocb->dseg_0_address[0] = (unsigned int )(bsg_job->request_payload.sg_list)->dma_address; ct_iocb->dseg_0_address[1] = (unsigned int )((bsg_job->request_payload.sg_list)->dma_address >> 32ULL); ct_iocb->dseg_0_len = (bsg_job->request_payload.sg_list)->dma_length; avail_dsds = 1U; cur_dsd = (uint32_t *)(& ct_iocb->dseg_1_address); index = 0; tot_dsds = (uint16_t )bsg_job->reply_payload.sg_cnt; index = 0; sg = bsg_job->reply_payload.sg_list; goto ldv_66263; ldv_66262: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt = qla2x00_prep_cont_type1_iocb(vha, *(ha->req_q_map)); cur_dsd = (uint32_t *)(& cont_pkt->dseg_0_address); avail_dsds = 5U; cont_iocb_prsnt = 1; entry_count = entry_count + 1; } else { } sle_dma = sg->dma_address; tmp = cur_dsd; cur_dsd = cur_dsd + 1; *tmp = (unsigned int )sle_dma; tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )(sle_dma >> 32ULL); tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = sg->dma_length; loop_iterartion = loop_iterartion + 1; avail_dsds = (uint16_t )((int )avail_dsds - 1); index = index + 1; sg = sg_next(sg); ldv_66263: ; if ((int )tot_dsds > index) { goto ldv_66262; } else { } ct_iocb->entry_count = (uint8_t )entry_count; return; } } int qla82xx_start_scsi(srb_t *sp ) { int ret ; int nseg ; unsigned long flags ; struct scsi_cmnd *cmd ; uint32_t *clr_ptr ; uint32_t index ; uint32_t handle ; uint16_t cnt ; uint16_t req_cnt ; uint16_t tot_dsds ; struct device_reg_82xx *reg ; uint32_t dbval ; uint32_t *fcp_dl ; uint8_t additional_cdb_len ; struct ct6_dsd *ctx ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; int tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; struct scatterlist *tmp___2 ; long tmp___3 ; unsigned int tmp___4 ; struct cmd_type_6 *cmd_pkt ; uint16_t more_dsd_lists ; struct dsd_dma *dsd_ptr ; uint16_t i ; void *tmp___5 ; unsigned int tmp___6 ; void *tmp___7 ; void *tmp___8 ; int tmp___9 ; unsigned int tmp___10 ; __u32 tmp___11 ; struct cmd_type_7 *cmd_pkt___0 ; unsigned int tmp___12 ; unsigned int tmp___13 ; { vha = (sp->fcport)->vha; ha = vha->hw; req = (struct req_que *)0; rsp = (struct rsp_que *)0; ret = 0; reg = & (ha->iobase)->isp82; cmd = sp->u.scmd.cmd; 
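/* Summary of qla82xx_start_scsi() below (editorial comment based on the visible
 * code, ISP82xx path): when the data-segment count exceeds the ql2xshiftctondsd
 * threshold the command is sent as a Type 6 IOCB whose FCP_CMND and DSD lists
 * are carved out of DMA-pool memory (fcp_cmnd_dma_pool / dl_dma_pool via the
 * ct6_dsd context); otherwise a plain Type 7 IOCB is built directly in the
 * request ring. Posting is done by writing a doorbell value (port number,
 * request-queue id and ring index) to ha->nxdb_wr_ptr rather than the usual
 * req_q_in register. */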
req = vha->req; rsp = *(ha->rsp_q_map); tot_dsds = 0U; dbval = (uint32_t )(((int )ha->portnum << 5) | 4); if ((unsigned int )vha->marker_needed != 0U) { tmp = qla2x00_marker(vha, req, rsp, 0, 0ULL, 2); if (tmp != 0) { ql_log(1U, vha, 12300, "qla2x00_marker failed for cmd=%p.\n", cmd); return (258); } else { } vha->marker_needed = 0U; } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); handle = req->current_outstanding_cmd; index = 1U; goto ldv_66292; ldv_66291: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_66290; } else { } index = index + 1U; ldv_66292: ; if ((uint32_t )req->num_outstanding_cmds > index) { goto ldv_66291; } else { } ldv_66290: ; if ((uint32_t )req->num_outstanding_cmds == index) { goto queuing_error; } else { } tmp___4 = scsi_sg_count(cmd); if (tmp___4 != 0U) { tmp___1 = scsi_sg_count(cmd); tmp___2 = scsi_sglist(cmd); nseg = dma_map_sg_attrs(& (ha->pdev)->dev, tmp___2, (int )tmp___1, cmd->sc_data_direction, (struct dma_attrs *)0); tmp___3 = ldv__builtin_expect(nseg == 0, 0L); if (tmp___3 != 0L) { goto queuing_error; } else { } } else { nseg = 0; } tot_dsds = (uint16_t )nseg; if ((int )tot_dsds > ql2xshiftctondsd) { more_dsd_lists = 0U; more_dsd_lists = qla24xx_calc_dsd_lists((int )tot_dsds); if ((int )more_dsd_lists + (int )ha->gbl_dsd_inuse > 4095) { ql_dbg(134217728U, vha, 12301, "Num of DSD list %d is than %d for cmd=%p.\n", (int )more_dsd_lists + (int )ha->gbl_dsd_inuse, 4096, cmd); goto queuing_error; } else { } if ((int )ha->gbl_dsd_avail >= (int )more_dsd_lists) { goto sufficient_dsds; } else { more_dsd_lists = (int )more_dsd_lists - (int )ha->gbl_dsd_avail; } i = 0U; goto ldv_66300; ldv_66299: tmp___5 = kzalloc(32UL, 32U); dsd_ptr = (struct dsd_dma *)tmp___5; if ((unsigned long )dsd_ptr == (unsigned long )((struct dsd_dma *)0)) { ql_log(0U, vha, 12302, "Failed to allocate memory for dsd_dma for cmd=%p.\n", cmd); goto queuing_error; } else { } dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, 32U, & dsd_ptr->dsd_list_dma); if ((unsigned long )dsd_ptr->dsd_addr == (unsigned long )((void *)0)) { kfree((void const *)dsd_ptr); ql_log(0U, vha, 12303, "Failed to allocate memory for dsd_addr for cmd=%p.\n", cmd); goto queuing_error; } else { } list_add_tail(& dsd_ptr->list, & ha->gbl_dsd_list); ha->gbl_dsd_avail = (uint16_t )((int )ha->gbl_dsd_avail + 1); i = (uint16_t )((int )i + 1); ldv_66300: ; if ((int )i < (int )more_dsd_lists) { goto ldv_66299; } else { } sufficient_dsds: req_cnt = 1U; if ((int )req->cnt < (int )req_cnt + 2) { tmp___6 = __readl((void const volatile *)(& reg->req_q_out)); cnt = (unsigned short )tmp___6; if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } if ((int )req->cnt < (int )req_cnt + 2) { goto queuing_error; } else { } } else { } tmp___7 = mempool_alloc(ha->ctx_mempool, 32U); sp->u.scmd.ctx = tmp___7; ctx = (struct ct6_dsd *)tmp___7; if ((unsigned long )ctx == (unsigned long )((struct ct6_dsd *)0)) { ql_log(0U, vha, 12304, "Failed to allocate ctx for cmd=%p.\n", cmd); goto queuing_error; } else { } memset((void *)ctx, 0, 48UL); tmp___8 = dma_pool_alloc(ha->fcp_cmnd_dma_pool, 32U, & ctx->fcp_cmnd_dma); ctx->fcp_cmnd = (struct fcp_cmnd *)tmp___8; if ((unsigned long )ctx->fcp_cmnd == (unsigned long )((struct fcp_cmnd 
*)0)) { ql_log(0U, vha, 12305, "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); goto queuing_error; } else { } INIT_LIST_HEAD(& ctx->dsd_list); ctx->dsd_use_cnt = 0; if ((unsigned int )cmd->cmd_len > 16U) { additional_cdb_len = (unsigned int )((uint8_t )cmd->cmd_len) + 240U; if (((unsigned int )cmd->cmd_len & 3U) != 0U) { ql_log(1U, vha, 12306, "scsi cmd len %d not multiple of 4 for cmd=%p.\n", (int )cmd->cmd_len, cmd); goto queuing_error_fcp_cmnd; } else { } ctx->fcp_cmnd_len = (unsigned int )cmd->cmd_len + 16U; } else { additional_cdb_len = 0U; ctx->fcp_cmnd_len = 32U; } cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; cmd_pkt->handle = ((unsigned int )req->id << 16) | handle; clr_ptr = (uint32_t *)cmd_pkt + 2UL; memset((void *)clr_ptr, 0, 56UL); cmd_pkt->dseg_count = tot_dsds; cmd_pkt->nport_handle = (sp->fcport)->loop_id; cmd_pkt->port_id[0] = (sp->fcport)->d_id.b.al_pa; cmd_pkt->port_id[1] = (sp->fcport)->d_id.b.area; cmd_pkt->port_id[2] = (sp->fcport)->d_id.b.domain; cmd_pkt->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; tmp___9 = qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, (int )tot_dsds); if (tmp___9 != 0) { goto queuing_error_fcp_cmnd; } else { } int_to_scsilun((cmd->device)->lun, & cmd_pkt->lun); host_to_fcp_swap((uint8_t *)(& cmd_pkt->lun), 8U); memset((void *)ctx->fcp_cmnd, 0, 272UL); int_to_scsilun((cmd->device)->lun, & (ctx->fcp_cmnd)->lun); (ctx->fcp_cmnd)->additional_cdb_len = additional_cdb_len; if ((unsigned int )cmd->sc_data_direction == 1U) { (ctx->fcp_cmnd)->additional_cdb_len = (uint8_t )((unsigned int )(ctx->fcp_cmnd)->additional_cdb_len | 1U); } else if ((unsigned int )cmd->sc_data_direction == 2U) { (ctx->fcp_cmnd)->additional_cdb_len = (uint8_t )((unsigned int )(ctx->fcp_cmnd)->additional_cdb_len | 2U); } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { (ctx->fcp_cmnd)->task_attribute = (uint8_t )((int )((signed char )(ctx->fcp_cmnd)->task_attribute) | (int )((signed char )((int )(sp->fcport)->fcp_prio << 3))); } else { } memcpy((void *)(& (ctx->fcp_cmnd)->cdb), (void const *)cmd->cmnd, (size_t )cmd->cmd_len); fcp_dl = (uint32_t *)(& (ctx->fcp_cmnd)->cdb) + ((unsigned long )additional_cdb_len + 16UL); tmp___10 = scsi_bufflen(cmd); tmp___11 = __fswab32(tmp___10); *fcp_dl = tmp___11; cmd_pkt->fcp_cmnd_dseg_len = ctx->fcp_cmnd_len; cmd_pkt->fcp_cmnd_dseg_address[0] = (unsigned int )ctx->fcp_cmnd_dma; cmd_pkt->fcp_cmnd_dseg_address[1] = (unsigned int )(ctx->fcp_cmnd_dma >> 32ULL); sp->flags = (uint16_t )((unsigned int )sp->flags | 4096U); cmd_pkt->byte_count = scsi_bufflen(cmd); cmd_pkt->entry_count = (unsigned char )req_cnt; cmd_pkt->entry_status = (unsigned char )rsp->id; } else { req_cnt = qla24xx_calc_iocbs(vha, (int )tot_dsds); if ((int )req->cnt < (int )req_cnt + 2) { tmp___12 = __readl((void const volatile *)(& reg->req_q_out)); cnt = (unsigned short )tmp___12; if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } } else { } if ((int )req->cnt < (int )req_cnt + 2) { goto queuing_error; } else { } cmd_pkt___0 = (struct cmd_type_7 *)req->ring_ptr; cmd_pkt___0->handle = ((unsigned int )req->id << 16) | handle; clr_ptr = (uint32_t *)cmd_pkt___0 + 2UL; memset((void *)clr_ptr, 0, 56UL); cmd_pkt___0->dseg_count = tot_dsds; cmd_pkt___0->nport_handle = (sp->fcport)->loop_id; cmd_pkt___0->port_id[0] = (sp->fcport)->d_id.b.al_pa; cmd_pkt___0->port_id[1] = (sp->fcport)->d_id.b.area; cmd_pkt___0->port_id[2] = (sp->fcport)->d_id.b.domain; 
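/* The rest of this Type 7 branch mirrors qla24xx_start_scsi(): the LUN and CDB
 * are byte-swapped into the packet, data-segment descriptors are built by
 * qla24xx_build_scsi_iocbs(), and the common epilogue records the handle,
 * advances the ring index and rings the ISP82xx doorbell, using a read-back
 * wait loop on nxdb_rd_ptr when ql2xdbwr is not set. (Editorial comment based
 * on the visible code.) */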
cmd_pkt___0->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; int_to_scsilun((cmd->device)->lun, & cmd_pkt___0->lun); host_to_fcp_swap((uint8_t *)(& cmd_pkt___0->lun), 8U); if (*((unsigned long *)ha + 2UL) != 0UL) { cmd_pkt___0->task = (uint8_t )((int )((signed char )cmd_pkt___0->task) | (int )((signed char )((int )(sp->fcport)->fcp_prio << 3))); } else { } memcpy((void *)(& cmd_pkt___0->fcp_cdb), (void const *)cmd->cmnd, (size_t )cmd->cmd_len); host_to_fcp_swap((uint8_t *)(& cmd_pkt___0->fcp_cdb), 16U); cmd_pkt___0->byte_count = scsi_bufflen(cmd); qla24xx_build_scsi_iocbs(sp, cmd_pkt___0, (int )tot_dsds); cmd_pkt___0->entry_count = (unsigned char )req_cnt; cmd_pkt___0->entry_status = (unsigned char )rsp->id; } req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)((unsigned long )handle); req->cnt = (int )req->cnt - (int )req_cnt; __asm__ volatile ("sfence": : : "memory"); req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } sp->flags = (uint16_t )((unsigned int )sp->flags | 1U); dbval = ((uint32_t )((int )req->id << 8) | dbval) | (uint32_t )((int )req->ring_index << 16); if (ql2xdbwr != 0) { qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); } else { writel(dbval, (void volatile *)ha->nxdb_wr_ptr); __asm__ volatile ("sfence": : : "memory"); goto ldv_66305; ldv_66304: writel(dbval, (void volatile *)ha->nxdb_wr_ptr); __asm__ volatile ("sfence": : : "memory"); ldv_66305: tmp___13 = readl((void const volatile *)ha->nxdb_rd_ptr); if (tmp___13 != dbval) { goto ldv_66304; } else { } } if (*((unsigned long *)vha + 19UL) != 0UL && (rsp->ring_ptr)->signature != 3735936685U) { qla24xx_process_response_queue(vha, rsp); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); queuing_error_fcp_cmnd: dma_pool_free(ha->fcp_cmnd_dma_pool, (void *)ctx->fcp_cmnd, ctx->fcp_cmnd_dma); queuing_error: ; if ((unsigned int )tot_dsds != 0U) { scsi_dma_unmap(cmd); } else { } if ((unsigned long )sp->u.scmd.ctx != (unsigned long )((void *)0)) { mempool_free(sp->u.scmd.ctx, ha->ctx_mempool); sp->u.scmd.ctx = (void *)0; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (258); } } static void qla24xx_abort_iocb(srb_t *sp , struct abort_entry_24xx *abt_iocb ) { struct srb_iocb *aio ; scsi_qla_host_t *vha ; struct req_que *req ; { aio = & sp->u.iocb_cmd; vha = (sp->fcport)->vha; req = vha->req; memset((void *)abt_iocb, 0, 64UL); abt_iocb->entry_type = 51U; abt_iocb->entry_count = 1U; abt_iocb->handle = ((unsigned int )req->id << 16) | sp->handle; abt_iocb->nport_handle = (sp->fcport)->loop_id; abt_iocb->handle_to_abort = ((unsigned int )req->id << 16) | aio->u.abt.cmd_hndl; abt_iocb->port_id[0] = (sp->fcport)->d_id.b.al_pa; abt_iocb->port_id[1] = (sp->fcport)->d_id.b.area; abt_iocb->port_id[2] = (sp->fcport)->d_id.b.domain; abt_iocb->vp_index = (uint8_t )vha->vp_idx; abt_iocb->req_que_no = req->id; __asm__ volatile ("sfence": : : "memory"); return; } } int qla2x00_start_sp(srb_t *sp ) { int rval ; struct qla_hw_data *ha ; void *pkt ; unsigned long flags ; raw_spinlock_t *tmp ; { ha = ((sp->fcport)->vha)->hw; rval = 258; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); pkt = qla2x00_alloc_iocbs((sp->fcport)->vha, sp); if ((unsigned long )pkt == (unsigned long )((void *)0)) { ql_log(1U, (sp->fcport)->vha, 28684, 
"qla2x00_alloc_iocbs failed.\n"); goto done; } else { } rval = 0; switch ((int )sp->type) { case 1: ; if ((ha->device_type & 134217728U) != 0U) { qla24xx_login_iocb(sp, (struct logio_entry_24xx *)pkt); } else { qla2x00_login_iocb(sp, (struct mbx_entry *)pkt); } goto ldv_66326; case 2: ; if ((ha->device_type & 134217728U) != 0U) { qla24xx_logout_iocb(sp, (struct logio_entry_24xx *)pkt); } else { qla2x00_logout_iocb(sp, (struct mbx_entry *)pkt); } goto ldv_66326; case 3: ; case 4: qla24xx_els_iocb(sp, (struct els_entry_24xx *)pkt); goto ldv_66326; case 5: ; if ((ha->device_type & 134217728U) != 0U) { qla24xx_ct_iocb(sp, (struct ct_entry_24xx *)pkt); } else { qla2x00_ct_iocb(sp, (ms_iocb_entry_t *)pkt); } goto ldv_66326; case 6: ; if ((ha->device_type & 134217728U) != 0U) { qla24xx_adisc_iocb(sp, (struct logio_entry_24xx *)pkt); } else { qla2x00_adisc_iocb(sp, (struct mbx_entry *)pkt); } goto ldv_66326; case 7: ; if ((ha->device_type & 131072U) != 0U) { qlafx00_tm_iocb(sp, (struct tsk_mgmt_entry_fx00 *)pkt); } else { qla24xx_tm_iocb(sp, (struct tsk_mgmt_entry *)pkt); } goto ldv_66326; case 10: ; case 11: qlafx00_fxdisc_iocb(sp, (struct fxdisc_entry_fx00 *)pkt); goto ldv_66326; case 12: ; if ((ha->device_type & 131072U) != 0U) { qlafx00_abort_iocb(sp, (struct abort_iocb_entry_fx00 *)pkt); } else { qla24xx_abort_iocb(sp, (struct abort_entry_24xx *)pkt); } goto ldv_66326; default: ; goto ldv_66326; } ldv_66326: __asm__ volatile ("sfence": : : "memory"); qla2x00_start_iocbs((sp->fcport)->vha, *(ha->req_q_map)); done: spin_unlock_irqrestore(& ha->hardware_lock, flags); return (rval); } } static void qla25xx_build_bidir_iocb(srb_t *sp , struct scsi_qla_host *vha , struct cmd_bidir *cmd_pkt , uint32_t tot_dsds ) { uint16_t avail_dsds ; uint32_t *cur_dsd ; uint32_t req_data_len ; uint32_t rsp_data_len ; struct scatterlist *sg ; int index ; int entry_count ; struct fc_bsg_job *bsg_job ; unsigned long tmp ; dma_addr_t sle_dma ; cont_a64_entry_t *cont_pkt ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; dma_addr_t sle_dma___0 ; cont_a64_entry_t *cont_pkt___0 ; uint32_t *tmp___3 ; uint32_t *tmp___4 ; uint32_t *tmp___5 ; { req_data_len = 0U; rsp_data_len = 0U; entry_count = 1; bsg_job = sp->u.bsg_job; *((uint32_t *)(& cmd_pkt->entry_type)) = 117U; cmd_pkt->wr_dseg_count = (unsigned short )bsg_job->request_payload.sg_cnt; cmd_pkt->rd_dseg_count = (unsigned short )bsg_job->reply_payload.sg_cnt; cmd_pkt->control_flags = 11U; rsp_data_len = bsg_job->request_payload.payload_len; req_data_len = rsp_data_len; cmd_pkt->wr_byte_count = req_data_len; cmd_pkt->rd_byte_count = rsp_data_len; tmp = qla2x00_get_async_timeout(vha); cmd_pkt->timeout = (unsigned int )((unsigned short )tmp) + 2U; vha->bidi_stats.transfer_bytes = vha->bidi_stats.transfer_bytes + (unsigned long long )req_data_len; vha->bidi_stats.io_count = vha->bidi_stats.io_count + 1ULL; vha->qla_stats.output_bytes = vha->qla_stats.output_bytes + (uint64_t )req_data_len; vha->qla_stats.output_requests = vha->qla_stats.output_requests + 1ULL; avail_dsds = 1U; cur_dsd = (uint32_t *)(& cmd_pkt->fcp_data_dseg_address); index = 0; index = 0; sg = bsg_job->request_payload.sg_list; goto ldv_66354; ldv_66353: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); cur_dsd = (uint32_t *)(& cont_pkt->dseg_0_address); avail_dsds = 5U; entry_count = entry_count + 1; } else { } sle_dma = sg->dma_address; tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )sle_dma; tmp___1 = cur_dsd; cur_dsd = cur_dsd 
+ 1; *tmp___1 = (unsigned int )(sle_dma >> 32ULL); tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = sg->dma_length; avail_dsds = (uint16_t )((int )avail_dsds - 1); index = index + 1; sg = sg_next(sg); ldv_66354: ; if (bsg_job->request_payload.sg_cnt > index) { goto ldv_66353; } else { } index = 0; sg = bsg_job->reply_payload.sg_list; goto ldv_66359; ldv_66358: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt___0 = qla2x00_prep_cont_type1_iocb(vha, vha->req); cur_dsd = (uint32_t *)(& cont_pkt___0->dseg_0_address); avail_dsds = 5U; entry_count = entry_count + 1; } else { } sle_dma___0 = sg->dma_address; tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = (unsigned int )sle_dma___0; tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = (unsigned int )(sle_dma___0 >> 32ULL); tmp___5 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___5 = sg->dma_length; avail_dsds = (uint16_t )((int )avail_dsds - 1); index = index + 1; sg = sg_next(sg); ldv_66359: ; if (bsg_job->reply_payload.sg_cnt > index) { goto ldv_66358; } else { } cmd_pkt->entry_count = (uint8_t )entry_count; return; } } int qla2x00_start_bidir(srb_t *sp , struct scsi_qla_host *vha , uint32_t tot_dsds ) { struct qla_hw_data *ha ; unsigned long flags ; uint32_t handle ; uint32_t index ; uint16_t req_cnt ; uint16_t cnt ; uint32_t *clr_ptr ; struct cmd_bidir *cmd_pkt ; struct rsp_que *rsp ; struct req_que *req ; int rval ; int tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; { ha = vha->hw; cmd_pkt = (struct cmd_bidir *)0; rval = 0; rval = 0; rsp = *(ha->rsp_q_map); req = vha->req; if ((unsigned int )vha->marker_needed != 0U) { tmp = qla2x00_marker(vha, req, rsp, 0, 0ULL, 2); if (tmp != 0) { return (11); } else { } vha->marker_needed = 0U; } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); handle = req->current_outstanding_cmd; index = 1U; goto ldv_66382; ldv_66381: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_66380; } else { } index = index + 1U; ldv_66382: ; if ((uint32_t )req->num_outstanding_cmds > index) { goto ldv_66381; } else { } ldv_66380: ; if ((uint32_t )req->num_outstanding_cmds == index) { rval = 2; goto queuing_error; } else { } req_cnt = qla24xx_calc_iocbs(vha, (int )((uint16_t )tot_dsds)); if ((int )req->cnt < (int )req_cnt + 2) { if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { cnt = *(req->out_ptr); } else { tmp___1 = __readl((void const volatile *)req->req_q_out); cnt = (uint16_t )tmp___1; } if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } } else { } if ((int )req->cnt < (int )req_cnt + 2) { rval = 2; goto queuing_error; } else { } cmd_pkt = (struct cmd_bidir *)req->ring_ptr; cmd_pkt->handle = ((unsigned int )req->id << 16) | handle; clr_ptr = (uint32_t *)cmd_pkt + 2UL; memset((void *)clr_ptr, 0, 56UL); cmd_pkt->nport_handle = vha->self_login_loop_id; cmd_pkt->port_id[0] = vha->d_id.b.al_pa; cmd_pkt->port_id[1] = vha->d_id.b.area; cmd_pkt->port_id[2] = vha->d_id.b.domain; qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds); cmd_pkt->entry_status = (unsigned char )rsp->id; req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = sp; sp->handle = handle; req->cnt = (int )req->cnt - (int )req_cnt; __asm__ volatile ("sfence": : : 
"memory"); qla2x00_start_iocbs(vha, req); queuing_error: spin_unlock_irqrestore(& ha->hardware_lock, flags); return (rval); } } void disable_suitable_timer_17(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_17) { ldv_timer_state_17 = 0; return; } else { } return; } } void choose_timer_17(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_17 = 2; return; } } int reg_timer_17(struct timer_list *timer ) { { ldv_timer_list_17 = timer; ldv_timer_state_17 = 1; return (0); } } void activate_pending_timer_17(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_17 == (unsigned long )timer) { if (ldv_timer_state_17 == 2 || pending_flag != 0) { ldv_timer_list_17 = timer; ldv_timer_list_17->data = data; ldv_timer_state_17 = 1; } else { } return; } else { } reg_timer_17(timer); ldv_timer_list_17->data = data; return; } } bool ldv_queue_work_on_95(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_96(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_97(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_98(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_99(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_100(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern void _raw_spin_lock_irq(raw_spinlock_t * ) ; extern void _raw_spin_unlock_irq(raw_spinlock_t * ) ; __inline static void spin_lock_irq(spinlock_t *lock ) { { _raw_spin_lock_irq(& lock->__annonCompField18.rlock); return; } } __inline static void spin_unlock_irq(spinlock_t *lock ) { { _raw_spin_unlock_irq(& lock->__annonCompField18.rlock); return; } } extern struct workqueue_struct *system_wq ; bool ldv_queue_work_on_111(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_113(int 
ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_117(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_112(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_115(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_114(struct workqueue_struct *ldv_func_arg1 ) ; __inline static bool queue_work___0(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_111(8192, wq, work); return (tmp); } } __inline static bool schedule_work(struct work_struct *work ) { bool tmp ; { tmp = queue_work___0(system_wq, work); return (tmp); } } int reg_timer_18(struct timer_list *timer ) ; void activate_pending_timer_18(struct timer_list *timer , unsigned long data , int pending_flag ) ; void choose_timer_18(struct timer_list *timer ) ; void disable_suitable_timer_18(struct timer_list *timer ) ; __inline static struct page *sg_page___0(struct scatterlist *sg ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (123), "i" (12UL)); ldv_26185: ; goto ldv_26185; } else { } tmp___0 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (124), "i" (12UL)); ldv_26186: ; goto ldv_26186; } else { } return ((struct page *)(sg->page_link & 0xfffffffffffffffcUL)); } } extern void pci_disable_msi(struct pci_dev * ) ; extern void pci_disable_msix(struct pci_dev * ) ; extern int pci_enable_msi_range(struct pci_dev * , int , int ) ; __inline static int pci_enable_msi_exact(struct pci_dev *dev , int nvec ) { int rc ; int tmp ; { tmp = pci_enable_msi_range(dev, nvec, nvec); rc = tmp; if (rc < 0) { return (rc); } else { } return (0); } } extern int pci_enable_msix_range(struct pci_dev * , struct msix_entry * , int , int ) ; extern int request_threaded_irq(unsigned int , irqreturn_t (*)(int , void * ) , irqreturn_t (*)(int , void * ) , unsigned long , char const * , void * ) ; __inline static int request_irq(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) { int tmp ; { tmp = request_threaded_irq(irq, handler, (irqreturn_t (*)(int , void * ))0, flags, name, dev); return (tmp); } } extern void free_irq(unsigned int , void * ) ; int ldv_scsi_add_host_with_dma_116(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; extern void scmd_printk(char const * , struct scsi_cmnd const * , char const * , ...) 
; __inline static void scsi_set_resid(struct scsi_cmnd *cmd , int resid ) { { cmd->sdb.resid = resid; return; } } __inline static void set_host_byte(struct scsi_cmnd *cmd , char status ) { { cmd->result = (int )(((unsigned int )cmd->result & 4278255615U) | (unsigned int )((int )status << 16)); return; } } __inline static void set_driver_byte(struct scsi_cmnd *cmd , char status ) { { cmd->result = (cmd->result & 16777215) | ((int )status << 24); return; } } static char const * const port_state_str___1[5U] = { "Unknown", "UNCONFIGURED", "DEAD", "LOST", "ONLINE"}; void qla2x00_alert_all_vps(struct rsp_que *rsp , uint16_t *mb ) ; void qla2x00_async_event(scsi_qla_host_t *vha , struct rsp_que *rsp , uint16_t *mb ) ; srb_t *qla2x00_get_sp_from_handle(scsi_qla_host_t *vha , char const *func , struct req_que *req , void *iocb ) ; void qla2x00_process_completed_request(struct scsi_qla_host *vha , struct req_que *req , uint32_t index ) ; bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha , uint32_t reg ) ; int qla25xx_request_irq(struct rsp_que *rsp ) ; irqreturn_t qla82xx_msix_default(int irq , void *dev_id ) ; irqreturn_t qla82xx_msix_rsp_q(int irq , void *dev_id ) ; int ql2x_ini_mode ; void qlt_response_pkt_all_vps(struct scsi_qla_host *vha , response_t *pkt ) ; void qlt_async_event(uint16_t code , struct scsi_qla_host *vha , uint16_t *mailbox ) ; int qlt_24xx_process_response_error(struct scsi_qla_host *vha , struct sts_entry_24xx *pkt ) ; irqreturn_t qla83xx_msix_atio_q(int irq , void *dev_id ) ; __inline static uint16_t qla2x00_debounce_register___1(uint16_t volatile *addr ) { uint16_t volatile first ; uint16_t volatile second ; unsigned short tmp ; unsigned short tmp___0 ; { ldv_65637: tmp = readw((void const volatile *)addr); first = tmp; __asm__ volatile ("": : : "memory"); cpu_relax(); tmp___0 = readw((void const volatile *)addr); second = tmp___0; if ((int )((unsigned short )first) != (int )((unsigned short )second)) { goto ldv_65637; } else { } return ((uint16_t )first); } } __inline static uint8_t *host_to_fcp_swap___1(uint8_t *fcp , uint32_t bsize ) { uint32_t *ifcp ; uint32_t *ofcp ; uint32_t iter ; uint32_t *tmp ; uint32_t *tmp___0 ; __u32 tmp___1 ; { ifcp = (uint32_t *)fcp; ofcp = (uint32_t *)fcp; iter = bsize >> 2; goto ldv_65665; ldv_65664: tmp = ofcp; ofcp = ofcp + 1; tmp___0 = ifcp; ifcp = ifcp + 1; tmp___1 = __fswab32(*tmp___0); *tmp = tmp___1; iter = iter - 1U; ldv_65665: ; if (iter != 0U) { goto ldv_65664; } else { } return (fcp); } } __inline static void qla2x00_handle_mbx_completion(struct qla_hw_data *ha , int status ) { int tmp ; { tmp = constant_test_bit(2L, (unsigned long const volatile *)(& ha->mbx_cmd_flags)); if ((tmp != 0 && status & 1) && *((unsigned long *)ha + 2UL) != 0UL) { set_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); clear_bit(2L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); complete(& ha->mbx_intr_comp); } else { } return; } } __inline static void qla2x00_set_retry_delay_timestamp(fc_port_t *fcport , uint16_t retry_delay ) { { if ((unsigned int )retry_delay != 0U) { fcport->retry_delay_timestamp = (unsigned long )(((int )retry_delay * 250) / 10) + (unsigned long )jiffies; } else { } return; } } extern void scsi_build_sense_buffer(int , u8 * , u8 , u8 , u8 ) ; static void qla2x00_mbx_completion(scsi_qla_host_t *vha , uint16_t mb0 ) ; static void qla2x00_status_entry(scsi_qla_host_t *vha , struct rsp_que *rsp , void *pkt ) ; static void qla2x00_status_cont_entry(struct rsp_que *rsp , sts_cont_entry_t *pkt ) ; static void 
qla2x00_error_entry(scsi_qla_host_t *vha , struct rsp_que *rsp , sts_entry_t *pkt ) ; irqreturn_t qla2100_intr_handler(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; int status ; unsigned long iter ; uint16_t hccr ; uint16_t mb[4U] ; struct rsp_que *rsp ; unsigned long flags ; raw_spinlock_t *tmp ; void *tmp___0 ; bool tmp___1 ; int tmp___2 ; unsigned short tmp___3 ; unsigned short tmp___4 ; unsigned long tmp___5 ; { rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20573, "%s: NULL response queue pointer.\n", "qla2100_intr_handler"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->isp; status = 0; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; iter = 50UL; goto ldv_65872; ldv_65871: hccr = readw((void const volatile *)(& reg->hccr)); tmp___1 = qla2x00_check_reg16_for_disconnect(vha, (int )hccr); if ((int )tmp___1) { goto ldv_65870; } else { } if (((int )hccr & 32) != 0) { tmp___2 = pci_channel_offline(ha->pdev); if (tmp___2 != 0) { goto ldv_65870; } else { } writew(4096, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); (*((ha->isp_ops)->fw_dump))(vha, 1); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_65870; } else { tmp___3 = readw((void const volatile *)(& reg->istatus)); if (((int )tmp___3 & 8) == 0) { goto ldv_65870; } else { } } tmp___4 = readw((void const volatile *)(& reg->semaphore)); if ((int )tmp___4 & 1) { writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); mb[0] = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); if ((unsigned int )mb[0] > 16383U && (int )((short )mb[0]) >= 0) { qla2x00_mbx_completion(vha, (int )mb[0]); status = status | 1; } else if ((int )((short )mb[0]) < 0 && (unsigned int )mb[0] <= 49151U) { mb[1] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 1U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 1U); mb[2] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 2U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 2U); mb[3] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
(void const volatile *)(& reg->u.isp2100.mailbox0) + 3U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 3U); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); } else { ql_dbg(33554432U, vha, 20517, "Unrecognized interrupt type (%d).\n", (int )mb[0]); } writew(0, (void volatile *)(& reg->semaphore)); readw((void const volatile *)(& reg->semaphore)); } else { qla2x00_process_response_queue(rsp); writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); } ldv_65872: tmp___5 = iter; iter = iter - 1UL; if (tmp___5 != 0UL) { goto ldv_65871; } else { } ldv_65870: qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha , uint32_t reg ) { int tmp ; int tmp___0 ; int tmp___1 ; { if (reg == 4294967295U) { tmp = test_and_set_bit(0L, (unsigned long volatile *)(& vha->pci_flags)); if (tmp == 0) { tmp___0 = constant_test_bit(1L, (unsigned long const volatile *)(& vha->pci_flags)); if (tmp___0 == 0) { tmp___1 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->pci_flags)); if (tmp___1 == 0) { schedule_work(& (vha->hw)->board_disable); } else { } } else { } } else { } return (1); } else { return (0); } } } bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha , uint16_t reg ) { bool tmp ; { tmp = qla2x00_check_reg32_for_disconnect(vha, (unsigned int )reg | 4294901760U); return (tmp); } } irqreturn_t qla2300_intr_handler(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct device_reg_2xxx *reg ; int status ; unsigned long iter ; uint32_t stat ; uint16_t hccr ; uint16_t mb[4U] ; struct rsp_que *rsp ; struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; void *tmp___0 ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; unsigned long tmp___4 ; { rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20568, "%s: NULL response queue pointer.\n", "qla2300_intr_handler"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->isp; status = 0; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; iter = 50UL; goto ldv_65911; ldv_65910: stat = readl((void const volatile *)(& reg->u.isp2300.host_status)); tmp___1 = qla2x00_check_reg32_for_disconnect(vha, stat); if ((int )tmp___1) { goto ldv_65899; } else { } if ((stat & 256U) != 0U) { tmp___2 = pci_channel_offline(ha->pdev); tmp___3 = ldv__builtin_expect(tmp___2 != 0, 0L); if (tmp___3 != 0L) { goto ldv_65899; } else { } hccr = readw((void const volatile *)(& reg->hccr)); if (((int )hccr & 43264) != 0) { ql_log(1U, vha, 20518, "Parity error -- HCCR=%x, Dumping firmware.\n", (int )hccr); } else { ql_log(1U, vha, 20519, "RISC paused -- HCCR=%x, Dumping firmware.\n", (int )hccr); } writew(4096, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); (*((ha->isp_ops)->fw_dump))(vha, 1); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_65899; } else if ((stat & 32768U) == 0U) { goto ldv_65899; } else { } switch (stat & 255U) { case 1U: ; case 2U: ; case 16U: ; case 17U: qla2x00_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; writew(0, (void volatile *)(& reg->semaphore)); goto ldv_65904; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
(void const volatile *)(& reg->u.isp2100.mailbox0) + 1U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 1U); mb[2] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 2U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 2U); mb[3] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 3U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 3U); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_65904; case 19U: qla2x00_process_response_queue(rsp); goto ldv_65904; case 21U: mb[0] = 32817U; mb[1] = (unsigned short )(stat >> 16); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_65904; case 22U: mb[0] = 32800U; mb[1] = (unsigned short )(stat >> 16); mb[2] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 2U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 2U); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_65904; default: ql_dbg(33554432U, vha, 20520, "Unrecognized interrupt type (%d).\n", stat & 255U); goto ldv_65904; } ldv_65904: writew(28672, (void volatile *)(& reg->hccr)); __readw((void const volatile *)(& reg->hccr)); ldv_65911: tmp___4 = iter; iter = iter - 1UL; if (tmp___4 != 0UL) { goto ldv_65910; } else { } ldv_65899: qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } static void qla2x00_mbx_completion(scsi_qla_host_t *vha , uint16_t mb0 ) { uint16_t cnt ; uint32_t mboxes ; uint16_t *wptr ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; { ha = vha->hw; reg = & (ha->iobase)->isp; mboxes = (uint32_t )((1 << (int )ha->mbx_count) + -1); if ((unsigned long )ha->mcp == (unsigned long )((mbx_cmd_t *)0)) { ql_dbg(33554432U, vha, 20481, "MBX pointer OLD_ERROR.\n"); } else { mboxes = (ha->mcp)->in_mb; } ha->flags.mbox_int = 1U; ha->mailbox_out[0] = mb0; mboxes = mboxes >> 1; wptr = (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 + 1UL : & reg->u.isp2300.mailbox0 + 1UL; cnt = 1U; goto ldv_65922; ldv_65921: ; if ((ha->device_type & 2U) != 0U && (unsigned int )cnt == 8U) { wptr = (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->u_end.isp2200.mailbox8 : & reg->u.isp2300.mailbox0 + 8UL; } else { } if (((unsigned int )cnt == 4U || (unsigned int )cnt == 5U) && (int )mboxes & 1) { ha->mailbox_out[(int )cnt] = qla2x00_debounce_register___1((uint16_t volatile *)wptr); } else if ((int )mboxes & 1) { ha->mailbox_out[(int )cnt] = readw((void const volatile *)wptr); } else { } wptr = wptr + 1; mboxes = mboxes >> 1; cnt = (uint16_t )((int )cnt + 1); ldv_65922: ; if ((int )((unsigned short )ha->mbx_count) > (int )cnt) { goto ldv_65921; } else { } return; } } static void qla81xx_idc_event(scsi_qla_host_t *vha , uint16_t aen , uint16_t descr ) { char *event[3U] ; int rval ; struct device_reg_24xx *reg24 ; struct device_reg_82xx *reg82 ; uint16_t *wptr ; uint16_t cnt ; uint16_t timeout ; uint16_t mb[7U] ; { event[0] = (char *)"Complete"; event[1] = (char *)"Request Notification"; event[2] = (char *)"Time Extension"; reg24 = & ((vha->hw)->iobase)->isp24; reg82 = & ((vha->hw)->iobase)->isp82; if (((vha->hw)->device_type & 8192U) != 0U || (((vha->hw)->device_type & 32768U) != 0U || ((vha->hw)->device_type & 65536U) != 0U)) { wptr = & reg24->mailbox1; } else if (((vha->hw)->device_type & 262144U) != 0U) { wptr = (uint16_t *)(& reg82->mailbox_out) + 1UL; } else { return; } cnt = 0U; goto ldv_65938; ldv_65937: mb[(int )cnt] = readw((void const volatile *)wptr); cnt = (uint16_t )((int )cnt + 1); wptr = wptr + 1; ldv_65938: ; if ((unsigned int )cnt <= 6U) { goto ldv_65937; } else { } ql_dbg(33554432U, vha, 20513, "Inter-Driver Communication %s -- %04x %04x %04x %04x %04x %04x %04x.\n", event[(int )aen & 255], (int )mb[0], (int )mb[1], (int )mb[2], (int )mb[3], (int )mb[4], (int )mb[5], (int )mb[6]); switch ((int )aen) { case 33024: ; if ((int )((short )mb[1]) < 0) { (vha->hw)->flags.idc_compl_status = 1U; if ((vha->hw)->notify_dcbx_comp != 0 && (unsigned int )vha->vp_idx == 0U) { complete(& (vha->hw)->dcbx_comp); } else { } } else { } goto ldv_65941; case 33025: timeout = (unsigned int )((uint16_t )((int )descr >> 8)) & 15U; ql_dbg(33554432U, vha, 20514, "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", vha->host_no, event[(int )aen & 255], (int )timeout); if ((unsigned int )timeout == 0U) { return; } else { } rval = qla2x00_post_idc_ack_work(vha, (uint16_t *)(& mb)); if (rval != 0) { ql_log(1U, vha, 20515, "IDC failed to post ACK.\n"); } else { } goto ldv_65941; case 33026: (vha->hw)->idc_extend_tmo = (uint32_t )descr; ql_dbg(33554432U, vha, 20615, "%lu Inter-Driver Communication %s -- Extend timeout by=%d.\n", vha->host_no, event[(int )aen & 255], (vha->hw)->idc_extend_tmo); goto ldv_65941; } ldv_65941: ; return; } } char const *qla2x00_get_link_speed_str(struct qla_hw_data *ha , uint16_t speed ) { char const *link_speeds[8U] ; { link_speeds[0] = "1"; link_speeds[1] = "2"; link_speeds[2] = "?"; link_speeds[3] = "4"; link_speeds[4] = "8"; link_speeds[5] = "16"; link_speeds[6] = "32"; link_speeds[7] = "10"; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { return (link_speeds[0]); } else if ((unsigned int )speed == 19U) { return (link_speeds[7]); } else if ((unsigned int )speed <= 6U) { return (link_speeds[(int )speed]); } else { return (link_speeds[2]); } } } static void qla83xx_handle_8200_aen(scsi_qla_host_t *vha , uint16_t *mb ) { struct qla_hw_data *ha ; uint32_t protocol_engine_id ; uint32_t fw_err_code ; uint32_t err_level ; uint16_t peg_fw_state ; uint16_t nw_interface_link_up ; uint16_t nw_interface_signal_detect ; uint16_t sfp_status ; uint16_t htbt_counter ; uint16_t htbt_monitor_enable ; 
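/* qla83xx_handle_8200_aen(): decodes the ISP83xx 0x8200 asynchronous event.
 * The body below logs the raw mailbox registers, sets flags.nic_core_hung and
 * schedules chip-reset or set-FAILED-state work when mb[1] reports firmware
 * errors, peg-to-FC status changes or a heartbeat failure, and hands IDC
 * device-state changes (mb[1] bit 0) to qla83xx_schedule_work(). */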
uint16_t sfp_additonal_info ; uint16_t sfp_multirate ; uint16_t sfp_tx_fault ; uint16_t link_speed ; uint16_t dcbx_status ; { ha = vha->hw; ql_dbg(33554432U, vha, 20587, "AEN Code: mb[0] = 0x%x AEN reason: mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", (int )*mb, (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 6UL)); ql_dbg(33554432U, vha, 20588, "PH-status2: mb[3] = 0x%x PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x Drv-Presence: mb[5] = 0x%x.\n", (int )*(mb + 3UL), (int )*(mb + 7UL), (int )*(mb + 4UL), (int )*(mb + 5UL)); if (((int )*(mb + 1UL) & 14) != 0) { ha->flags.nic_core_hung = 1U; ql_log(1U, vha, 20576, "83XX: F/W Error Reported: Check if reset required.\n"); if (((int )*(mb + 1UL) & 2) != 0) { protocol_engine_id = (uint32_t )*(mb + 2UL) & 255U; fw_err_code = (uint32_t )(((int )*(mb + 2UL) >> 8) | (((int )*(mb + 6UL) & 8191) << 8)); err_level = (uint32_t )((int )*(mb + 6UL) >> 13); ql_log(1U, vha, 20577, "PegHalt Status-1 Register: protocol_engine_id=0x%x fw_err_code=0x%x err_level=0x%x.\n", protocol_engine_id, fw_err_code, err_level); ql_log(1U, vha, 20578, "PegHalt Status-2 Register: 0x%x%x.\n", (int )*(mb + 7UL), (int )*(mb + 3UL)); if (err_level == 1U) { ql_log(1U, vha, 20579, "Not a fatal error, f/w has recovered iteself.\n"); } else if (err_level == 2U) { ql_log(0U, vha, 20580, "Recoverable Fatal error: Chip reset required.\n"); qla83xx_schedule_work(vha, 1); } else if (err_level == 4U) { ql_log(0U, vha, 20581, "Unrecoverable Fatal error: Set FAILED state, reboot required.\n"); qla83xx_schedule_work(vha, 3); } else { } } else { } if (((int )*(mb + 1UL) & 4) != 0) { peg_fw_state = (unsigned int )*(mb + 2UL) & 255U; nw_interface_link_up = (uint16_t )(((int )*(mb + 2UL) & 256) >> 8); nw_interface_signal_detect = (uint16_t )(((int )*(mb + 2UL) & 512) >> 9); sfp_status = (uint16_t )(((int )*(mb + 2UL) & 3072) >> 10); htbt_counter = (uint16_t )(((int )*(mb + 2UL) & 28672) >> 12); htbt_monitor_enable = (int )*(mb + 2UL) >> 15; sfp_additonal_info = (unsigned int )*(mb + 6UL) & 3U; sfp_multirate = (uint16_t )(((int )*(mb + 6UL) & 4) >> 2); sfp_tx_fault = (uint16_t )(((int )*(mb + 6UL) & 8) >> 3); link_speed = (uint16_t )(((int )*(mb + 6UL) & 112) >> 4); dcbx_status = (uint16_t )(((int )*(mb + 6UL) & 28672) >> 12); ql_log(1U, vha, 20582, "Peg-to-Fc Status Register:\npeg_fw_state=0x%x, nw_interface_link_up=0x%x, nw_interface_signal_detect=0x%x\nsfp_statis=0x%x.\n ", (int )peg_fw_state, (int )nw_interface_link_up, (int )nw_interface_signal_detect, (int )sfp_status); ql_log(1U, vha, 20583, "htbt_counter=0x%x, htbt_monitor_enable=0x%x, sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ", (int )htbt_counter, (int )htbt_monitor_enable, (int )sfp_additonal_info, (int )sfp_multirate); ql_log(1U, vha, 20584, "sfp_tx_fault=0x%x, link_state=0x%x, dcbx_status=0x%x.\n", (int )sfp_tx_fault, (int )link_speed, (int )dcbx_status); qla83xx_schedule_work(vha, 1); } else { } if (((int )*(mb + 1UL) & 8) != 0) { ql_log(1U, vha, 20585, "Heartbeat Failure encountered, chip reset required.\n"); qla83xx_schedule_work(vha, 1); } else { } } else { } if ((int )*(mb + 1UL) & 1) { ql_log(2U, vha, 20586, "IDC Device-State changed = 0x%x.\n", (int )*(mb + 4UL)); if (*((unsigned long *)ha + 2UL) != 0UL) { return; } else { } qla83xx_schedule_work(vha, 33280); } else { } return; } } int qla2x00_is_a_vp_did(scsi_qla_host_t *vha , uint32_t rscn_entry ) { struct qla_hw_data *ha ; scsi_qla_host_t *vp ; uint32_t vp_did ; unsigned long flags ; int ret ; raw_spinlock_t *tmp ; struct list_head const 
*__mptr ; struct list_head const *__mptr___0 ; { ha = vha->hw; ret = 0; if ((unsigned int )ha->num_vhosts == 0U) { return (ret); } else { } tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)ha->vp_list.next; vp = (scsi_qla_host_t *)__mptr; goto ldv_65986; ldv_65985: vp_did = vp->d_id.b24; if (vp_did == rscn_entry) { ret = 1; goto ldv_65984; } else { } __mptr___0 = (struct list_head const *)vp->list.next; vp = (scsi_qla_host_t *)__mptr___0; ldv_65986: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_65985; } else { } ldv_65984: spin_unlock_irqrestore(& ha->vport_slock, flags); return (ret); } } void qla2x00_async_event(scsi_qla_host_t *vha , struct rsp_que *rsp , uint16_t *mb ) { uint16_t handle_cnt ; uint16_t cnt ; uint16_t mbx ; uint32_t handles[5U] ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; struct device_reg_24xx *reg24 ; struct device_reg_82xx *reg82 ; uint32_t rscn_entry ; uint32_t host_pid ; uint32_t tmp_pid ; unsigned long flags ; fc_port_t *fcport ; unsigned short tmp ; unsigned short tmp___0 ; unsigned short tmp___1 ; unsigned short tmp___2 ; unsigned short tmp___3 ; unsigned short tmp___4 ; int tmp___5 ; char const *tmp___6 ; unsigned short tmp___7 ; unsigned short tmp___8 ; void *wwpn ; u64 tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; int tmp___19 ; bool tmp___20 ; int tmp___21 ; int tmp___22 ; int tmp___23 ; struct list_head const *__mptr ; int tmp___24 ; struct list_head const *__mptr___0 ; raw_spinlock_t *tmp___25 ; int tmp___26 ; { ha = vha->hw; reg = & (ha->iobase)->isp; reg24 = & (ha->iobase)->isp24; reg82 = & (ha->iobase)->isp82; fcport = (fc_port_t *)0; handle_cnt = 0U; if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { goto skip_rio; } else { } switch ((int )*mb) { case 32800: handles[0] = (unsigned int )(((int )*(mb + 2UL) << 16) | (int )*(mb + 1UL)); handle_cnt = 1U; goto ldv_66007; case 32817: handles[0] = (uint32_t )*(mb + 1UL); handle_cnt = 1U; *mb = 32800U; goto ldv_66007; case 32818: handles[0] = (uint32_t )*(mb + 1UL); handles[1] = (uint32_t )*(mb + 2UL); handle_cnt = 2U; *mb = 32800U; goto ldv_66007; case 32819: handles[0] = (uint32_t )*(mb + 1UL); handles[1] = (uint32_t )*(mb + 2UL); handles[2] = (uint32_t )*(mb + 3UL); handle_cnt = 3U; *mb = 32800U; goto ldv_66007; case 32820: handles[0] = (uint32_t )*(mb + 1UL); handles[1] = (uint32_t )*(mb + 2UL); handles[2] = (uint32_t )*(mb + 3UL); tmp = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 6U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 6U); handles[3] = (unsigned int )tmp; handle_cnt = 4U; *mb = 32800U; goto ldv_66007; case 32821: handles[0] = (uint32_t )*(mb + 1UL); handles[1] = (uint32_t )*(mb + 2UL); handles[2] = (uint32_t )*(mb + 3UL); tmp___0 = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 6U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 6U); handles[3] = (unsigned int )tmp___0; tmp___1 = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
(void const volatile *)(& reg->u.isp2100.mailbox0) + 7U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 7U); handles[4] = (unsigned int )tmp___1; handle_cnt = 5U; *mb = 32800U; goto ldv_66007; case 32834: handles[0] = (unsigned int )(((int )*(mb + 2UL) << 16) | (int )*(mb + 1UL)); tmp___2 = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 7U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 7U); tmp___3 = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 6U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 6U); handles[1] = (unsigned int )((int )tmp___2 << 16) | (unsigned int )tmp___3; handle_cnt = 2U; *mb = 32800U; goto ldv_66007; default: ; goto ldv_66007; } ldv_66007: ; skip_rio: ; switch ((int )*mb) { case 32800: ; if (*((unsigned long *)vha + 19UL) == 0UL) { goto ldv_66016; } else { } cnt = 0U; goto ldv_66018; ldv_66017: qla2x00_process_completed_request(vha, rsp->req, handles[(int )cnt]); cnt = (uint16_t )((int )cnt + 1); ldv_66018: ; if ((int )cnt < (int )handle_cnt) { goto ldv_66017; } else { } goto ldv_66016; case 32769: ql_dbg(33554432U, vha, 20482, "Asynchronous RESET.\n"); set_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_66016; case 32770: ; if (((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { tmp___4 = readw((void const volatile *)(& reg24->mailbox7)); mbx = tmp___4; } else { mbx = 0U; } ql_log(1U, vha, 20483, "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx7=%xh.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL), (int )mbx); (*((ha->isp_ops)->fw_dump))(vha, 1); if ((ha->device_type & 134217728U) != 0U) { if ((unsigned int )*(mb + 1UL) == 0U && (unsigned int )*(mb + 2UL) == 0U) { ql_log(0U, vha, 20484, "Unrecoverable Hardware Error: adapter marked OFFLINE!\n"); vha->flags.online = 0U; vha->device_flags = vha->device_flags | 32U; } else { if (((int )mbx & 8) != 0 && (unsigned int )ha->port_no == 0U) { set_bit(19L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } } else if ((unsigned int )*(mb + 1UL) == 0U) { ql_log(0U, vha, 20485, "Unrecoverable Hardware Error: adapter marked OFFLINE!\n"); vha->flags.online = 0U; vha->device_flags = vha->device_flags | 32U; } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } goto ldv_66016; case 32771: ql_log(1U, vha, 20486, "ISP Request Transfer Error (%x).\n", (int )*(mb + 1UL)); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_66016; case 32772: ql_log(1U, vha, 20487, "ISP Response Transfer Error.\n"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_66016; case 32773: ql_dbg(33554432U, vha, 20488, "Asynchronous WAKEUP_THRES.\n"); goto ldv_66016; case 32784: ql_dbg(33554432U, vha, 20489, "LIP occurred (%x).\n", (int )*(mb + 1UL)); tmp___5 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___5 != 2) { atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); qla2x00_mark_all_devices_lost(vha, 1); } else { } if ((unsigned int )vha->vp_idx != 0U) { atomic_set(& vha->vp_state, 2); fc_vport_set_state(vha->fc_vport, 9); } else { } set_bit(9L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(12L, (unsigned long volatile *)(& vha->dpc_flags)); 
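/* LIP occurred (0x8010): after the loop-down bookkeeping above, drop the
 * management-server login and post the LIP AEN to the FC transport through
 * the deferred qla2x00_post_aen_work() path. */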
vha->flags.management_server_logged_in = 0U; qla2x00_post_aen_work(vha, 1, (u32 )*(mb + 1UL)); goto ldv_66016; case 32785: ; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { ha->link_data_rate = 0U; } else { ha->link_data_rate = *(mb + 1UL); } tmp___6 = qla2x00_get_link_speed_str(ha, (int )ha->link_data_rate); ql_log(2U, vha, 20490, "LOOP UP detected (%s Gbps).\n", tmp___6); vha->flags.management_server_logged_in = 0U; qla2x00_post_aen_work(vha, 2, (u32 )ha->link_data_rate); goto ldv_66016; case 32786: ; if ((ha->device_type & 8192U) != 0U || (ha->device_type & 65536U) != 0U) { tmp___7 = readw((void const volatile *)(& reg24->mailbox4)); mbx = tmp___7; } else { mbx = 0U; } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { tmp___8 = readw((void const volatile *)(& reg82->mailbox_out) + 4U); mbx = tmp___8; } else { mbx = mbx; } ql_log(2U, vha, 20491, "LOOP DOWN detected (%x %x %x %x).\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL), (int )mbx); tmp___10 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___10 != 2) { atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); if ((unsigned int )vha->vp_idx == 0U) { if (*((unsigned long *)ha + 2UL) != 0UL) { wwpn = (void *)(& (ha->init_cb)->port_name); memcpy((void *)(& vha->port_name), (void const *)wwpn, 8UL); ((struct fc_host_attrs *)(vha->host)->shost_data)->port_name = wwn_to_u64((u8 *)(& vha->port_name)); tmp___9 = wwn_to_u64((u8 *)(& vha->port_name)); ql_dbg(1073774592U, vha, 324, "LOOP DOWN detected,restore WWPN %016llx\n", tmp___9); } else { } clear_bit(5L, (unsigned long volatile *)(& vha->vp_flags)); } else { } vha->device_flags = vha->device_flags | 2U; qla2x00_mark_all_devices_lost(vha, 1); } else { } if ((unsigned int )vha->vp_idx != 0U) { atomic_set(& vha->vp_state, 2); fc_vport_set_state(vha->fc_vport, 9); } else { } vha->flags.management_server_logged_in = 0U; ha->link_data_rate = 65535U; qla2x00_post_aen_work(vha, 3, 0U); goto ldv_66016; case 32787: ql_dbg(33554432U, vha, 20492, "LIP reset occurred (%x).\n", (int )*(mb + 1UL)); tmp___11 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___11 != 2) { atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); qla2x00_mark_all_devices_lost(vha, 1); } else { } if ((unsigned int )vha->vp_idx != 0U) { atomic_set(& vha->vp_state, 2); fc_vport_set_state(vha->fc_vport, 9); } else { } set_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); ha->operating_mode = 0U; vha->flags.management_server_logged_in = 0U; qla2x00_post_aen_work(vha, 4, (u32 )*(mb + 1UL)); goto ldv_66016; case 32816: ; if ((int )ha->device_type & 1) { goto ldv_66016; } else { } if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { ql_dbg(33554432U, vha, 20493, "DCBX Completed -- %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); if (ha->notify_dcbx_comp != 0 && (unsigned int )vha->vp_idx == 0U) { complete(& ha->dcbx_comp); } else { } } else { ql_dbg(33554432U, vha, 20494, "Asynchronous P2P MODE received.\n"); } tmp___13 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___13 != 2) { atomic_set(& vha->loop_state, 2); tmp___12 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___12 == 0) { atomic_set(& vha->loop_down_timer, 255); } else { } qla2x00_mark_all_devices_lost(vha, 1); } else { } if ((unsigned int )vha->vp_idx != 0U) { atomic_set(& vha->vp_state, 2); 
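/* For an NPIV vport the same failure transition is applied on every
 * loop-down style event in this handler; in the upstream sources the
 * values 2 and 9 used here are VP_FAILED and FC_VPORT_FAILED. */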
fc_vport_set_state(vha->fc_vport, 9); } else { } tmp___14 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___14 == 0) { set_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } set_bit(9L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(12L, (unsigned long volatile *)(& vha->dpc_flags)); ha->flags.gpsc_supported = 1U; vha->flags.management_server_logged_in = 0U; goto ldv_66016; case 32822: ; if ((int )ha->device_type & 1) { goto ldv_66016; } else { } ql_dbg(33554432U, vha, 20495, "Configuration change detected: value=%x.\n", (int )*(mb + 1UL)); tmp___16 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___16 != 2) { atomic_set(& vha->loop_state, 2); tmp___15 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___15 == 0) { atomic_set(& vha->loop_down_timer, 255); } else { } qla2x00_mark_all_devices_lost(vha, 1); } else { } if ((unsigned int )vha->vp_idx != 0U) { atomic_set(& vha->vp_state, 2); fc_vport_set_state(vha->fc_vport, 9); } else { } set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_66016; case 32788: ; if (((((((((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) || (ha->device_type & 8192U) != 0U) || (ha->device_type & 16384U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || (ha->device_type & 262144U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) && (((unsigned int )*(mb + 1UL) == 65535U && ((int )*(mb + 3UL) & 255) != 255) || (unsigned int )*(mb + 1UL) != 65535U)) && (int )vha->vp_idx != ((int )*(mb + 3UL) & 255)) { goto ldv_66016; } else { } if ((unsigned int )*(mb + 1UL) == 65535U && (unsigned int )*(mb + 2UL) == 7U) { ql_dbg(33554432U, vha, 20496, "Port unavailable %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); ql_log(1U, vha, 20574, "Link is offline.\n"); tmp___17 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___17 != 2) { atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); vha->device_flags = vha->device_flags | 2U; qla2x00_mark_all_devices_lost(vha, 1); } else { } if ((unsigned int )vha->vp_idx != 0U) { atomic_set(& vha->vp_state, 2); fc_vport_set_state(vha->fc_vport, 9); qla2x00_mark_all_devices_lost(vha, 1); } else { } vha->flags.management_server_logged_in = 0U; ha->link_data_rate = 65535U; goto ldv_66016; } else { } atomic_set(& vha->loop_down_timer, 0); tmp___18 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___18 != 2) { tmp___19 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___19 != 6) { ql_dbg(33554432U, vha, 20497, "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); qlt_async_event((int )*mb, vha, mb); goto ldv_66016; } else { } } else { } ql_dbg(33554432U, vha, 20498, "Port database changed %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); atomic_set(& vha->loop_state, 3); qla2x00_mark_all_devices_lost(vha, 1); if ((unsigned int )vha->vp_idx == 0U) { tmp___20 = qla_ini_mode_enabled(vha); if (tmp___20) { tmp___21 = 0; } else { tmp___21 = 1; } if (tmp___21) { set_bit(21L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } } else { } set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); 
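/* Port-database change (0x8014): dpc_flags bits 4 and 6 queue a loop resync
 * and a local-loop update for the DPC thread; the event is then forwarded to
 * the target-mode code via qlt_async_event() below. */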
set_bit(5L, (unsigned long volatile *)(& vha->vp_flags)); qlt_async_event((int )*mb, vha, mb); goto ldv_66016; case 32789: ; if ((unsigned int )vha->vp_idx != 0U) { tmp___22 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->vp_flags)); if (tmp___22 != 0) { goto ldv_66016; } else { } } else { } if (*((unsigned long *)ha + 2UL) != 0UL && (int )vha->vp_idx != ((int )*(mb + 3UL) & 255)) { goto ldv_66016; } else { } ql_dbg(33554432U, vha, 20499, "RSCN database changed -- %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); rscn_entry = (uint32_t )((((int )*(mb + 1UL) & 255) << 16) | (int )*(mb + 2UL)); host_pid = (uint32_t )((((int )vha->d_id.b.domain << 16) | ((int )vha->d_id.b.area << 8)) | (int )vha->d_id.b.al_pa); if (rscn_entry == host_pid) { ql_dbg(33554432U, vha, 20500, "Ignoring RSCN update to local host port ID (%06x).\n", host_pid); goto ldv_66016; } else { } rscn_entry = (uint32_t )((((int )*(mb + 1UL) & 1023) << 16) | (int )*(mb + 2UL)); tmp___23 = qla2x00_is_a_vp_did(vha, rscn_entry); if (tmp___23 != 0) { goto ldv_66016; } else { } __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_66041; ldv_66040: tmp___24 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___24 != 4) { goto ldv_66038; } else { } tmp_pid = fcport->d_id.b24; if (fcport->d_id.b24 == rscn_entry) { qla2x00_mark_device_lost(vha, fcport, 0, 0); goto ldv_66039; } else { } ldv_66038: __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_66041: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66040; } else { } ldv_66039: atomic_set(& vha->loop_down_timer, 0); vha->flags.management_server_logged_in = 0U; set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(7L, (unsigned long volatile *)(& vha->dpc_flags)); qla2x00_post_aen_work(vha, 5, rscn_entry); goto ldv_66016; case 32832: ql_dbg(33554432U, vha, 20501, "[R|Z]IO update completion.\n"); if ((ha->device_type & 134217728U) != 0U) { qla24xx_process_response_queue(vha, rsp); } else { qla2x00_process_response_queue(rsp); } goto ldv_66016; case 32840: ql_dbg(33554432U, vha, 20502, "Discard RND Frame -- %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); goto ldv_66016; case 32808: ql_dbg(33554432U, vha, 20503, "Trace Notification -- %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL)); goto ldv_66016; case 32783: ql_dbg(33554432U, vha, 20504, "ISP84XX Alert Notification -- %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); tmp___25 = spinlock_check(& (ha->cs84xx)->access_lock); flags = _raw_spin_lock_irqsave(tmp___25); switch ((int )*(mb + 1UL)) { case 1: ql_log(2U, vha, 20505, "Alert 84XX: panic recovery %04x %04x.\n", (int )*(mb + 2UL), (int )*(mb + 3UL)); goto ldv_66050; case 2: (ha->cs84xx)->op_fw_version = (uint32_t )(((int )*(mb + 3UL) << 16) | (int )*(mb + 2UL)); ql_log(2U, vha, 20506, "Alert 84XX: firmware version %x.\n", (ha->cs84xx)->op_fw_version); goto ldv_66050; case 3: (ha->cs84xx)->diag_fw_version = (uint32_t )(((int )*(mb + 3UL) << 16) | (int )*(mb + 2UL)); ql_log(2U, vha, 20507, "Alert 84XX: diagnostic firmware version %x.\n", (ha->cs84xx)->diag_fw_version); goto ldv_66050; case 4: (ha->cs84xx)->diag_fw_version = (uint32_t )(((int )*(mb + 3UL) << 16) | (int )*(mb + 2UL)); (ha->cs84xx)->fw_update = 1U; ql_log(2U, vha, 20508, "Alert 84XX: gold firmware version %x.\n", (ha->cs84xx)->gold_fw_version); goto ldv_66050; default: 
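/* Unrecognized ISP84XX alert codes (mailbox 1) are only logged; the whole
 * switch runs under the cs84xx->access_lock taken above. */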
ql_log(1U, vha, 20509, "Alert 84xx: Invalid Alert %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); } ldv_66050: spin_unlock_irqrestore(& (ha->cs84xx)->access_lock, flags); goto ldv_66016; case 32790: ql_dbg(33554432U, vha, 20510, "DCBX Started -- %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); goto ldv_66016; case 32818: ql_dbg(33554432U, vha, 20511, "DCBX Parameters Updated -- %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); goto ldv_66016; case 32817: ql_dbg(33554432U, vha, 20512, "FCF Configuration Error -- %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); goto ldv_66016; case 33025: ; if (((vha->hw)->device_type & 65536U) != 0U || (ha->device_type & 262144U) != 0U) { *(mb + 4UL) = readw((void const volatile *)(& reg24->mailbox4)); if ((((int )*(mb + 2UL) & 32767) == 288 || ((int )*(mb + 2UL) & 32767) == 290) && ((int )*(mb + 4UL) & 14) != 0) { set_bit(20L, (unsigned long volatile *)(& vha->dpc_flags)); tmp___26 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___26 == 2) { atomic_set(& vha->loop_down_timer, 255); } else { } qla2xxx_wake_dpc(vha); } else { } } else { } case 33024: ; if (ha->notify_lb_portup_comp != 0 && (unsigned int )vha->vp_idx == 0U) { complete(& ha->lb_portup_comp); } else { } case 33026: ; if ((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { qla81xx_idc_event(vha, (int )*mb, (int )*(mb + 1UL)); } else { } goto ldv_66016; case 33280: *(mb + 4UL) = readw((void const volatile *)(& reg24->mailbox4)); *(mb + 5UL) = readw((void const volatile *)(& reg24->mailbox5)); *(mb + 6UL) = readw((void const volatile *)(& reg24->mailbox6)); *(mb + 7UL) = readw((void const volatile *)(& reg24->mailbox7)); qla83xx_handle_8200_aen(vha, mb); goto ldv_66016; case 32896: ql_dbg(33554432U, vha, 20562, "D-Port Diagnostics: %04x %04x=%s\n", (int )*mb, (int )*(mb + 1UL), (unsigned int )*(mb + 1UL) != 0U ? ((unsigned int )*(mb + 1UL) != 1U ? ((unsigned int )*(mb + 1UL) == 2U ? 
(char *)"done (error)" : (char *)"other") : (char *)"done (ok)") : (char *)"start"); goto ldv_66016; default: ql_dbg(33554432U, vha, 20567, "Unknown AEN:%04x %04x %04x %04x\n", (int )*mb, (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); } ldv_66016: qlt_async_event((int )*mb, vha, mb); if ((unsigned int )vha->vp_idx == 0U && (unsigned int )ha->num_vhosts != 0U) { qla2x00_alert_all_vps(rsp, mb); } else { } return; } } void qla2x00_process_completed_request(struct scsi_qla_host *vha , struct req_que *req , uint32_t index ) { srb_t *sp ; struct qla_hw_data *ha ; { ha = vha->hw; if ((uint32_t )req->num_outstanding_cmds <= index) { ql_log(1U, vha, 12308, "Invalid SCSI command index (%x).\n", index); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } return; } else { } sp = *(req->outstanding_cmds + (unsigned long )index); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { *(req->outstanding_cmds + (unsigned long )index) = (srb_t *)0; (*(sp->done))((void *)ha, (void *)sp, 0); } else { ql_log(1U, vha, 12310, "Invalid SCSI SRB.\n"); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } } return; } } srb_t *qla2x00_get_sp_from_handle(scsi_qla_host_t *vha , char const *func , struct req_que *req , void *iocb ) { struct qla_hw_data *ha ; sts_entry_t *pkt ; srb_t *sp ; uint16_t index ; { ha = vha->hw; pkt = (sts_entry_t *)iocb; sp = (srb_t *)0; index = (unsigned short )pkt->handle; if ((int )req->num_outstanding_cmds <= (int )index) { ql_log(1U, vha, 20529, "Invalid command index (%x).\n", (int )index); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } goto done; } else { } sp = *(req->outstanding_cmds + (unsigned long )index); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_log(1U, vha, 20530, "Invalid completion handle (%x) -- timed-out.\n", (int )index); return (sp); } else { } if (sp->handle != (uint32_t )index) { ql_log(1U, vha, 20531, "SRB handle (%x) mismatch %x.\n", sp->handle, (int )index); return ((srb_t *)0); } else { } *(req->outstanding_cmds + (unsigned long )index) = (srb_t *)0; done: ; return (sp); } } static void qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha , struct req_que *req , struct mbx_entry *mbx ) { char func[9U] ; char const *type ; fc_port_t *fcport ; srb_t *sp ; struct srb_iocb *lio ; uint16_t *data ; uint16_t status ; { func[0] = 'M'; func[1] = 'B'; func[2] = 'X'; func[3] = '-'; func[4] = 'I'; func[5] = 'O'; func[6] = 'C'; func[7] = 'B'; func[8] = '\000'; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)mbx); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } lio = & sp->u.iocb_cmd; type = (char const *)sp->name; fcport = sp->fcport; data = (uint16_t *)(& lio->u.logio.data); *data = 16389U; *(data + 1UL) = (unsigned int )lio->u.logio.flags & 1U; if ((unsigned int )mbx->entry_status != 0U) { ql_dbg(33554432U, vha, 20547, "Async-%s error entry - hdl=%x portid=%02x%02x%02x entry-status=%x status=%x state-flag=%x status-flags=%x.\n", type, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int 
)mbx->entry_status, (int )mbx->status, (int )mbx->state_flags, (int )mbx->status_flags); ql_dump_buffer(33685504U, vha, 20521, (uint8_t *)mbx, 64U); goto logio_done; } else { } status = mbx->status; if (((unsigned int )status == 48U && (unsigned int )sp->type == 1U) && (unsigned int )mbx->mb0 == 16384U) { status = 0U; } else { } if ((unsigned int )status == 0U && (unsigned int )mbx->mb0 == 16384U) { ql_dbg(33554432U, vha, 20549, "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", type, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int )mbx->mb1); *data = 16384U; if ((unsigned int )sp->type == 1U) { fcport->port_type = 5; if ((int )mbx->mb1 & 1) { fcport->port_type = 4; } else if (((int )mbx->mb1 & 2) != 0) { fcport->flags = fcport->flags | 4U; } else { } } else { } goto logio_done; } else { } *data = mbx->mb0; switch ((int )*data) { case 16391: *(data + 1UL) = mbx->mb1; goto ldv_66096; case 16392: ; goto ldv_66096; default: *data = 16389U; goto ldv_66096; } ldv_66096: ql_log(1U, vha, 20550, "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int )status, (int )mbx->mb0, (int )mbx->mb1, (int )mbx->mb2, (int )mbx->mb6, (int )mbx->mb7); logio_done: (*(sp->done))((void *)vha, (void *)sp, 0); return; } } static void qla2x00_ct_entry(scsi_qla_host_t *vha , struct req_que *req , sts_entry_t *pkt , int iocb_type ) { char func[8U] ; char const *type ; srb_t *sp ; struct fc_bsg_job *bsg_job ; uint16_t comp_status ; int res ; { func[0] = 'C'; func[1] = 'T'; func[2] = '_'; func[3] = 'I'; func[4] = 'O'; func[5] = 'C'; func[6] = 'B'; func[7] = '\000'; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)pkt); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } bsg_job = sp->u.bsg_job; type = "ct pass-through"; comp_status = pkt->comp_status; (bsg_job->reply)->reply_data.ctels_reply.status = 0U; bsg_job->reply_len = 16U; if ((unsigned int )comp_status != 0U) { if ((unsigned int )comp_status == 21U) { res = 0; (bsg_job->reply)->reply_payload_rcv_len = (uint32_t )pkt->rsp_info_len; ql_log(1U, vha, 20552, "CT pass-through-%s error comp_status-status=0x%x total_byte = 0x%x.\n", type, (int )comp_status, (bsg_job->reply)->reply_payload_rcv_len); } else { ql_log(1U, vha, 20553, "CT pass-through-%s error comp_status-status=0x%x.\n", type, (int )comp_status); res = 458752; (bsg_job->reply)->reply_payload_rcv_len = 0U; } ql_dump_buffer(33685504U, vha, 20533, (uint8_t *)pkt, 64U); } else { res = 0; (bsg_job->reply)->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; bsg_job->reply_len = 0U; } (*(sp->done))((void *)vha, (void *)sp, res); return; } } static void qla24xx_els_ct_entry(scsi_qla_host_t *vha , struct req_que *req , struct sts_entry_24xx *pkt , int iocb_type ) { char func[12U] ; char const *type ; srb_t *sp ; struct fc_bsg_job *bsg_job ; uint16_t comp_status ; uint32_t fw_status[3U] ; uint8_t *fw_sts_ptr ; int res ; { func[0] = 'E'; func[1] = 'L'; func[2] = 'S'; func[3] = '_'; func[4] = 'C'; func[5] = 'T'; func[6] = '_'; func[7] = 'I'; func[8] = 'O'; func[9] = 'C'; func[10] = 'B'; func[11] = '\000'; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)pkt); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } bsg_job = sp->u.bsg_job; type = (char const *)0; switch ((int )sp->type) { case 3: ; case 4: type 
= "els"; goto ldv_66127; case 5: type = "ct pass-through"; goto ldv_66127; default: ql_dbg(8388608U, vha, 20542, "Unrecognized SRB: (%p) type=%d.\n", sp, (int )sp->type); return; } ldv_66127: fw_status[0] = (uint32_t )pkt->comp_status; comp_status = (uint16_t )fw_status[0]; fw_status[1] = (uint32_t )((unsigned short )((struct els_sts_entry_24xx *)pkt)->error_subcode_1); fw_status[2] = (uint32_t )((unsigned short )((struct els_sts_entry_24xx *)pkt)->error_subcode_2); (bsg_job->reply)->reply_data.ctels_reply.status = 0U; bsg_job->reply_len = 28U; if ((unsigned int )comp_status != 0U) { if ((unsigned int )comp_status == 21U) { res = 0; (bsg_job->reply)->reply_payload_rcv_len = (uint32_t )((unsigned short )((struct els_sts_entry_24xx *)pkt)->total_byte_count); ql_dbg(8388608U, vha, 20543, "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", type, sp->handle, (int )comp_status, fw_status[1], fw_status[2], (int )((unsigned short )((struct els_sts_entry_24xx *)pkt)->total_byte_count)); fw_sts_ptr = (uint8_t *)(bsg_job->req)->sense + 16UL; memcpy((void *)fw_sts_ptr, (void const *)(& fw_status), 12UL); } else { ql_dbg(8388608U, vha, 20544, "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x error subcode 1=0x%x error subcode 2=0x%x.\n", type, sp->handle, (int )comp_status, (int )((unsigned short )((struct els_sts_entry_24xx *)pkt)->error_subcode_1), (int )((unsigned short )((struct els_sts_entry_24xx *)pkt)->error_subcode_2)); res = 458752; (bsg_job->reply)->reply_payload_rcv_len = 0U; fw_sts_ptr = (uint8_t *)(bsg_job->req)->sense + 16UL; memcpy((void *)fw_sts_ptr, (void const *)(& fw_status), 12UL); } ql_dump_buffer(8519680U, vha, 20566, (uint8_t *)pkt, 64U); } else { res = 0; (bsg_job->reply)->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; bsg_job->reply_len = 0U; } (*(sp->done))((void *)vha, (void *)sp, res); return; } } static void qla24xx_logio_entry(scsi_qla_host_t *vha , struct req_que *req , struct logio_entry_24xx *logio ) { char func[11U] ; char const *type ; fc_port_t *fcport ; srb_t *sp ; struct srb_iocb *lio ; uint16_t *data ; uint32_t iop[2U] ; { func[0] = 'L'; func[1] = 'O'; func[2] = 'G'; func[3] = 'I'; func[4] = 'O'; func[5] = '-'; func[6] = 'I'; func[7] = 'O'; func[8] = 'C'; func[9] = 'B'; func[10] = '\000'; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)logio); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } lio = & sp->u.iocb_cmd; type = (char const *)sp->name; fcport = sp->fcport; data = (uint16_t *)(& lio->u.logio.data); *data = 16389U; *(data + 1UL) = (unsigned int )lio->u.logio.flags & 1U; if ((unsigned int )logio->entry_status != 0U) { ql_log(1U, fcport->vha, 20532, "Async-%s error entry - hdl=%xportid=%02x%02x%02x entry-status=%x.\n", type, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int )logio->entry_status); ql_dump_buffer(33685504U, vha, 20557, (uint8_t *)logio, 64U); goto logio_done; } else { } if ((unsigned int )logio->comp_status == 0U) { ql_dbg(33554432U, fcport->vha, 20534, "Async-%s complete - hdl=%x portid=%02x%02x%02x iop0=%x.\n", type, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, logio->io_parameter[0]); *data = 16384U; if ((unsigned int )sp->type != 1U) { goto logio_done; } else { } iop[0] = logio->io_parameter[0]; if ((iop[0] & 16U) != 0U) { fcport->port_type = 5; if ((iop[0] & 256U) != 0U) { fcport->flags = 
fcport->flags | 4U; } else { } } else if ((iop[0] & 32U) != 0U) { fcport->port_type = 4; } else { } if ((iop[0] & 128U) != 0U) { fcport->flags = fcport->flags | 16U; } else { } if (logio->io_parameter[7] != 0U || logio->io_parameter[8] != 0U) { fcport->supported_classes = fcport->supported_classes | 4U; } else { } if (logio->io_parameter[9] != 0U || logio->io_parameter[10] != 0U) { fcport->supported_classes = fcport->supported_classes | 8U; } else { } goto logio_done; } else { } iop[0] = logio->io_parameter[0]; iop[1] = logio->io_parameter[1]; switch (iop[0]) { case 26U: *data = 16391U; *(data + 1UL) = (unsigned short )iop[1]; goto ldv_66144; case 27U: *data = 16392U; goto ldv_66144; default: *data = 16389U; goto ldv_66144; } ldv_66144: ql_dbg(33554432U, fcport->vha, 20535, "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x iop0=%x iop1=%x.\n", type, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int )logio->comp_status, logio->io_parameter[0], logio->io_parameter[1]); logio_done: (*(sp->done))((void *)vha, (void *)sp, 0); return; } } static void qla24xx_tm_iocb_entry(scsi_qla_host_t *vha , struct req_que *req , void *tsk ) { char func[9U] ; char const *type ; fc_port_t *fcport ; srb_t *sp ; struct srb_iocb *iocb ; struct sts_entry_24xx *sts ; { func[0] = 'T'; func[1] = 'M'; func[2] = 'F'; func[3] = '-'; func[4] = 'I'; func[5] = 'O'; func[6] = 'C'; func[7] = 'B'; func[8] = '\000'; sts = (struct sts_entry_24xx *)tsk; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, tsk); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } iocb = & sp->u.iocb_cmd; type = (char const *)sp->name; fcport = sp->fcport; iocb->u.tmf.data = 0U; if ((unsigned int )sts->entry_status != 0U) { ql_log(1U, fcport->vha, 20536, "Async-%s error - hdl=%x entry-status(%x).\n", type, sp->handle, (int )sts->entry_status); iocb->u.tmf.data = 258U; } else if ((unsigned int )sts->comp_status != 0U) { ql_log(1U, fcport->vha, 20537, "Async-%s error - hdl=%x completion status(%x).\n", type, sp->handle, (int )sts->comp_status); iocb->u.tmf.data = 258U; } else if (((int )sts->scsi_status & 256) != 0) { if (sts->rsp_data_len <= 3U) { ql_log(1U, fcport->vha, 20539, "Async-%s error - hdl=%x not enough response(%d).\n", type, sp->handle, sts->rsp_data_len); } else if ((unsigned int )sts->data[3] != 0U) { ql_log(1U, fcport->vha, 20540, "Async-%s error - hdl=%x response(%x).\n", type, sp->handle, (int )sts->data[3]); iocb->u.tmf.data = 258U; } else { } } else { } if (iocb->u.tmf.data != 0U) { ql_dump_buffer(33685504U, vha, 20565, (uint8_t *)sts, 64U); } else { } (*(sp->done))((void *)vha, (void *)sp, 0); return; } } void qla2x00_process_response_queue(struct rsp_que *rsp ) { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; sts_entry_t *pkt ; uint16_t handle_cnt ; uint16_t cnt ; void *tmp ; { ha = rsp->hw; reg = & (ha->iobase)->isp; tmp = pci_get_drvdata(ha->pdev); vha = (struct scsi_qla_host *)tmp; if (*((unsigned long *)vha + 19UL) == 0UL) { return; } else { } goto ldv_66167; ldv_66182: pkt = (sts_entry_t *)rsp->ring_ptr; rsp->ring_index = (uint16_t )((int )rsp->ring_index + 1); if ((int )rsp->ring_index == (int )rsp->length) { rsp->ring_index = 0U; rsp->ring_ptr = rsp->ring; } else { rsp->ring_ptr = rsp->ring_ptr + 1; } if ((unsigned int )pkt->entry_status != 0U) { qla2x00_error_entry(vha, rsp, pkt); ((response_t *)pkt)->signature = 3735936685U; __asm__ volatile ("sfence": : : "memory"); goto ldv_66167; } else { 
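/* Dispatch by IOCB entry type: 3 = status, 33/34 = type-21/22 handle lists,
 * 16 = status continuation, 57 = MBX IOCB, 41 = CT pass-through. Each
 * processed entry is stamped with the 0xdeaddead signature and fenced, and
 * the response-queue out pointer is written back once the loop drains. */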
} switch ((int )pkt->entry_type) { case 3: qla2x00_status_entry(vha, rsp, (void *)pkt); goto ldv_66169; case 33: handle_cnt = (uint16_t )((sts21_entry_t *)pkt)->handle_count; cnt = 0U; goto ldv_66172; ldv_66171: qla2x00_process_completed_request(vha, rsp->req, ((sts21_entry_t *)pkt)->handle[(int )cnt]); cnt = (uint16_t )((int )cnt + 1); ldv_66172: ; if ((int )cnt < (int )handle_cnt) { goto ldv_66171; } else { } goto ldv_66169; case 34: handle_cnt = (uint16_t )((sts22_entry_t *)pkt)->handle_count; cnt = 0U; goto ldv_66176; ldv_66175: qla2x00_process_completed_request(vha, rsp->req, (uint32_t )((sts22_entry_t *)pkt)->handle[(int )cnt]); cnt = (uint16_t )((int )cnt + 1); ldv_66176: ; if ((int )cnt < (int )handle_cnt) { goto ldv_66175; } else { } goto ldv_66169; case 16: qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); goto ldv_66169; case 57: qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt); goto ldv_66169; case 41: qla2x00_ct_entry(vha, rsp->req, pkt, 41); goto ldv_66169; default: ql_log(1U, vha, 20554, "Received unknown response pkt type %x entry status=%x.\n", (int )pkt->entry_type, (int )pkt->entry_status); goto ldv_66169; } ldv_66169: ((response_t *)pkt)->signature = 3735936685U; __asm__ volatile ("sfence": : : "memory"); ldv_66167: ; if ((rsp->ring_ptr)->signature != 3735936685U) { goto ldv_66182; } else { } writew((int )rsp->ring_index, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox5 : & reg->u.isp2300.rsp_q_out)); return; } } __inline static void qla2x00_handle_sense(srb_t *sp , uint8_t *sense_data , uint32_t par_sense_len , uint32_t sense_len , struct rsp_que *rsp , int res ) { struct scsi_qla_host *vha ; struct scsi_cmnd *cp ; uint32_t track_sense_len ; { vha = (sp->fcport)->vha; cp = sp->u.scmd.cmd; if (sense_len > 95U) { sense_len = 96U; } else { } sp->u.scmd.request_sense_length = sense_len; sp->u.scmd.request_sense_ptr = cp->sense_buffer; track_sense_len = sense_len; if (sense_len > par_sense_len) { sense_len = par_sense_len; } else { } memcpy((void *)cp->sense_buffer, (void const *)sense_data, (size_t )sense_len); sp->u.scmd.request_sense_ptr = cp->sense_buffer + (unsigned long )sense_len; track_sense_len = track_sense_len - sense_len; sp->u.scmd.request_sense_length = track_sense_len; if (track_sense_len != 0U) { rsp->status_srb = sp; cp->result = res; } else { } if (sense_len != 0U) { ql_dbg(134348800U, vha, 12316, "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", ((sp->fcport)->vha)->host_no, (cp->device)->id, (cp->device)->lun, cp); ql_dump_buffer(134348800U, vha, 12331, cp->sense_buffer, sense_len); } else { } return; } } __inline static int qla2x00_handle_dif_error(srb_t *sp , struct sts_entry_24xx *sts24 ) { struct scsi_qla_host *vha ; struct scsi_cmnd *cmd ; uint8_t *ap ; uint8_t *ep ; uint32_t e_ref_tag ; uint32_t a_ref_tag ; uint16_t e_app_tag ; uint16_t a_app_tag ; uint16_t e_guard ; uint16_t a_guard ; sector_t tmp ; uint32_t blocks_done ; uint32_t resid ; sector_t lba_s ; sector_t tmp___0 ; unsigned int tmp___1 ; uint32_t i ; uint32_t j ; uint32_t k ; uint32_t num_ent ; struct scatterlist *sg ; struct sd_dif_tuple *spt ; unsigned int tmp___2 ; struct page *tmp___3 ; void *tmp___4 ; unsigned char tmp___5 ; unsigned int tmp___6 ; unsigned char tmp___7 ; { vha = (sp->fcport)->vha; cmd = sp->u.scmd.cmd; ap = (uint8_t *)(& sts24->data) + 12UL; ep = (uint8_t *)(& sts24->data) + 20UL; a_guard = *((uint16_t *)ap + 2U); a_app_tag = *((uint16_t *)ap); a_ref_tag = *((uint32_t *)ap + 4U); 
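/* T10-PI check: actual and expected protection tuples are read from the
 * status IOCB data area (ap/ep above). An escape application tag of 0xffff
 * is handled first by fixing up the residual and the protection scatterlist;
 * otherwise mismatching guard, reference or application tags are turned into
 * ILLEGAL_REQUEST sense data further down. */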
e_guard = *((uint16_t *)ep + 2U); e_app_tag = *((uint16_t *)ep); e_ref_tag = *((uint32_t *)ep + 4U); ql_dbg(134217728U, vha, 12323, "iocb(s) %p Returned STATUS.\n", sts24); tmp = scsi_get_lba(cmd); ql_dbg(134217728U, vha, 12324, "DIF OLD_ERROR in cmd 0x%x lba 0x%llx act ref tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", (int )*(cmd->cmnd), (unsigned long long )tmp, a_ref_tag, e_ref_tag, (int )a_app_tag, (int )e_app_tag, (int )a_guard, (int )e_guard); if ((unsigned int )a_app_tag == 65535U) { tmp___7 = scsi_get_prot_type(cmd); if ((unsigned int )tmp___7 != 3U || a_ref_tag == 4294967295U) { tmp___0 = scsi_get_lba(cmd); lba_s = tmp___0; blocks_done = (e_ref_tag - (uint32_t )lba_s) + 1U; tmp___1 = scsi_bufflen(cmd); resid = tmp___1 - (cmd->device)->sector_size * blocks_done; scsi_set_resid(cmd, (int )resid); cmd->result = 0; tmp___6 = scsi_prot_sg_count(cmd); if (tmp___6 != 0U) { j = 0U; k = 0U; i = 0U; sg = scsi_prot_sglist(cmd); goto ldv_66225; ldv_66224: num_ent = sg->dma_length / 8U; if (k + num_ent < blocks_done) { k = k + num_ent; goto ldv_66222; } else { } j = (blocks_done - k) - 1U; k = blocks_done; goto ldv_66223; ldv_66222: i = i + 1U; sg = sg_next(sg); ldv_66225: tmp___2 = scsi_prot_sg_count(cmd); if (tmp___2 > i) { goto ldv_66224; } else { } ldv_66223: ; if (k != blocks_done) { ql_log(1U, vha, 12335, "unexpected tag values tag:lba=%x:%llx)\n", e_ref_tag, (unsigned long long )lba_s); return (1); } else { } tmp___3 = sg_page___0(sg); tmp___4 = lowmem_page_address((struct page const *)tmp___3); spt = (struct sd_dif_tuple *)tmp___4 + (unsigned long )sg->offset; spt = spt + (unsigned long )j; spt->app_tag = 65535U; tmp___5 = scsi_get_prot_type(cmd); if ((unsigned int )tmp___5 == 3U) { spt->ref_tag = 4294967295U; } else { } } else { } return (0); } else { } } else { } if ((int )e_guard != (int )a_guard) { scsi_build_sense_buffer(1, cmd->sense_buffer, 5, 16, 1); set_driver_byte(cmd, 8); set_host_byte(cmd, 5); cmd->result = cmd->result | 4; return (1); } else { } if (e_ref_tag != a_ref_tag) { scsi_build_sense_buffer(1, cmd->sense_buffer, 5, 16, 3); set_driver_byte(cmd, 8); set_host_byte(cmd, 5); cmd->result = cmd->result | 4; return (1); } else { } if ((int )e_app_tag != (int )a_app_tag) { scsi_build_sense_buffer(1, cmd->sense_buffer, 5, 16, 2); set_driver_byte(cmd, 8); set_host_byte(cmd, 5); cmd->result = cmd->result | 4; return (1); } else { } return (1); } } static void qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha , void *pkt , struct req_que *req , uint32_t index ) { struct qla_hw_data *ha ; srb_t *sp ; uint16_t comp_status ; uint16_t scsi_status ; uint16_t thread_id ; uint32_t rval ; struct fc_bsg_job *bsg_job ; sts_entry_t *sts ; struct sts_entry_24xx *sts24 ; { ha = vha->hw; rval = 0U; bsg_job = (struct fc_bsg_job *)0; sts = (sts_entry_t *)pkt; sts24 = (struct sts_entry_24xx *)pkt; if ((uint32_t )req->num_outstanding_cmds <= index) { ql_log(1U, vha, 28847, "Invalid SCSI completion handle 0x%x.\n", index); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); return; } else { } sp = *(req->outstanding_cmds + (unsigned long )index); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { *(req->outstanding_cmds + (unsigned long )index) = (srb_t *)0; bsg_job = sp->u.bsg_job; } else { ql_log(1U, vha, 28848, "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", (int )req->id, index); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); return; } if ((ha->device_type & 134217728U) != 0U) { comp_status = 
sts24->comp_status; scsi_status = (unsigned int )sts24->scsi_status & 4095U; } else { comp_status = sts->comp_status; scsi_status = (unsigned int )sts->scsi_status & 4095U; } thread_id = (uint16_t )(bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; switch ((int )comp_status) { case 0: ; if ((unsigned int )scsi_status == 0U) { (bsg_job->reply)->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; vha->qla_stats.input_bytes = vha->qla_stats.input_bytes + (uint64_t )(bsg_job->reply)->reply_payload_rcv_len; vha->qla_stats.input_requests = vha->qla_stats.input_requests + 1ULL; rval = 0U; } else { } goto done; case 7: ql_dbg(8388608U, vha, 28849, "Command completed with date overrun thread_id=%d\n", (int )thread_id); rval = 7U; goto ldv_66244; case 21: ql_dbg(8388608U, vha, 28850, "Command completed with date underrun thread_id=%d\n", (int )thread_id); rval = 8U; goto ldv_66244; case 1792: ql_dbg(8388608U, vha, 28851, "Command completed with read data overrun thread_id=%d\n", (int )thread_id); rval = 7U; goto ldv_66244; case 1799: ql_dbg(8388608U, vha, 28852, "Command completed with read and write data overrun thread_id=%d\n", (int )thread_id); rval = 7U; goto ldv_66244; case 1813: ql_dbg(8388608U, vha, 28853, "Command completed with read data over and write data underrun thread_id=%d\n", (int )thread_id); rval = 7U; goto ldv_66244; case 5376: ql_dbg(8388608U, vha, 28854, "Command completed with read data data underrun thread_id=%d\n", (int )thread_id); rval = 8U; goto ldv_66244; case 5383: ql_dbg(8388608U, vha, 28855, "Command completed with read data under and write data overrun thread_id=%d\n", (int )thread_id); rval = 8U; goto ldv_66244; case 5397: ql_dbg(8388608U, vha, 28856, "Command completed with read and write data underrun thread_id=%d\n", (int )thread_id); rval = 8U; goto ldv_66244; case 512: ql_dbg(8388608U, vha, 28857, "Command completed with data DMA error thread_id=%d\n", (int )thread_id); rval = 29U; goto ldv_66244; case 6: ql_dbg(8388608U, vha, 28858, "Command completed with timeout thread_id=%d\n", (int )thread_id); rval = 30U; goto ldv_66244; default: ql_dbg(8388608U, vha, 28859, "Command completed with completion status=0x%x thread_id=%d\n", (int )comp_status, (int )thread_id); rval = 1U; goto ldv_66244; } ldv_66244: (bsg_job->reply)->reply_payload_rcv_len = 0U; done: (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = rval; bsg_job->reply_len = 16U; (*(sp->done))((void *)vha, (void *)sp, 0); return; } } static void qla2x00_status_entry(scsi_qla_host_t *vha , struct rsp_que *rsp , void *pkt ) { srb_t *sp ; fc_port_t *fcport ; struct scsi_cmnd *cp ; sts_entry_t *sts ; struct sts_entry_24xx *sts24 ; uint16_t comp_status ; uint16_t scsi_status ; uint16_t ox_id ; uint8_t lscsi_status ; int32_t resid ; uint32_t sense_len ; uint32_t par_sense_len ; uint32_t rsp_info_len ; uint32_t resid_len ; uint32_t fw_resid_len ; uint8_t *rsp_info ; uint8_t *sense_data ; struct qla_hw_data *ha ; uint32_t handle ; uint16_t que ; struct req_que *req ; int logit ; int res ; uint16_t state_flags ; uint16_t retry_delay ; unsigned long tmp ; int tmp___0 ; long tmp___1 ; unsigned int tmp___2 ; unsigned int tmp___3 ; unsigned int tmp___4 ; unsigned int tmp___5 ; unsigned int tmp___6 ; unsigned int tmp___7 ; int tmp___8 ; int tmp___9 ; unsigned int tmp___10 ; { ha = vha->hw; logit = 1; res = 0; state_flags = 0U; retry_delay = 0U; sts = (sts_entry_t *)pkt; sts24 = (struct sts_entry_24xx *)pkt; if ((ha->device_type & 134217728U) != 0U) { comp_status = sts24->comp_status; scsi_status = 
(unsigned int )sts24->scsi_status & 4095U; state_flags = sts24->state_flags; } else { comp_status = sts->comp_status; scsi_status = (unsigned int )sts->scsi_status & 4095U; } handle = (unsigned int )((unsigned short )sts->handle); que = (unsigned short )(sts->handle >> 16); req = *(ha->req_q_map + (unsigned long )que); if ((unsigned long )req == (unsigned long )((struct req_que *)0)) { ql_dbg(134217728U, vha, 12377, "Invalid status handle (0x%x): Bad req pointer. req=%p, que=%u.\n", sts->handle, req, (int )que); return; } else { tmp = find_first_zero_bit((unsigned long const *)(& ha->req_qid_map), (unsigned long )ha->max_req_queues); if ((unsigned long )que >= tmp) { ql_dbg(134217728U, vha, 12377, "Invalid status handle (0x%x): Bad req pointer. req=%p, que=%u.\n", sts->handle, req, (int )que); return; } else { } } if ((uint32_t )req->num_outstanding_cmds > handle) { sp = *(req->outstanding_cmds + (unsigned long )handle); } else { sp = (srb_t *)0; } if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_dbg(134217728U, vha, 12311, "Invalid status handle (0x%x).\n", sts->handle); tmp___0 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 == 0) { if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } qla2xxx_wake_dpc(vha); } else { } return; } else { } tmp___1 = ldv__builtin_expect((long )(((int )state_flags & 2) != 0 && (unsigned int )sp->type == 9U), 0L); if (tmp___1 != 0L) { qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); return; } else { } if ((unsigned int )sp->type == 7U) { qla24xx_tm_iocb_entry(vha, req, pkt); return; } else { } if ((unsigned int )comp_status == 0U && (unsigned int )scsi_status == 0U) { qla2x00_process_completed_request(vha, req, handle); return; } else { } *(req->outstanding_cmds + (unsigned long )handle) = (srb_t *)0; cp = sp->u.scmd.cmd; if ((unsigned long )cp == (unsigned long )((struct scsi_cmnd *)0)) { ql_dbg(134217728U, vha, 12312, "Command already returned (0x%x/%p).\n", sts->handle, sp); return; } else { } lscsi_status = (unsigned int )((uint8_t )scsi_status) & 254U; fcport = sp->fcport; ox_id = 0U; fw_resid_len = 0U; resid_len = fw_resid_len; rsp_info_len = resid_len; par_sense_len = rsp_info_len; sense_len = par_sense_len; if ((ha->device_type & 134217728U) != 0U) { if (((int )scsi_status & 512) != 0) { sense_len = sts24->sense_len; } else { } if (((int )scsi_status & 256) != 0) { rsp_info_len = sts24->rsp_data_len; } else { } if (((int )scsi_status & 3072) != 0) { resid_len = sts24->rsp_residual_count; } else { } if ((unsigned int )comp_status == 21U) { fw_resid_len = sts24->residual_len; } else { } rsp_info = (uint8_t *)(& sts24->data); sense_data = (uint8_t *)(& sts24->data); host_to_fcp_swap___1((uint8_t *)(& sts24->data), 28U); ox_id = sts24->ox_id; par_sense_len = 28U; if ((unsigned int )sts24->retry_delay != 0U && (unsigned int )sts24->retry_delay <= 65520U) { retry_delay = sts24->retry_delay; } else { } } else { if (((int )scsi_status & 512) != 0) { sense_len = (uint32_t )sts->req_sense_length; } else { } if (((int )scsi_status & 256) != 0) { rsp_info_len = (uint32_t )sts->rsp_info_len; } else { } resid_len = sts->residual_length; rsp_info = (uint8_t *)(& sts->rsp_info); sense_data = (uint8_t *)(& sts->req_sense_data); par_sense_len = 32U; } if (((int )scsi_status & 256) != 0) { if ((ha->device_type & 134217728U) != 0U) { sense_data = sense_data + 
(unsigned long )rsp_info_len; par_sense_len = par_sense_len - rsp_info_len; } else { } if (rsp_info_len > 3U && (unsigned int )*(rsp_info + 3UL) != 0U) { ql_dbg(134217728U, fcport->vha, 12313, "FCP I/O protocol failure (0x%x/0x%x).\n", rsp_info_len, (int )*(rsp_info + 3UL)); res = 131072; goto out; } else { } } else { } if (((ha->device_type & 134217728U) != 0U && (unsigned int )comp_status == 0U) && ((int )scsi_status & 1024) != 0) { comp_status = 7U; } else { } if ((unsigned int )lscsi_status == 40U || (unsigned int )lscsi_status == 8U) { qla2x00_set_retry_delay_timestamp(fcport, (int )retry_delay); } else { } switch ((int )comp_status) { case 0: ; case 28: ; if ((unsigned int )scsi_status == 0U) { res = 0; goto ldv_66288; } else { } if (((int )scsi_status & 3072) != 0) { resid = (int32_t )resid_len; scsi_set_resid(cp, resid); if ((unsigned int )lscsi_status == 0U) { tmp___3 = scsi_bufflen(cp); if (tmp___3 - (unsigned int )resid < cp->underflow) { tmp___2 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12314, "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", resid, tmp___2); res = 458752; goto ldv_66288; } else { } } else { } } else { } res = (int )lscsi_status; if ((unsigned int )lscsi_status == 40U) { ql_dbg(134217728U, fcport->vha, 12315, "QUEUE FULL detected.\n"); goto ldv_66288; } else { } logit = 0; if ((unsigned int )lscsi_status != 2U) { goto ldv_66288; } else { } memset((void *)cp->sense_buffer, 0, 96UL); if (((int )scsi_status & 512) == 0) { goto ldv_66288; } else { } qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, rsp, res); goto ldv_66288; case 21: resid = (int32_t )((ha->device_type & 134217728U) != 0U ? fw_resid_len : resid_len); scsi_set_resid(cp, resid); if (((int )scsi_status & 2048) != 0) { if ((ha->device_type & 134217728U) != 0U && fw_resid_len != resid_len) { tmp___4 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12317, "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, tmp___4); res = (int )lscsi_status | 458752; goto check_scsi_status; } else { } if ((unsigned int )lscsi_status == 0U) { tmp___6 = scsi_bufflen(cp); if (tmp___6 - (unsigned int )resid < cp->underflow) { tmp___5 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12318, "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", resid, tmp___5); res = 458752; goto ldv_66288; } else { } } else { } } else if ((unsigned int )lscsi_status != 40U && (unsigned int )lscsi_status != 8U) { tmp___7 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12319, "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, tmp___7); res = (int )lscsi_status | 458752; goto check_scsi_status; } else { ql_dbg(134217728U, fcport->vha, 12336, "scsi_status: 0x%x, lscsi_status: 0x%x\n", (int )scsi_status, (int )lscsi_status); } res = (int )lscsi_status; logit = 0; check_scsi_status: ; if ((unsigned int )lscsi_status != 0U) { if ((unsigned int )lscsi_status == 40U) { ql_dbg(134217728U, fcport->vha, 12320, "QUEUE FULL detected.\n"); logit = 1; goto ldv_66288; } else { } if ((unsigned int )lscsi_status != 2U) { goto ldv_66288; } else { } memset((void *)cp->sense_buffer, 0, 96UL); if (((int )scsi_status & 512) == 0) { goto ldv_66288; } else { } qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, rsp, res); } else { } goto ldv_66288; case 41: ; case 42: ; case 43: ; case 1: ; case 40: ; case 6: ; case 4: res = 917504; if ((unsigned int )comp_status == 6U) { if ((ha->device_type & 134217728U) != 0U) { goto ldv_66288; } else if (((int )sts->status_flags & 8192) == 0) { goto ldv_66288; } 
else { } } else { } tmp___8 = atomic_read((atomic_t const *)(& fcport->state)); ql_dbg(134217728U, fcport->vha, 12321, "Port to be marked lost on fcport=%02x%02x%02x, current port state= %s.\n", (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, port_state_str___1[tmp___8]); tmp___9 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___9 == 4) { qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); } else { } goto ldv_66288; case 5: res = 524288; goto ldv_66288; case 12: logit = qla2x00_handle_dif_error(sp, sts24); res = cp->result; goto ldv_66288; case 3: res = 458752; if (((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) || ((((int )ha->fw_attributes_h << 16) | (int )ha->fw_attributes) & 4194304) == 0) { goto ldv_66288; } else { } if (((int )state_flags & 16) != 0) { scmd_printk("\f", (struct scsi_cmnd const *)cp, "Unsupported device \'%s\' found.\n", (cp->device)->vendor); } else { } goto ldv_66288; default: res = 458752; goto ldv_66288; } ldv_66288: ; out: ; if (logit != 0) { tmp___10 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12322, "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", (int )comp_status, (int )scsi_status, res, vha->host_no, (cp->device)->id, (cp->device)->lun, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int )ox_id, cp->cmnd, tmp___10, rsp_info_len, resid_len, fw_resid_len); } else { } if ((unsigned long )rsp->status_srb == (unsigned long )((srb_t *)0)) { (*(sp->done))((void *)ha, (void *)sp, res); } else { } return; } } static void qla2x00_status_cont_entry(struct rsp_que *rsp , sts_cont_entry_t *pkt ) { uint8_t sense_sz ; struct qla_hw_data *ha ; struct scsi_qla_host *vha ; void *tmp ; srb_t *sp ; struct scsi_cmnd *cp ; uint32_t sense_len ; uint8_t *sense_ptr ; { sense_sz = 0U; ha = rsp->hw; tmp = pci_get_drvdata(ha->pdev); vha = (struct scsi_qla_host *)tmp; sp = rsp->status_srb; if ((unsigned long )sp == (unsigned long )((srb_t *)0) || sp->u.scmd.request_sense_length == 0U) { return; } else { } sense_len = sp->u.scmd.request_sense_length; sense_ptr = sp->u.scmd.request_sense_ptr; cp = sp->u.scmd.cmd; if ((unsigned long )cp == (unsigned long )((struct scsi_cmnd *)0)) { ql_log(1U, vha, 12325, "cmd is NULL: already returned to OS (sp=%p).\n", sp); rsp->status_srb = (srb_t *)0; return; } else { } if (sense_len > 60U) { sense_sz = 60U; } else { sense_sz = (uint8_t )sense_len; } if ((ha->device_type & 134217728U) != 0U) { host_to_fcp_swap___1((uint8_t *)(& pkt->data), 60U); } else { } memcpy((void *)sense_ptr, (void const *)(& pkt->data), (size_t )sense_sz); ql_dump_buffer(134348800U, vha, 12332, sense_ptr, (uint32_t )sense_sz); sense_len = sense_len - (uint32_t )sense_sz; sense_ptr = sense_ptr + (unsigned long )sense_sz; sp->u.scmd.request_sense_ptr = sense_ptr; sp->u.scmd.request_sense_length = sense_len; if (sense_len == 0U) { rsp->status_srb = (srb_t *)0; (*(sp->done))((void *)ha, (void *)sp, cp->result); } else { } return; } } static void qla2x00_error_entry(scsi_qla_host_t *vha , struct rsp_que *rsp , sts_entry_t *pkt ) { srb_t *sp ; struct qla_hw_data *ha ; char func[11U] ; uint16_t que ; struct req_que *req ; int res ; { ha = vha->hw; func[0] = 'E'; func[1] = 'R'; func[2] = 'R'; func[3] = 'O'; func[4] = 'R'; func[5] = '-'; func[6] = 'I'; func[7] = 'O'; func[8] = 'C'; func[9] = 'B'; func[10] = '\000'; que = (unsigned short )(pkt->handle >> 16); req = (struct 
req_que *)0; res = 458752; ql_dbg(33554432U, vha, 20522, "type of error status in response: 0x%x\n", (int )pkt->entry_status); if ((int )((unsigned short )ha->max_req_queues) <= (int )que || (unsigned long )*(ha->req_q_map + (unsigned long )que) == (unsigned long )((struct req_que *)0)) { goto fatal; } else { } req = *(ha->req_q_map + (unsigned long )que); if (((int )pkt->entry_status & 2) != 0) { res = 131072; } else { } sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)pkt); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { (*(sp->done))((void *)ha, (void *)sp, res); return; } else { } fatal: ql_log(1U, vha, 20528, "Error entry - invalid handle/queue.\n"); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } qla2xxx_wake_dpc(vha); return; } } static void qla24xx_mbx_completion(scsi_qla_host_t *vha , uint16_t mb0 ) { uint16_t cnt ; uint32_t mboxes ; uint16_t *wptr ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; { ha = vha->hw; reg = & (ha->iobase)->isp24; mboxes = (uint32_t )((1 << (int )ha->mbx_count) + -1); if ((unsigned long )ha->mcp == (unsigned long )((mbx_cmd_t *)0)) { ql_dbg(33554432U, vha, 20558, "MBX pointer OLD_ERROR.\n"); } else { mboxes = (ha->mcp)->in_mb; } ha->flags.mbox_int = 1U; ha->mailbox_out[0] = mb0; mboxes = mboxes >> 1; wptr = & reg->mailbox1; cnt = 1U; goto ldv_66335; ldv_66334: ; if ((int )mboxes & 1) { ha->mailbox_out[(int )cnt] = readw((void const volatile *)wptr); } else { } mboxes = mboxes >> 1; wptr = wptr + 1; cnt = (uint16_t )((int )cnt + 1); ldv_66335: ; if ((int )((unsigned short )ha->mbx_count) > (int )cnt) { goto ldv_66334; } else { } return; } } static void qla24xx_abort_iocb_entry(scsi_qla_host_t *vha , struct req_que *req , struct abort_entry_24xx *pkt ) { char func[9U] ; srb_t *sp ; struct srb_iocb *abt ; { func[0] = 'A'; func[1] = 'B'; func[2] = 'T'; func[3] = '_'; func[4] = 'I'; func[5] = 'O'; func[6] = 'C'; func[7] = 'B'; func[8] = '\000'; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)pkt); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } abt = & sp->u.iocb_cmd; abt->u.abt.comp_status = pkt->nport_handle; (*(sp->done))((void *)vha, (void *)sp, 0); return; } } void qla24xx_process_response_queue(struct scsi_qla_host *vha , struct rsp_que *rsp ) { struct sts_entry_24xx *pkt ; struct qla_hw_data *ha ; int tmp ; struct device_reg_82xx *reg ; { ha = vha->hw; if (*((unsigned long *)vha + 19UL) == 0UL) { return; } else { } goto ldv_66352; ldv_66368: pkt = (struct sts_entry_24xx *)rsp->ring_ptr; rsp->ring_index = (uint16_t )((int )rsp->ring_index + 1); if ((int )rsp->ring_index == (int )rsp->length) { rsp->ring_index = 0U; rsp->ring_ptr = rsp->ring; } else { rsp->ring_ptr = rsp->ring_ptr + 1; } if ((unsigned int )pkt->entry_status != 0U) { qla2x00_error_entry(vha, rsp, (sts_entry_t *)pkt); tmp = qlt_24xx_process_response_error(vha, pkt); if (tmp != 0) { goto process_err; } else { } ((response_t *)pkt)->signature = 3735936685U; __asm__ volatile ("sfence": : : "memory"); goto ldv_66352; } else { } process_err: ; switch ((int )pkt->entry_type) { case 3: qla2x00_status_entry(vha, rsp, (void *)pkt); goto ldv_66354; case 16: qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); goto ldv_66354; case 50: qla24xx_report_id_acquisition(vha, (struct vp_rpt_id_entry_24xx *)pkt); goto ldv_66354; case 82: 
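/* response entry type 0x52: login/logout port IOCB completion */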
qla24xx_logio_entry(vha, rsp->req, (struct logio_entry_24xx *)pkt); goto ldv_66354; case 41: qla24xx_els_ct_entry(vha, rsp->req, pkt, 41); goto ldv_66354; case 83: qla24xx_els_ct_entry(vha, rsp->req, pkt, 83); goto ldv_66354; case 84: qlt_24xx_process_atio_queue(vha); case 85: ; case 18: ; case 14: ; case 122: qlt_response_pkt_all_vps(vha, (response_t *)pkt); goto ldv_66354; case 4: ; goto ldv_66354; case 51: qla24xx_abort_iocb_entry(vha, rsp->req, (struct abort_entry_24xx *)pkt); goto ldv_66354; default: ql_dbg(33554432U, vha, 20546, "Received unknown response pkt type %x entry status=%x.\n", (int )pkt->entry_type, (int )pkt->entry_status); goto ldv_66354; } ldv_66354: ((response_t *)pkt)->signature = 3735936685U; __asm__ volatile ("sfence": : : "memory"); ldv_66352: ; if ((rsp->ring_ptr)->signature != 3735936685U) { goto ldv_66368; } else { } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { reg = & (ha->iobase)->isp82; writel((unsigned int )rsp->ring_index, (void volatile *)(& reg->rsp_q_out)); } else { writel((unsigned int )rsp->ring_index, (void volatile *)rsp->rsp_q_out); } return; } } static void qla2xxx_check_risc_status(scsi_qla_host_t *vha ) { int rval ; uint32_t cnt ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; if ((((ha->device_type & 2048U) == 0U && (ha->device_type & 8192U) == 0U) && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) && ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U)) { return; } else { } rval = 0; writel(31744U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(1U, (void volatile *)(& reg->iobase_window)); cnt = 10000U; goto ldv_66379; ldv_66378: ; if (cnt != 0U) { writel(1U, (void volatile *)(& reg->iobase_window)); __const_udelay(42950UL); } else { rval = 256; } cnt = cnt - 1U; ldv_66379: tmp = readl((void const volatile *)(& reg->iobase_window)); if ((tmp & 1U) == 0U && rval == 0) { goto ldv_66378; } else { } if (rval == 0) { goto next_test; } else { } rval = 0; writel(3U, (void volatile *)(& reg->iobase_window)); cnt = 100U; goto ldv_66383; ldv_66382: ; if (cnt != 0U) { writel(3U, (void volatile *)(& reg->iobase_window)); __const_udelay(42950UL); } else { rval = 256; } cnt = cnt - 1U; ldv_66383: tmp___0 = readl((void const volatile *)(& reg->iobase_window)); if ((tmp___0 & 1U) == 0U && rval == 0) { goto ldv_66382; } else { } if (rval != 0) { goto done; } else { } next_test: tmp___1 = readl((void const volatile *)(& reg->iobase_c8)); if ((tmp___1 & 8U) != 0U) { ql_log(2U, vha, 20556, "Additional code -- 0x55AA.\n"); } else { } done: writel(0U, (void volatile *)(& reg->iobase_window)); readl((void const volatile *)(& reg->iobase_window)); return; } } irqreturn_t qla24xx_intr_handler(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; int status ; unsigned long iter ; uint32_t stat ; uint32_t hccr ; uint16_t mb[8U] ; struct rsp_que *rsp ; unsigned long flags ; int tmp ; long tmp___0 ; raw_spinlock_t *tmp___1 ; void *tmp___2 ; bool tmp___3 ; int tmp___4 ; long tmp___5 ; long tmp___6 ; unsigned long tmp___7 ; { rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20569, "%s: NULL response queue pointer.\n", "qla24xx_intr_handler"); return (0); } else { } ha = rsp->hw; reg = & 
(ha->iobase)->isp24; status = 0; tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (1); } else { } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); tmp___2 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___2; iter = 50UL; goto ldv_66417; ldv_66416: stat = readl((void const volatile *)(& reg->host_status)); tmp___3 = qla2x00_check_reg32_for_disconnect(vha, stat); if ((int )tmp___3) { goto ldv_66404; } else { } if ((stat & 256U) != 0U) { tmp___4 = pci_channel_offline(ha->pdev); tmp___5 = ldv__builtin_expect(tmp___4 != 0, 0L); if (tmp___5 != 0L) { goto ldv_66404; } else { } hccr = readl((void const volatile *)(& reg->hccr)); ql_log(1U, vha, 20555, "RISC paused -- HCCR=%x, Dumping firmware.\n", hccr); qla2xxx_check_risc_status(vha); (*((ha->isp_ops)->fw_dump))(vha, 1); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_66404; } else if ((stat & 32768U) == 0U) { goto ldv_66404; } else { } switch (stat & 255U) { case 1U: ; case 2U: ; case 16U: ; case 17U: qla24xx_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; goto ldv_66409; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((void const volatile *)(& reg->mailbox1)); mb[2] = readw((void const volatile *)(& reg->mailbox2)); mb[3] = readw((void const volatile *)(& reg->mailbox3)); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_66409; case 19U: ; case 20U: qla24xx_process_response_queue(vha, rsp); goto ldv_66409; case 28U: qlt_24xx_process_atio_queue(vha); goto ldv_66409; case 29U: qlt_24xx_process_atio_queue(vha); qla24xx_process_response_queue(vha, rsp); goto ldv_66409; default: ql_dbg(33554432U, vha, 20559, "Unrecognized interrupt type (%d).\n", stat * 255U); goto ldv_66409; } ldv_66409: writel(2684354560U, (void volatile *)(& reg->hccr)); __readl((void const volatile *)(& reg->hccr)); tmp___6 = ldv__builtin_expect((long )(((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) && (unsigned int )(ha->pdev)->revision == 1U), 0L); if (tmp___6 != 0L) { __const_udelay(17500UL); } else { } ldv_66417: tmp___7 = iter; iter = iter - 1UL; if (tmp___7 != 0UL) { goto ldv_66416; } else { } ldv_66404: qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } static irqreturn_t qla24xx_msix_rsp_q(int irq , void *dev_id ) { struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_24xx *reg ; struct scsi_qla_host *vha ; unsigned long flags ; uint32_t stat ; raw_spinlock_t *tmp ; void *tmp___0 ; bool tmp___1 ; { stat = 0U; rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20570, "%s: NULL response queue pointer.\n", "qla24xx_msix_rsp_q"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->isp24; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (struct scsi_qla_host *)tmp___0; stat = readl((void const volatile *)(& reg->host_status)); tmp___1 = qla2x00_check_reg32_for_disconnect(vha, stat); if ((int )tmp___1) { goto out; } else { } qla24xx_process_response_queue(vha, rsp); if (*((unsigned long *)ha + 2UL) == 0UL) { writel(2684354560U, (void volatile *)(& reg->hccr)); __readl((void const volatile *)(& reg->hccr)); } else { } out: spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } static irqreturn_t qla25xx_msix_rsp_q(int irq , 
void *dev_id ) { struct qla_hw_data *ha ; scsi_qla_host_t *vha ; struct rsp_que *rsp ; struct device_reg_24xx *reg ; unsigned long flags ; uint32_t hccr ; void *tmp ; raw_spinlock_t *tmp___0 ; bool tmp___1 ; { hccr = 0U; rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20571, "%s: NULL response queue pointer.\n", "qla25xx_msix_rsp_q"); return (0); } else { } ha = rsp->hw; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; if (*((unsigned long *)ha + 2UL) == 0UL) { reg = & (ha->iobase)->isp24; tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); writel(2684354560U, (void volatile *)(& reg->hccr)); hccr = __readl((void const volatile *)(& reg->hccr)); spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } tmp___1 = qla2x00_check_reg32_for_disconnect(vha, hccr); if ((int )tmp___1) { goto out; } else { } ldv_queue_work_on_117((int )rsp->id + -1, ha->wq, & rsp->q_work); out: ; return (1); } } static irqreturn_t qla24xx_msix_default(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_24xx *reg ; int status ; uint32_t stat ; uint32_t hccr ; uint16_t mb[8U] ; unsigned long flags ; raw_spinlock_t *tmp ; void *tmp___0 ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; { rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20572, "%s: NULL response queue pointer.\n", "qla24xx_msix_default"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->isp24; status = 0; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; stat = readl((void const volatile *)(& reg->host_status)); tmp___1 = qla2x00_check_reg32_for_disconnect(vha, stat); if ((int )tmp___1) { goto ldv_66465; } else { } if ((stat & 256U) != 0U) { tmp___2 = pci_channel_offline(ha->pdev); tmp___3 = ldv__builtin_expect(tmp___2 != 0, 0L); if (tmp___3 != 0L) { goto ldv_66465; } else { } hccr = readl((void const volatile *)(& reg->hccr)); ql_log(2U, vha, 20560, "RISC paused -- HCCR=%x, Dumping firmware.\n", hccr); qla2xxx_check_risc_status(vha); (*((ha->isp_ops)->fw_dump))(vha, 1); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_66465; } else if ((stat & 32768U) == 0U) { goto ldv_66465; } else { } switch (stat & 255U) { case 1U: ; case 2U: ; case 16U: ; case 17U: qla24xx_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; goto ldv_66470; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((void const volatile *)(& reg->mailbox1)); mb[2] = readw((void const volatile *)(& reg->mailbox2)); mb[3] = readw((void const volatile *)(& reg->mailbox3)); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_66470; case 19U: ; case 20U: qla24xx_process_response_queue(vha, rsp); goto ldv_66470; case 28U: qlt_24xx_process_atio_queue(vha); goto ldv_66470; case 29U: qlt_24xx_process_atio_queue(vha); qla24xx_process_response_queue(vha, rsp); goto ldv_66470; default: ql_dbg(33554432U, vha, 20561, "Unrecognized interrupt type (%d).\n", stat & 255U); goto ldv_66470; } ldv_66470: writel(2684354560U, (void volatile *)(& reg->hccr)); ldv_66465: qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } static struct qla_init_msix_entry msix_entries[3U] = { {"qla2xxx (default)", & 
qla24xx_msix_default}, {"qla2xxx (rsp_q)", & qla24xx_msix_rsp_q}, {"qla2xxx (multiq)", & qla25xx_msix_rsp_q}}; static struct qla_init_msix_entry qla82xx_msix_entries[2U] = { {"qla2xxx (default)", & qla82xx_msix_default}, {"qla2xxx (rsp_q)", & qla82xx_msix_rsp_q}}; static struct qla_init_msix_entry qla83xx_msix_entries[3U] = { {"qla2xxx (default)", & qla24xx_msix_default}, {"qla2xxx (rsp_q)", & qla24xx_msix_rsp_q}, {"qla2xxx (atio_q)", & qla83xx_msix_atio_q}}; static void qla24xx_disable_msix(struct qla_hw_data *ha ) { int i ; struct qla_msix_entry *qentry ; scsi_qla_host_t *vha ; void *tmp ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; i = 0; goto ldv_66490; ldv_66489: qentry = ha->msix_entries + (unsigned long )i; if (qentry->have_irq != 0) { free_irq(qentry->vector, (void *)qentry->rsp); } else { } i = i + 1; ldv_66490: ; if ((int )ha->msix_count > i) { goto ldv_66489; } else { } pci_disable_msix(ha->pdev); kfree((void const *)ha->msix_entries); ha->msix_entries = (struct qla_msix_entry *)0; ha->flags.msix_enabled = 0U; ql_dbg(1073741824U, vha, 66, "Disabled the MSI.\n"); return; } } static int qla24xx_enable_msix(struct qla_hw_data *ha , struct rsp_que *rsp ) { int i ; int ret ; struct msix_entry *entries ; struct qla_msix_entry *qentry ; scsi_qla_host_t *vha ; void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = kzalloc((unsigned long )ha->msix_count * 8UL, 208U); entries = (struct msix_entry *)tmp___0; if ((unsigned long )entries == (unsigned long )((struct msix_entry *)0)) { ql_log(1U, vha, 188, "Failed to allocate memory for msix_entry.\n"); return (-12); } else { } i = 0; goto ldv_66502; ldv_66501: (entries + (unsigned long )i)->entry = (u16 )i; i = i + 1; ldv_66502: ; if ((int )ha->msix_count > i) { goto ldv_66501; } else { } ret = pci_enable_msix_range(ha->pdev, entries, 2, (int )ha->msix_count); if (ret < 0) { ql_log(0U, vha, 199, "MSI-X: Failed to enable support, giving up -- %d/%d.\n", (int )ha->msix_count, ret); goto msix_out; } else if ((int )ha->msix_count > ret) { ql_log(1U, vha, 198, "MSI-X: Failed to enable support -- %d/%d\n Retry with %d vectors.\n", (int )ha->msix_count, ret, ret); } else { } ha->msix_count = (uint16_t )ret; ha->max_rsp_queues = (unsigned int )((uint8_t )ha->msix_count) + 255U; tmp___1 = kzalloc((unsigned long )ha->msix_count * 24UL, 208U); ha->msix_entries = (struct qla_msix_entry *)tmp___1; if ((unsigned long )ha->msix_entries == (unsigned long )((struct qla_msix_entry *)0)) { ql_log(0U, vha, 200, "Failed to allocate memory for ha->msix_entries.\n"); ret = -12; goto msix_out; } else { } ha->flags.msix_enabled = 1U; i = 0; goto ldv_66506; ldv_66505: qentry = ha->msix_entries + (unsigned long )i; qentry->vector = (entries + (unsigned long )i)->vector; qentry->entry = (entries + (unsigned long )i)->entry; qentry->have_irq = 0; qentry->rsp = (struct rsp_que *)0; i = i + 1; ldv_66506: ; if ((int )ha->msix_count > i) { goto ldv_66505; } else { } i = 0; goto ldv_66510; ldv_66509: qentry = ha->msix_entries + (unsigned long )i; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { ret = request_irq(qentry->vector, qla82xx_msix_entries[i].handler, 0UL, qla82xx_msix_entries[i].name, (void *)rsp); } else { ret = request_irq(qentry->vector, msix_entries[i].handler, 0UL, msix_entries[i].name, (void *)rsp); } if (ret != 0) { goto msix_register_fail; } else { } qentry->have_irq = 1; qentry->rsp = rsp; rsp->msix = qentry; i = i + 1; ldv_66510: ; if (i <= 1) { 
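/* CIL loop check: request_irq() is issued only for the first two MSI-X vectors (the default and rsp_q handlers) */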
goto ldv_66509; } else { } if (ql2x_ini_mode != 2 && ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { qentry = ha->msix_entries + 2UL; ret = request_irq(qentry->vector, qla83xx_msix_entries[2].handler, 0UL, qla83xx_msix_entries[2].name, (void *)rsp); qentry->have_irq = 1; qentry->rsp = rsp; rsp->msix = qentry; } else { } msix_register_fail: ; if (ret != 0) { ql_log(0U, vha, 203, "MSI-X: unable to register handler -- %x/%d.\n", qentry->vector, ret); qla24xx_disable_msix(ha); ha->mqenable = 0U; goto msix_out; } else { } if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { if (((unsigned long )ha->msixbase != (unsigned long )((device_reg_t *)0) && (unsigned long )ha->mqiobase != (unsigned long )((device_reg_t *)0)) && ((unsigned int )ha->max_rsp_queues > 1U || (unsigned int )ha->max_req_queues > 1U)) { ha->mqenable = 1U; } else { } } else if ((unsigned long )ha->mqiobase != (unsigned long )((device_reg_t *)0) && ((unsigned int )ha->max_rsp_queues > 1U || (unsigned int )ha->max_req_queues > 1U)) { ha->mqenable = 1U; } else { } ql_dbg(1048576U, vha, 49157, "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", ha->mqiobase, (int )ha->max_rsp_queues, (int )ha->max_req_queues); ql_dbg(1073741824U, vha, 85, "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", ha->mqiobase, (int )ha->max_rsp_queues, (int )ha->max_req_queues); msix_out: kfree((void const *)entries); return (ret); } } int qla2x00_request_irqs(struct qla_hw_data *ha , struct rsp_que *rsp ) { int ret ; device_reg_t *reg ; scsi_qla_host_t *vha ; void *tmp ; { ret = 258; reg = ha->iobase; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; if (((((((ha->device_type & 256U) == 0U && (ha->device_type & 2048U) == 0U) && (ha->device_type & 4096U) == 0U) && ((((ha->device_type & 8192U) == 0U && (ha->device_type & 16384U) == 0U) && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U)) && (ha->device_type & 32768U) == 0U) && (ha->device_type & 131072U) == 0U) && ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U)) { goto skip_msi; } else { } if ((unsigned int )(ha->pdev)->subsystem_vendor == 4156U && (((unsigned int )(ha->pdev)->subsystem_device == 28736U || (unsigned int )(ha->pdev)->subsystem_device == 28737U) || (unsigned int )(ha->pdev)->subsystem_device == 5893U)) { ql_log(1U, vha, 52, "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", (int )(ha->pdev)->subsystem_vendor, (int )(ha->pdev)->subsystem_device); goto skip_msi; } else { } if ((ha->device_type & 256U) != 0U && (unsigned int )(ha->pdev)->revision <= 2U) { ql_log(1U, vha, 53, "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", (int )(ha->pdev)->revision, 3); goto skip_msix; } else { } ret = qla24xx_enable_msix(ha, rsp); if (ret == 0) { ql_dbg(1073741824U, vha, 54, "MSI-X: Enabled (0x%X, 0x%X).\n", (int )ha->chip_revision, (int )ha->fw_attributes); goto clear_risc_ints; } else { } skip_msix: ql_log(2U, vha, 55, "Falling back-to MSI mode -%d.\n", ret); if ((((((((ha->device_type & 128U) == 0U && (ha->device_type & 256U) == 0U) && (ha->device_type & 2048U) == 0U) && (ha->device_type & 4096U) == 0U) && (ha->device_type & 8192U) == 0U) && ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U)) && (ha->device_type & 131072U) == 0U) && ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U)) { goto skip_msi; } else { } ret = pci_enable_msi_exact(ha->pdev, 1); if (ret == 0) 
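/* pci_enable_msi_exact() succeeded: run with a single MSI vector */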
{ ql_dbg(1073741824U, vha, 56, "MSI: Enabled.\n"); ha->flags.msi_enabled = 1U; } else { ql_log(1U, vha, 57, "Falling back-to INTa mode -- %d.\n", ret); } skip_msi: ; if (*((unsigned long *)ha + 2UL) == 0UL && (ha->device_type & 16384U) != 0U) { return (258); } else { } ret = request_irq((ha->pdev)->irq, (ha->isp_ops)->intr_handler, *((unsigned long *)ha + 2UL) != 0UL ? 0UL : 128UL, "qla2xxx", (void *)rsp); if (ret != 0) { ql_log(1U, vha, 58, "Failed to reserve interrupt %d already in use.\n", (ha->pdev)->irq); goto fail; } else if (*((unsigned long *)ha + 2UL) == 0UL) { ql_dbg(1073741824U, vha, 293, "INTa mode: Enabled.\n"); ha->flags.mr_intr_valid = 1U; } else { } clear_risc_ints: ; if ((ha->device_type & 134217728U) != 0U || (ha->device_type & 131072U) != 0U) { goto fail; } else { } spin_lock_irq(& ha->hardware_lock); writew(0, (void volatile *)(& reg->isp.semaphore)); spin_unlock_irq(& ha->hardware_lock); fail: ; return (ret); } } void qla2x00_free_irqs(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct rsp_que *rsp ; { ha = vha->hw; if ((unsigned long )ha->rsp_q_map == (unsigned long )((struct rsp_que **)0) || (unsigned long )*(ha->rsp_q_map) == (unsigned long )((struct rsp_que *)0)) { return; } else { } rsp = *(ha->rsp_q_map); if (*((unsigned long *)ha + 2UL) != 0UL) { qla24xx_disable_msix(ha); } else if (*((unsigned long *)ha + 2UL) != 0UL) { free_irq((ha->pdev)->irq, (void *)rsp); pci_disable_msi(ha->pdev); } else { free_irq((ha->pdev)->irq, (void *)rsp); } return; } } int qla25xx_request_irq(struct rsp_que *rsp ) { struct qla_hw_data *ha ; struct qla_init_msix_entry *intr ; struct qla_msix_entry *msix ; scsi_qla_host_t *vha ; void *tmp ; int ret ; { ha = rsp->hw; intr = (struct qla_init_msix_entry *)(& msix_entries) + 2UL; msix = rsp->msix; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ret = request_irq(msix->vector, intr->handler, 0UL, intr->name, (void *)rsp); if (ret != 0) { ql_log(0U, vha, 230, "MSI-X: Unable to register handler -- %x/%d.\n", msix->vector, ret); return (ret); } else { } msix->have_irq = 1; msix->rsp = rsp; return (ret); } } int reg_timer_18(struct timer_list *timer ) { { ldv_timer_list_18 = timer; ldv_timer_state_18 = 1; return (0); } } void activate_pending_timer_18(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_18 == (unsigned long )timer) { if (ldv_timer_state_18 == 2 || pending_flag != 0) { ldv_timer_list_18 = timer; ldv_timer_list_18->data = data; ldv_timer_state_18 = 1; } else { } return; } else { } reg_timer_18(timer); ldv_timer_list_18->data = data; return; } } void choose_timer_18(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_18 = 2; return; } } void disable_suitable_timer_18(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_18) { ldv_timer_state_18 = 0; return; } else { } return; } } bool ldv_queue_work_on_111(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_112(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, 
ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_113(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_114(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_115(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_116(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } bool ldv_queue_work_on_117(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___14 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_work_on_129(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_131(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_130(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_133(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_132(struct workqueue_struct *ldv_func_arg1 ) ; void activate_pending_timer_19(struct timer_list *timer , unsigned long data , int pending_flag ) ; void choose_timer_19(struct timer_list *timer ) ; int reg_timer_19(struct timer_list *timer ) ; void disable_suitable_timer_19(struct timer_list *timer ) ; int ldv_scsi_add_host_with_dma_134(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; void qla2x00_get_sym_node_name(scsi_qla_host_t *vha , uint8_t *snn , size_t size ) ; void qlt_rff_id(struct scsi_qla_host *vha , struct ct_sns_req *ct_req ) ; __inline static struct new_utsname *utsname(void) { struct task_struct *tmp ; { tmp = get_current(); return (& ((tmp->nsproxy)->uts_ns)->name); } } static int qla2x00_sns_ga_nxt(scsi_qla_host_t *vha , fc_port_t *fcport ) ; static int qla2x00_sns_gid_pt(scsi_qla_host_t *vha , sw_info_t *list ) ; static int qla2x00_sns_gpn_id(scsi_qla_host_t *vha , sw_info_t *list ) ; static int qla2x00_sns_gnn_id(scsi_qla_host_t *vha , sw_info_t *list ) ; static int qla2x00_sns_rft_id(scsi_qla_host_t *vha ) ; static int qla2x00_sns_rnn_id(scsi_qla_host_t *vha ) ; void *qla2x00_prep_ms_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) { struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt 
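/* body below reuses the preallocated ha->ms_iocb as a Management Server IOCB and points both request and response data segments at ha->ct_sns_dma */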
; { ha = vha->hw; ms_pkt = ha->ms_iocb; memset((void *)ms_pkt, 0, 64UL); ms_pkt->entry_type = 41U; ms_pkt->entry_count = 1U; if ((int )ha->device_type < 0) { ms_pkt->loop_id.extended = 128U; } else { ms_pkt->loop_id.id.standard = 128U; } ms_pkt->control_flags = 34U; ms_pkt->timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; ms_pkt->cmd_dsd_count = 1U; ms_pkt->total_dsd_count = 2U; ms_pkt->rsp_bytecount = rsp_size; ms_pkt->req_bytecount = req_size; ms_pkt->dseg_req_address[0] = (unsigned int )ha->ct_sns_dma; ms_pkt->dseg_req_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ms_pkt->dseg_req_length = ms_pkt->req_bytecount; ms_pkt->dseg_rsp_address[0] = (unsigned int )ha->ct_sns_dma; ms_pkt->dseg_rsp_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount; vha->qla_stats.control_requests = vha->qla_stats.control_requests + 1U; return ((void *)ms_pkt); } } void *qla24xx_prep_ms_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) { struct qla_hw_data *ha ; struct ct_entry_24xx *ct_pkt ; { ha = vha->hw; ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; memset((void *)ct_pkt, 0, 64UL); ct_pkt->entry_type = 41U; ct_pkt->entry_count = 1U; ct_pkt->nport_handle = 2044U; ct_pkt->timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; ct_pkt->cmd_dsd_count = 1U; ct_pkt->rsp_dsd_count = 1U; ct_pkt->rsp_byte_count = rsp_size; ct_pkt->cmd_byte_count = req_size; ct_pkt->dseg_0_address[0] = (unsigned int )ha->ct_sns_dma; ct_pkt->dseg_0_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; ct_pkt->dseg_1_address[0] = (unsigned int )ha->ct_sns_dma; ct_pkt->dseg_1_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; ct_pkt->vp_index = (uint8_t )vha->vp_idx; vha->qla_stats.control_requests = vha->qla_stats.control_requests + 1U; return ((void *)ct_pkt); } } __inline static struct ct_sns_req *qla2x00_prep_ct_req(struct ct_sns_pkt *p , uint16_t cmd , uint16_t rsp_size ) { __u16 tmp ; __u16 tmp___0 ; { memset((void *)p, 0, 8208UL); p->p.req.header.revision = 1U; p->p.req.header.gs_type = 252U; p->p.req.header.gs_subtype = 2U; tmp = __fswab16((int )cmd); p->p.req.command = tmp; tmp___0 = __fswab16((int )((__u16 )(((int )rsp_size + -16) / 4))); p->p.req.max_rsp_size = tmp___0; return (& p->p.req); } } static int qla2x00_chk_ms_status(scsi_qla_host_t *vha , ms_iocb_entry_t *ms_pkt , struct ct_sns_rsp *ct_rsp , char const *routine ) { int rval ; uint16_t comp_status ; struct qla_hw_data *ha ; { ha = vha->hw; rval = 258; if ((unsigned int )ms_pkt->entry_status != 0U) { ql_dbg(268435456U, vha, 8241, "%s failed, error status (%x) on port_id: %02x%02x%02x.\n", routine, (int )ms_pkt->entry_status, (int )vha->d_id.b.domain, (int )vha->d_id.b.area, (int )vha->d_id.b.al_pa); } else { if ((ha->device_type & 134217728U) != 0U) { comp_status = ((struct ct_entry_24xx *)ms_pkt)->comp_status; } else { comp_status = ms_pkt->status; } switch ((int )comp_status) { case 0: ; case 21: ; case 7: ; if ((unsigned int )ct_rsp->header.response != 640U) { ql_dbg(268566528U, vha, 8311, "%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n", routine, (int )vha->d_id.b.domain, (int )vha->d_id.b.area, (int )vha->d_id.b.al_pa, (int )comp_status, (int )ct_rsp->header.response); ql_dump_buffer(268566528U, vha, 8312, (uint8_t *)(& ct_rsp->header), 16U); rval = 1; } else { rval = 0; } goto ldv_65850; default: ql_dbg(268435456U, vha, 8243, "%s failed, completion 
status (%x) on port_id: %02x%02x%02x.\n", routine, (int )comp_status, (int )vha->d_id.b.domain, (int )vha->d_id.b.area, (int )vha->d_id.b.al_pa); goto ldv_65850; } ldv_65850: ; } return (rval); } } int qla2x00_ga_nxt(scsi_qla_host_t *vha , fc_port_t *fcport ) { int rval ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; struct qla_hw_data *ha ; int tmp ; void *tmp___0 ; int tmp___1 ; { ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp = qla2x00_sns_ga_nxt(vha, fcport); return (tmp); } else { } tmp___0 = (*((ha->isp_ops)->prep_ms_iocb))(vha, 20U, 636U); ms_pkt = (ms_iocb_entry_t *)tmp___0; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 256, 636); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain; ct_req->req.port_id.port_id[1] = fcport->d_id.b.area; ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8290, "GA_NXT issue IOCB failed (%d).\n", rval); } else { tmp___1 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT"); if (tmp___1 != 0) { rval = 258; } else { fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0]; fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1]; fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2]; memcpy((void *)(& fcport->node_name), (void const *)(& ct_rsp->rsp.ga_nxt.node_name), 8UL); memcpy((void *)(& fcport->port_name), (void const *)(& ct_rsp->rsp.ga_nxt.port_name), 8UL); fcport->fc4_type = (int )ct_rsp->rsp.ga_nxt.fc4_types[2] & 1 ? 8U : 0U; if ((unsigned int )ct_rsp->rsp.ga_nxt.port_type != 1U && (unsigned int )ct_rsp->rsp.ga_nxt.port_type != 2U) { fcport->d_id.b.domain = 240U; } else { } ql_dbg(268435456U, vha, 8291, "GA_NXT entry - nn %8phN pn %8phN port_id=%02x%02x%02x.\n", (uint8_t *)(& fcport->node_name), (uint8_t *)(& fcport->port_name), (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } } return (rval); } } __inline static int qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha ) { { return (((int )(vha->hw)->max_fibre_devices + 4) * 4); } } int qla2x00_gid_pt(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; uint16_t i ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; struct ct_sns_gid_pt_data *gid_data ; struct qla_hw_data *ha ; uint16_t gid_pt_rsp_size ; int tmp ; int tmp___0 ; void *tmp___1 ; int tmp___2 ; { ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp = qla2x00_sns_gid_pt(vha, list); return (tmp); } else { } gid_data = (struct ct_sns_gid_pt_data *)0; tmp___0 = qla2x00_gid_pt_rsp_size(vha); gid_pt_rsp_size = (uint16_t )tmp___0; tmp___1 = (*((ha->isp_ops)->prep_ms_iocb))(vha, 20U, (uint32_t )gid_pt_rsp_size); ms_pkt = (ms_iocb_entry_t *)tmp___1; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 417, (int )gid_pt_rsp_size); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.gid_pt.port_type = 127U; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8277, "GID_PT issue IOCB failed (%d).\n", rval); } else { tmp___2 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT"); if (tmp___2 != 0) { rval = 258; } else { i = 0U; goto ldv_65878; ldv_65877: gid_data = (struct ct_sns_gid_pt_data *)(& ct_rsp->rsp.gid_pt.entries) + (unsigned long )i; (list + (unsigned long )i)->d_id.b.domain = gid_data->port_id[0]; (list + (unsigned long )i)->d_id.b.area = gid_data->port_id[1]; (list + (unsigned long 
)i)->d_id.b.al_pa = gid_data->port_id[2]; memset((void *)(& (list + (unsigned long )i)->fabric_port_name), 0, 8UL); (list + (unsigned long )i)->fp_speed = 65535U; if ((int )((signed char )gid_data->control_byte) < 0) { (list + (unsigned long )i)->d_id.b.rsvd_1 = gid_data->control_byte; goto ldv_65876; } else { } i = (uint16_t )((int )i + 1); ldv_65878: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_65877; } else { } ldv_65876: ; if ((int )ha->max_fibre_devices == (int )i) { rval = 258; } else { } } } return (rval); } } int qla2x00_gpn_id(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; uint16_t i ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; struct qla_hw_data *ha ; int tmp ; void *tmp___0 ; int tmp___1 ; { rval = 0; ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp = qla2x00_sns_gpn_id(vha, list); return (tmp); } else { } i = 0U; goto ldv_65891; ldv_65890: tmp___0 = (*((ha->isp_ops)->prep_ms_iocb))(vha, 20U, 24U); ms_pkt = (ms_iocb_entry_t *)tmp___0; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 274, 24); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.port_id.port_id[0] = (list + (unsigned long )i)->d_id.b.domain; ct_req->req.port_id.port_id[1] = (list + (unsigned long )i)->d_id.b.area; ct_req->req.port_id.port_id[2] = (list + (unsigned long )i)->d_id.b.al_pa; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8278, "GPN_ID issue IOCB failed (%d).\n", rval); goto ldv_65889; } else { tmp___1 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GPN_ID"); if (tmp___1 != 0) { rval = 258; goto ldv_65889; } else { memcpy((void *)(& (list + (unsigned long )i)->port_name), (void const *)(& ct_rsp->rsp.gpn_id.port_name), 8UL); } } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_65889; } else { } i = (uint16_t )((int )i + 1); ldv_65891: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_65890; } else { } ldv_65889: ; return (rval); } } int qla2x00_gnn_id(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; uint16_t i ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; int tmp ; void *tmp___0 ; int tmp___1 ; { rval = 0; ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp = qla2x00_sns_gnn_id(vha, list); return (tmp); } else { } i = 0U; goto ldv_65904; ldv_65903: tmp___0 = (*((ha->isp_ops)->prep_ms_iocb))(vha, 20U, 24U); ms_pkt = (ms_iocb_entry_t *)tmp___0; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 275, 24); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.port_id.port_id[0] = (list + (unsigned long )i)->d_id.b.domain; ct_req->req.port_id.port_id[1] = (list + (unsigned long )i)->d_id.b.area; ct_req->req.port_id.port_id[2] = (list + (unsigned long )i)->d_id.b.al_pa; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8279, "GNN_ID issue IOCB failed (%d).\n", rval); goto ldv_65902; } else { tmp___1 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GNN_ID"); if (tmp___1 != 0) { rval = 258; goto ldv_65902; } else { memcpy((void *)(& (list + (unsigned long )i)->node_name), (void const *)(& ct_rsp->rsp.gnn_id.node_name), 8UL); ql_dbg(268435456U, vha, 8280, "GID_PT entry - nn %8phN pn %8phN portid=%02x%02x%02x.\n", (uint8_t *)(& (list + (unsigned long )i)->node_name), (uint8_t *)(& (list + (unsigned long )i)->port_name), (int )(list + (unsigned long )i)->d_id.b.domain, (int )(list + (unsigned long 
)i)->d_id.b.area, (int )(list + (unsigned long )i)->d_id.b.al_pa); } } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_65902; } else { } i = (uint16_t )((int )i + 1); ldv_65904: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_65903; } else { } ldv_65902: ; return (rval); } } int qla2x00_rft_id(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; int tmp ; void *tmp___0 ; int tmp___1 ; { ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp = qla2x00_sns_rft_id(vha); return (tmp); } else { } tmp___0 = (*((ha->isp_ops)->prep_ms_iocb))(vha, 52U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp___0; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 535, 16); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain; ct_req->req.rft_id.port_id[1] = vha->d_id.b.area; ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa; ct_req->req.rft_id.fc4_types[2] = 1U; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8259, "RFT_ID issue IOCB failed (%d).\n", rval); } else { tmp___1 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID"); if (tmp___1 != 0) { rval = 258; } else { ql_dbg(268435456U, vha, 8260, "RFT_ID exiting normally.\n"); } } return (rval); } } int qla2x00_rff_id(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *tmp ; int tmp___0 ; { ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { ql_dbg(268435456U, vha, 8262, "RFF_ID call not supported on ISP2100/ISP2200.\n"); return (0); } else { } tmp = (*((ha->isp_ops)->prep_ms_iocb))(vha, 24U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 543, 16); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain; ct_req->req.rff_id.port_id[1] = vha->d_id.b.area; ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa; qlt_rff_id(vha, ct_req); ct_req->req.rff_id.fc4_type = 8U; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8263, "RFF_ID issue IOCB failed (%d).\n", rval); } else { tmp___0 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID"); if (tmp___0 != 0) { rval = 258; } else { ql_dbg(268435456U, vha, 8264, "RFF_ID exiting normally.\n"); } } return (rval); } } int qla2x00_rnn_id(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; int tmp ; void *tmp___0 ; int tmp___1 ; { ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp = qla2x00_sns_rnn_id(vha); return (tmp); } else { } tmp___0 = (*((ha->isp_ops)->prep_ms_iocb))(vha, 28U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp___0; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 531, 16); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain; ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area; ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa; memcpy((void *)(& ct_req->req.rnn_id.node_name), (void const *)(& vha->node_name), 8UL); rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8269, "RNN_ID issue IOCB failed (%d).\n", rval); } else { tmp___1 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID"); if (tmp___1 != 0) { rval = 258; } else { 
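/* name server accepted the RNN_ID (register node name) request */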
ql_dbg(268435456U, vha, 8270, "RNN_ID exiting normally.\n"); } } return (rval); } } void qla2x00_get_sym_node_name(scsi_qla_host_t *vha , uint8_t *snn , size_t size ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((ha->device_type & 131072U) != 0U) { snprintf((char *)snn, size, "%s FW:v%s DVR:v%s", (uint8_t *)(& ha->model_number), (uint8_t *)(& ha->mr.fw_version), (char *)(& qla2x00_version_str)); } else { snprintf((char *)snn, size, "%s FW:v%d.%02d.%02d DVR:v%s", (uint8_t *)(& ha->model_number), (int )ha->fw_major_version, (int )ha->fw_minor_version, (int )ha->fw_subminor_version, (char *)(& qla2x00_version_str)); } return; } } int qla2x00_rsnn_nn(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *tmp ; size_t tmp___0 ; int tmp___1 ; { ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { ql_dbg(268435456U, vha, 8272, "RSNN_ID call unsupported on ISP2100/ISP2200.\n"); return (0); } else { } tmp = (*((ha->isp_ops)->prep_ms_iocb))(vha, 0U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 569, 16); ct_rsp = & (ha->ct_sns)->p.rsp; memcpy((void *)(& ct_req->req.rsnn_nn.node_name), (void const *)(& vha->node_name), 8UL); qla2x00_get_sym_node_name(vha, (uint8_t *)(& ct_req->req.rsnn_nn.sym_node_name), 255UL); tmp___0 = strlen((char const *)(& ct_req->req.rsnn_nn.sym_node_name)); ct_req->req.rsnn_nn.name_len = (unsigned char )tmp___0; ms_pkt->req_bytecount = (unsigned int )((int )ct_req->req.rsnn_nn.name_len + 25); ms_pkt->dseg_req_length = ms_pkt->req_bytecount; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8273, "RSNN_NN issue IOCB failed (%d).\n", rval); } else { tmp___1 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN"); if (tmp___1 != 0) { rval = 258; } else { ql_dbg(268435456U, vha, 8274, "RSNN_NN exiting normally.\n"); } } return (rval); } } __inline static struct sns_cmd_pkt *qla2x00_prep_sns_cmd(scsi_qla_host_t *vha , uint16_t cmd , uint16_t scmd_len , uint16_t data_size ) { uint16_t wc ; struct sns_cmd_pkt *sns_cmd ; struct qla_hw_data *ha ; { ha = vha->hw; sns_cmd = ha->sns_cmd; memset((void *)sns_cmd, 0, 2064UL); wc = (uint16_t )((unsigned int )data_size / 2U); sns_cmd->p.cmd.buffer_length = wc; sns_cmd->p.cmd.buffer_address[0] = (unsigned int )ha->sns_cmd_dma; sns_cmd->p.cmd.buffer_address[1] = (unsigned int )(ha->sns_cmd_dma >> 32ULL); sns_cmd->p.cmd.subcommand_length = scmd_len; sns_cmd->p.cmd.subcommand = cmd; wc = (uint16_t )(((int )data_size + -16) / 4); sns_cmd->p.cmd.size = wc; vha->qla_stats.control_requests = vha->qla_stats.control_requests + 1U; return (sns_cmd); } } static int qla2x00_sns_ga_nxt(scsi_qla_host_t *vha , fc_port_t *fcport ) { int rval ; struct qla_hw_data *ha ; struct sns_cmd_pkt *sns_cmd ; { rval = 0; ha = vha->hw; sns_cmd = qla2x00_prep_sns_cmd(vha, 256, 6, 636); sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa; sns_cmd->p.cmd.param[1] = fcport->d_id.b.area; sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain; rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 14, 2064UL); if (rval != 0) { ql_dbg(268435456U, vha, 8287, "GA_NXT Send SNS failed (%d).\n", rval); } else if ((unsigned int )sns_cmd->p.gan_data[8] != 128U || (unsigned int )sns_cmd->p.gan_data[9] != 2U) { ql_dbg(268566528U, vha, 8324, "GA_NXT failed, rejected request ga_nxt_rsp:\n"); ql_dump_buffer(268566528U, vha, 8308, (uint8_t *)(& sns_cmd->p.gan_data), 16U); rval = 258; } else { 
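/* GA_NXT accepted: copy the next port's ID, node name and port name out of the SNS response buffer */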
fcport->d_id.b.domain = sns_cmd->p.gan_data[17]; fcport->d_id.b.area = sns_cmd->p.gan_data[18]; fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19]; memcpy((void *)(& fcport->node_name), (void const *)(& sns_cmd->p.gan_data) + 284U, 8UL); memcpy((void *)(& fcport->port_name), (void const *)(& sns_cmd->p.gan_data) + 20U, 8UL); if ((unsigned int )sns_cmd->p.gan_data[16] != 1U && (unsigned int )sns_cmd->p.gan_data[16] != 2U) { fcport->d_id.b.domain = 240U; } else { } ql_dbg(268435456U, vha, 8289, "GA_NXT entry - nn %8phN pn %8phN port_id=%02x%02x%02x.\n", (uint8_t *)(& fcport->node_name), (uint8_t *)(& fcport->port_name), (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } return (rval); } } static int qla2x00_sns_gid_pt(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; struct qla_hw_data *ha ; uint16_t i ; uint8_t *entry ; struct sns_cmd_pkt *sns_cmd ; uint16_t gid_pt_sns_data_size ; int tmp ; { ha = vha->hw; tmp = qla2x00_gid_pt_rsp_size(vha); gid_pt_sns_data_size = (uint16_t )tmp; sns_cmd = qla2x00_prep_sns_cmd(vha, 417, 6, (int )gid_pt_sns_data_size); sns_cmd->p.cmd.param[0] = 127U; rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 14, 2064UL); if (rval != 0) { ql_dbg(268435456U, vha, 8301, "GID_PT Send SNS failed (%d).\n", rval); } else if ((unsigned int )sns_cmd->p.gid_data[8] != 128U || (unsigned int )sns_cmd->p.gid_data[9] != 2U) { ql_dbg(268435456U, vha, 8239, "GID_PT failed, rejected request, gid_rsp:\n"); ql_dump_buffer(268566528U, vha, 8321, (uint8_t *)(& sns_cmd->p.gid_data), 16U); rval = 258; } else { i = 0U; goto ldv_65971; ldv_65970: entry = (uint8_t *)(& sns_cmd->p.gid_data) + ((unsigned long )((int )i * 4) + 16UL); (list + (unsigned long )i)->d_id.b.domain = *(entry + 1UL); (list + (unsigned long )i)->d_id.b.area = *(entry + 2UL); (list + (unsigned long )i)->d_id.b.al_pa = *(entry + 3UL); if ((int )((signed char )*entry) < 0) { (list + (unsigned long )i)->d_id.b.rsvd_1 = *entry; goto ldv_65969; } else { } i = (uint16_t )((int )i + 1); ldv_65971: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_65970; } else { } ldv_65969: ; if ((int )ha->max_fibre_devices == (int )i) { rval = 258; } else { } } return (rval); } } static int qla2x00_sns_gpn_id(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; struct qla_hw_data *ha ; uint16_t i ; struct sns_cmd_pkt *sns_cmd ; { rval = 0; ha = vha->hw; i = 0U; goto ldv_65982; ldv_65981: sns_cmd = qla2x00_prep_sns_cmd(vha, 274, 6, 24); sns_cmd->p.cmd.param[0] = (list + (unsigned long )i)->d_id.b.al_pa; sns_cmd->p.cmd.param[1] = (list + (unsigned long )i)->d_id.b.area; sns_cmd->p.cmd.param[2] = (list + (unsigned long )i)->d_id.b.domain; rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 14, 2064UL); if (rval != 0) { ql_dbg(268435456U, vha, 8242, "GPN_ID Send SNS failed (%d).\n", rval); } else if ((unsigned int )sns_cmd->p.gpn_data[8] != 128U || (unsigned int )sns_cmd->p.gpn_data[9] != 2U) { ql_dbg(268566528U, vha, 8318, "GPN_ID failed, rejected request, gpn_rsp:\n"); ql_dump_buffer(268566528U, vha, 8319, (uint8_t *)(& sns_cmd->p.gpn_data), 16U); rval = 258; } else { memcpy((void *)(& (list + (unsigned long )i)->port_name), (void const *)(& sns_cmd->p.gpn_data) + 16U, 8UL); } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_65980; } else { } i = (uint16_t )((int )i + 1); ldv_65982: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_65981; } else { } ldv_65980: ; return (rval); } } static int qla2x00_sns_gnn_id(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; struct 
qla_hw_data *ha ; uint16_t i ; struct sns_cmd_pkt *sns_cmd ; { rval = 0; ha = vha->hw; i = 0U; goto ldv_65993; ldv_65992: sns_cmd = qla2x00_prep_sns_cmd(vha, 275, 6, 24); sns_cmd->p.cmd.param[0] = (list + (unsigned long )i)->d_id.b.al_pa; sns_cmd->p.cmd.param[1] = (list + (unsigned long )i)->d_id.b.area; sns_cmd->p.cmd.param[2] = (list + (unsigned long )i)->d_id.b.domain; rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 14, 2064UL); if (rval != 0) { ql_dbg(268435456U, vha, 8255, "GNN_ID Send SNS failed (%d).\n", rval); } else if ((unsigned int )sns_cmd->p.gnn_data[8] != 128U || (unsigned int )sns_cmd->p.gnn_data[9] != 2U) { ql_dbg(268566528U, vha, 8322, "GNN_ID failed, rejected request, gnn_rsp:\n"); ql_dump_buffer(268566528U, vha, 8314, (uint8_t *)(& sns_cmd->p.gnn_data), 16U); rval = 258; } else { memcpy((void *)(& (list + (unsigned long )i)->node_name), (void const *)(& sns_cmd->p.gnn_data) + 16U, 8UL); ql_dbg(268435456U, vha, 8302, "GID_PT entry - nn %8phN pn %8phN port_id=%02x%02x%02x.\n", (uint8_t *)(& (list + (unsigned long )i)->node_name), (uint8_t *)(& (list + (unsigned long )i)->port_name), (int )(list + (unsigned long )i)->d_id.b.domain, (int )(list + (unsigned long )i)->d_id.b.area, (int )(list + (unsigned long )i)->d_id.b.al_pa); } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_65991; } else { } i = (uint16_t )((int )i + 1); ldv_65993: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_65992; } else { } ldv_65991: ; return (rval); } } static int qla2x00_sns_rft_id(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; struct sns_cmd_pkt *sns_cmd ; { ha = vha->hw; sns_cmd = qla2x00_prep_sns_cmd(vha, 535, 22, 16); sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa; sns_cmd->p.cmd.param[1] = vha->d_id.b.area; sns_cmd->p.cmd.param[2] = vha->d_id.b.domain; sns_cmd->p.cmd.param[5] = 1U; rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 30, 2064UL); if (rval != 0) { ql_dbg(268435456U, vha, 8288, "RFT_ID Send SNS failed (%d).\n", rval); } else if ((unsigned int )sns_cmd->p.rft_data[8] != 128U || (unsigned int )sns_cmd->p.rft_data[9] != 2U) { ql_dbg(268566528U, vha, 8323, "RFT_ID failed, rejected request rft_rsp:\n"); ql_dump_buffer(268566528U, vha, 8320, (uint8_t *)(& sns_cmd->p.rft_data), 16U); rval = 258; } else { ql_dbg(268435456U, vha, 8307, "RFT_ID exiting normally.\n"); } return (rval); } } static int qla2x00_sns_rnn_id(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; struct sns_cmd_pkt *sns_cmd ; { ha = vha->hw; sns_cmd = qla2x00_prep_sns_cmd(vha, 531, 10, 16); sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa; sns_cmd->p.cmd.param[1] = vha->d_id.b.area; sns_cmd->p.cmd.param[2] = vha->d_id.b.domain; sns_cmd->p.cmd.param[4] = vha->node_name[7]; sns_cmd->p.cmd.param[5] = vha->node_name[6]; sns_cmd->p.cmd.param[6] = vha->node_name[5]; sns_cmd->p.cmd.param[7] = vha->node_name[4]; sns_cmd->p.cmd.param[8] = vha->node_name[3]; sns_cmd->p.cmd.param[9] = vha->node_name[2]; sns_cmd->p.cmd.param[10] = vha->node_name[1]; sns_cmd->p.cmd.param[11] = vha->node_name[0]; rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 18, 2064UL); if (rval != 0) { ql_dbg(268435456U, vha, 8266, "RNN_ID Send SNS failed (%d).\n", rval); } else if ((unsigned int )sns_cmd->p.rnn_data[8] != 128U || (unsigned int )sns_cmd->p.rnn_data[9] != 2U) { ql_dbg(268566528U, vha, 8315, "RNN_ID failed, rejected request, rnn_rsp:\n"); ql_dump_buffer(268566528U, vha, 8316, (uint8_t *)(& sns_cmd->p.rnn_data), 16U); rval = 258; } else { ql_dbg(268435456U, vha, 8268, "RNN_ID exiting normally.\n"); } 
return (rval); } } static int qla2x00_mgmt_svr_login(scsi_qla_host_t *vha ) { int ret ; int rval ; uint16_t mb[32U] ; struct qla_hw_data *ha ; { ha = vha->hw; ret = 0; if (*((unsigned long *)vha + 19UL) != 0UL) { return (ret); } else { } rval = (*((ha->isp_ops)->fabric_login))(vha, (int )vha->mgmt_svr_loop_id, 255, 255, 250, (uint16_t *)(& mb), 2); if (rval != 0 || (unsigned int )mb[0] != 16384U) { if (rval == 259) { ql_dbg(268435456U, vha, 8325, "Failed management_server login: loopid=%x rval=%d\n", (int )vha->mgmt_svr_loop_id, rval); } else { ql_dbg(268435456U, vha, 8228, "Failed management_server login: loopid=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", (int )vha->mgmt_svr_loop_id, (int )mb[0], (int )mb[1], (int )mb[2], (int )mb[6], (int )mb[7]); } ret = 258; } else { vha->flags.management_server_logged_in = 1U; } return (ret); } } void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) { ms_iocb_entry_t *ms_pkt ; struct qla_hw_data *ha ; { ha = vha->hw; ms_pkt = ha->ms_iocb; memset((void *)ms_pkt, 0, 64UL); ms_pkt->entry_type = 41U; ms_pkt->entry_count = 1U; if ((int )ha->device_type < 0) { ms_pkt->loop_id.extended = vha->mgmt_svr_loop_id; } else { ms_pkt->loop_id.id.standard = (unsigned char )vha->mgmt_svr_loop_id; } ms_pkt->control_flags = 34U; ms_pkt->timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; ms_pkt->cmd_dsd_count = 1U; ms_pkt->total_dsd_count = 2U; ms_pkt->rsp_bytecount = rsp_size; ms_pkt->req_bytecount = req_size; ms_pkt->dseg_req_address[0] = (unsigned int )ha->ct_sns_dma; ms_pkt->dseg_req_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ms_pkt->dseg_req_length = ms_pkt->req_bytecount; ms_pkt->dseg_rsp_address[0] = (unsigned int )ha->ct_sns_dma; ms_pkt->dseg_rsp_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount; return ((void *)ms_pkt); } } void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) { struct ct_entry_24xx *ct_pkt ; struct qla_hw_data *ha ; { ha = vha->hw; ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; memset((void *)ct_pkt, 0, 64UL); ct_pkt->entry_type = 41U; ct_pkt->entry_count = 1U; ct_pkt->nport_handle = vha->mgmt_svr_loop_id; ct_pkt->timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; ct_pkt->cmd_dsd_count = 1U; ct_pkt->rsp_dsd_count = 1U; ct_pkt->rsp_byte_count = rsp_size; ct_pkt->cmd_byte_count = req_size; ct_pkt->dseg_0_address[0] = (unsigned int )ha->ct_sns_dma; ct_pkt->dseg_0_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; ct_pkt->dseg_1_address[0] = (unsigned int )ha->ct_sns_dma; ct_pkt->dseg_1_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; ct_pkt->vp_index = (uint8_t )vha->vp_idx; return ((void *)ct_pkt); } } __inline static ms_iocb_entry_t *qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha , uint32_t req_size ) { struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_entry_24xx *ct_pkt ; { ha = vha->hw; ms_pkt = ha->ms_iocb; ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; if ((ha->device_type & 134217728U) != 0U) { ct_pkt->cmd_byte_count = req_size; ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; } else { ms_pkt->req_bytecount = req_size; ms_pkt->dseg_req_length = ms_pkt->req_bytecount; } return (ms_pkt); } } __inline static struct ct_sns_req *qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p , uint16_t cmd , uint16_t rsp_size ) { __u16 tmp ; __u16 tmp___0 ; { memset((void *)p, 0, 8208UL); 
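/* The assignments below fill in a generic CT_IU preamble for an FDMI request:
 * revision 1, GS type 0xFA (management service) with subtype 0x10 (FDMI).
 * The command code and maximum response size are stored byte-swapped via
 * __fswab16, i.e. in wire (big-endian) order on this little-endian build,
 * and the response size is expressed in 4-byte words with the 16-byte CT
 * header subtracted: (rsp_size - 16) / 4. */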
p->p.req.header.revision = 1U; p->p.req.header.gs_type = 250U; p->p.req.header.gs_subtype = 16U; tmp = __fswab16((int )cmd); p->p.req.command = tmp; tmp___0 = __fswab16((int )((__u16 )(((int )rsp_size + -16) / 4))); p->p.req.max_rsp_size = tmp___0; return (& p->p.req); } } static int qla2x00_fdmi_rhba(scsi_qla_host_t *vha ) { int rval ; int alen ; uint32_t size ; uint32_t sn ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *entries ; struct ct_fdmi_hba_attr *eiter ; struct qla_hw_data *ha ; void *tmp ; size_t tmp___0 ; __u16 tmp___1 ; size_t tmp___2 ; __u16 tmp___3 ; size_t tmp___4 ; __u16 tmp___5 ; size_t tmp___6 ; __u16 tmp___7 ; int tmp___8 ; int tmp___9 ; size_t tmp___10 ; __u16 tmp___11 ; size_t tmp___12 ; __u16 tmp___13 ; size_t tmp___14 ; __u16 tmp___15 ; size_t tmp___16 ; __u16 tmp___17 ; int tmp___18 ; { ha = vha->hw; tmp = (*((ha->isp_ops)->prep_ms_fdmi_iocb))(vha, 0U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, 512, 16); ct_rsp = & (ha->ct_sns)->p.rsp; memcpy((void *)(& ct_req->req.rhba.hba_identifier), (void const *)(& vha->port_name), 8UL); ct_req->req.rhba.entry_count = 16777216U; memcpy((void *)(& ct_req->req.rhba.port_name), (void const *)(& vha->port_name), 8UL); size = 24U; ct_req->req.rhba.attrs.count = 150994944U; entries = (void *)(& ct_req->req.rhba.hba_identifier); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 256U; eiter->len = 3072U; memcpy((void *)(& eiter->a.node_name), (void const *)(& vha->node_name), 8UL); size = size + 12U; ql_dbg(268435456U, vha, 8229, "NodeName = %8phN.\n", (uint8_t *)(& eiter->a.node_name)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 512U; tmp___0 = strlen("QLogic Corporation"); alen = (int )tmp___0; snprintf((char *)(& eiter->a.manufacturer), 64UL, "%s", (char *)"QLogic Corporation"); alen = (4 - (alen & 3)) + alen; tmp___1 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___1; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8230, "Manufacturer = %s.\n", (uint8_t *)(& eiter->a.manufacturer)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 768U; if ((ha->device_type & 134217728U) != 0U) { qla2xxx_get_vpd_field(vha, (char *)"SN", (char *)(& eiter->a.serial_num), 32UL); } else { sn = (uint32_t )(((((int )ha->serial0 & 31) << 16) | ((int )ha->serial2 << 8)) | (int )ha->serial1); snprintf((char *)(& eiter->a.serial_num), 32UL, "%c%05d", sn / 100000U + 65U, sn % 100000U); } tmp___2 = strlen((char const *)(& eiter->a.serial_num)); alen = (int )tmp___2; alen = (4 - (alen & 3)) + alen; tmp___3 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___3; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8231, "Serial no. 
= %s.\n", (uint8_t *)(& eiter->a.serial_num)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 1024U; snprintf((char *)(& eiter->a.model), 17UL, "%s", (uint8_t *)(& ha->model_number)); tmp___4 = strlen((char const *)(& eiter->a.model)); alen = (int )tmp___4; alen = (4 - (alen & 3)) + alen; tmp___5 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___5; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8232, "Model Name = %s.\n", (uint8_t *)(& eiter->a.model)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 1280U; snprintf((char *)(& eiter->a.model_desc), 80UL, "%s", (char *)(& ha->model_desc)); tmp___6 = strlen((char const *)(& eiter->a.model_desc)); alen = (int )tmp___6; alen = (4 - (alen & 3)) + alen; tmp___7 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___7; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8233, "Model Desc = %s.\n", (uint8_t *)(& eiter->a.model_desc)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 1536U; if ((ha->device_type & 134217728U) == 0U) { snprintf((char *)(& eiter->a.hw_version), 32UL, "HW:%s", (uint8_t *)(& ha->adapter_id)); } else { tmp___9 = qla2xxx_get_vpd_field(vha, (char *)"MN", (char *)(& eiter->a.hw_version), 32UL); if (tmp___9 != 0) { } else { tmp___8 = qla2xxx_get_vpd_field(vha, (char *)"EC", (char *)(& eiter->a.hw_version), 32UL); if (tmp___8 != 0) { } else { snprintf((char *)(& eiter->a.hw_version), 32UL, "HW:%s", (uint8_t *)(& ha->adapter_id)); } } } tmp___10 = strlen((char const *)(& eiter->a.hw_version)); alen = (int )tmp___10; alen = (4 - (alen & 3)) + alen; tmp___11 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___11; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8234, "Hardware ver = %s.\n", (uint8_t *)(& eiter->a.hw_version)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 1792U; snprintf((char *)(& eiter->a.driver_version), 32UL, "%s", (char *)(& qla2x00_version_str)); tmp___12 = strlen((char const *)(& eiter->a.driver_version)); alen = (int )tmp___12; alen = (4 - (alen & 3)) + alen; tmp___13 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___13; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8235, "Driver ver = %s.\n", (uint8_t *)(& eiter->a.driver_version)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 2048U; snprintf((char *)(& eiter->a.orom_version), 16UL, "%d.%02d", (int )ha->bios_revision[1], (int )ha->bios_revision[0]); tmp___14 = strlen((char const *)(& eiter->a.orom_version)); alen = (int )tmp___14; alen = (4 - (alen & 3)) + alen; tmp___15 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___15; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8236, "Optrom vers = %s.\n", (uint8_t *)(& eiter->a.orom_version)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 2304U; (*((ha->isp_ops)->fw_version_str))(vha, (char *)(& eiter->a.fw_version), 32UL); tmp___16 = strlen((char const *)(& eiter->a.fw_version)); alen = (int )tmp___16; alen = (4 - (alen & 3)) + alen; tmp___17 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___17; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8237, "Firmware vers = %s.\n", (uint8_t *)(& eiter->a.fw_version)); qla2x00_update_ms_fdmi_iocb(vha, size + 16U); ql_dbg(268435456U, vha, 
8238, "RHBA identifier = %8phN size=%d.\n", (uint8_t *)(& ct_req->req.rhba.hba_identifier), size); ql_dump_buffer(268566528U, vha, 8310, (uint8_t *)entries, size); rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8240, "RHBA issue IOCB failed (%d).\n", rval); } else { tmp___18 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA"); if (tmp___18 != 0) { rval = 258; if ((unsigned int )ct_rsp->header.reason_code == 9U && (unsigned int )ct_rsp->header.explanation_code == 16U) { ql_dbg(268435456U, vha, 8244, "HBA already registered.\n"); rval = 265; } else { ql_dbg(268435456U, vha, 8365, "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n", (int )ct_rsp->header.reason_code, (int )ct_rsp->header.explanation_code); } } else { ql_dbg(268435456U, vha, 8245, "RHBA exiting normally.\n"); } } return (rval); } } static int qla2x00_fdmi_rpa(scsi_qla_host_t *vha ) { int rval ; int alen ; uint32_t size ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *entries ; struct ct_fdmi_port_attr *eiter ; struct init_cb_24xx *icb24 ; struct new_utsname *p_sysid ; void *tmp ; __u32 tmp___0 ; size_t tmp___1 ; __u16 tmp___2 ; size_t tmp___3 ; __u16 tmp___4 ; u64 tmp___5 ; int tmp___6 ; { ha = vha->hw; icb24 = (struct init_cb_24xx *)ha->init_cb; p_sysid = (struct new_utsname *)0; tmp = (*((ha->isp_ops)->prep_ms_fdmi_iocb))(vha, 0U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, 529, 16); ct_rsp = & (ha->ct_sns)->p.rsp; memcpy((void *)(& ct_req->req.rpa.port_name), (void const *)(& vha->port_name), 8UL); size = 12U; ct_req->req.rpa.attrs.count = 100663296U; entries = (void *)(& ct_req->req.rpa.port_name); eiter = (struct ct_fdmi_port_attr *)entries + (unsigned long )size; eiter->type = 256U; eiter->len = 9216U; eiter->a.fc4_types[2] = 1U; size = size + 36U; ql_dbg(268435456U, vha, 8249, "FC4_TYPES=%02x %02x.\n", (int )eiter->a.fc4_types[2], (int )eiter->a.fc4_types[1]); eiter = (struct ct_fdmi_port_attr *)entries + (unsigned long )size; eiter->type = 512U; eiter->len = 2048U; if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { eiter->a.sup_speed = 67108864U; } else if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { eiter->a.sup_speed = 1879048192U; } else if ((ha->device_type & 32768U) != 0U) { eiter->a.sup_speed = 939524096U; } else if ((ha->device_type & 2048U) != 0U) { eiter->a.sup_speed = 452984832U; } else if ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) { eiter->a.sup_speed = 184549376U; } else if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { eiter->a.sup_speed = 50331648U; } else { eiter->a.sup_speed = 16777216U; } size = size + 8U; ql_dbg(268435456U, vha, 8250, "Supported_Speed=%x.\n", eiter->a.sup_speed); eiter = (struct ct_fdmi_port_attr *)entries + (unsigned long )size; eiter->type = 768U; eiter->len = 2048U; switch ((int )ha->link_data_rate) { case 0: eiter->a.cur_speed = 16777216U; goto ldv_66067; case 1: eiter->a.cur_speed = 33554432U; goto ldv_66067; case 3: eiter->a.cur_speed = 134217728U; goto ldv_66067; case 4: eiter->a.cur_speed = 
268435456U; goto ldv_66067; case 19: eiter->a.cur_speed = 67108864U; goto ldv_66067; case 5: eiter->a.cur_speed = 536870912U; goto ldv_66067; case 6: eiter->a.cur_speed = 1073741824U; goto ldv_66067; default: eiter->a.cur_speed = 8388608U; goto ldv_66067; } ldv_66067: size = size + 8U; ql_dbg(268435456U, vha, 8251, "Current_Speed=%x.\n", eiter->a.cur_speed); eiter = (struct ct_fdmi_port_attr *)entries + (unsigned long )size; eiter->type = 1024U; eiter->len = 2048U; eiter->a.max_frame_size = (uint32_t )((ha->device_type & 134217728U) != 0U ? icb24->frame_payload_size : (ha->init_cb)->frame_payload_size); tmp___0 = __fswab32(eiter->a.max_frame_size); eiter->a.max_frame_size = tmp___0; size = size + 8U; ql_dbg(268435456U, vha, 8252, "Max_Frame_Size=%x.\n", eiter->a.max_frame_size); eiter = (struct ct_fdmi_port_attr *)entries + (unsigned long )size; eiter->type = 1280U; snprintf((char *)(& eiter->a.os_dev_name), 32UL, "%s:host%lu", (char *)"qla2xxx", vha->host_no); tmp___1 = strlen((char const *)(& eiter->a.os_dev_name)); alen = (int )tmp___1; alen = (4 - (alen & 3)) + alen; tmp___2 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___2; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8267, "OS_Device_Name=%s.\n", (uint8_t *)(& eiter->a.os_dev_name)); eiter = (struct ct_fdmi_port_attr *)entries + (unsigned long )size; eiter->type = 1536U; p_sysid = utsname(); if ((unsigned long )p_sysid != (unsigned long )((struct new_utsname *)0)) { snprintf((char *)(& eiter->a.host_name), 256UL, "%s", (char *)(& p_sysid->nodename)); } else { snprintf((char *)(& eiter->a.host_name), 256UL, "%s", (char *)(& ((struct fc_host_attrs *)(vha->host)->shost_data)->system_hostname)); } tmp___3 = strlen((char const *)(& eiter->a.host_name)); alen = (int )tmp___3; alen = (4 - (alen & 3)) + alen; tmp___4 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___4; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8253, "HostName=%s.\n", (uint8_t *)(& eiter->a.host_name)); qla2x00_update_ms_fdmi_iocb(vha, size + 16U); tmp___5 = wwn_to_u64((u8 *)(& ct_req->req.rpa.port_name)); ql_dbg(268435456U, vha, 8254, "RPA portname %016llx, size = %d.\n", tmp___5, size); ql_dump_buffer(268566528U, vha, 8313, (uint8_t *)entries, size); rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8256, "RPA issue IOCB failed (%d).\n", rval); } else { tmp___6 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA"); if (tmp___6 != 0) { rval = 258; if ((unsigned int )ct_rsp->header.reason_code == 9U && (unsigned int )ct_rsp->header.explanation_code == 16U) { ql_dbg(268435456U, vha, 8397, "RPA already registered.\n"); rval = 265; } else { } } else { ql_dbg(268435456U, vha, 8257, "RPA exiting normally.\n"); } } return (rval); } } static int qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha ) { int rval ; int alen ; uint32_t size ; uint32_t sn ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *entries ; struct ct_fdmiv2_hba_attr *eiter ; struct qla_hw_data *ha ; struct init_cb_24xx *icb24 ; struct new_utsname *p_sysid ; void *tmp ; u64 tmp___0 ; size_t tmp___1 ; size_t tmp___2 ; __u16 tmp___3 ; size_t tmp___4 ; __u16 tmp___5 ; size_t tmp___6 ; __u16 tmp___7 ; size_t tmp___8 ; __u16 tmp___9 ; int tmp___10 ; int tmp___11 ; size_t tmp___12 ; __u16 tmp___13 ; size_t tmp___14 ; __u16 tmp___15 ; size_t tmp___16 ; __u16 tmp___17 ; size_t tmp___18 ; __u16 tmp___19 ; size_t tmp___20 ; __u16 tmp___21 ; 
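/* The tmp___N locals below are CIL-introduced temporaries holding strlen()
 * and __fswab16() results used while building the FDMI v2 RHBA payload.
 * The function body follows one pattern per attribute: place an entry at
 * entries + size, pad its value to a 4-byte multiple, store the padded
 * length plus the 4-byte attribute header in ->len (byte-swapped to
 * big-endian), and advance size by the same amount. */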
__u32 tmp___22 ; size_t tmp___23 ; __u16 tmp___24 ; u64 tmp___25 ; size_t tmp___26 ; __u16 tmp___27 ; size_t tmp___28 ; __u16 tmp___29 ; u64 tmp___30 ; int tmp___31 ; { ha = vha->hw; icb24 = (struct init_cb_24xx *)ha->init_cb; p_sysid = (struct new_utsname *)0; tmp = (*((ha->isp_ops)->prep_ms_fdmi_iocb))(vha, 0U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, 512, 16); ct_rsp = & (ha->ct_sns)->p.rsp; memcpy((void *)(& ct_req->req.rhba2.hba_identifier), (void const *)(& vha->port_name), 8UL); ct_req->req.rhba2.entry_count = 16777216U; memcpy((void *)(& ct_req->req.rhba2.port_name), (void const *)(& vha->port_name), 8UL); size = 24U; ct_req->req.rhba2.attrs.count = 285212672U; entries = (void *)(& ct_req->req.rhba2.hba_identifier); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 256U; eiter->len = 3072U; memcpy((void *)(& eiter->a.node_name), (void const *)(& vha->node_name), 8UL); size = size + 12U; tmp___0 = wwn_to_u64((u8 *)(& eiter->a.node_name)); ql_dbg(268435456U, vha, 8317, "NodeName = %016llx.\n", tmp___0); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 512U; snprintf((char *)(& eiter->a.manufacturer), 64UL, "%s", (char *)"QLogic Corporation"); tmp___1 = strlen("QLogic Corporation"); eiter->a.manufacturer[tmp___1] = 0U; tmp___2 = strlen((char const *)(& eiter->a.manufacturer)); alen = (int )tmp___2; alen = (4 - (alen & 3)) + alen; tmp___3 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___3; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8357, "Manufacturer = %s.\n", (uint8_t *)(& eiter->a.manufacturer)); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 768U; if ((ha->device_type & 134217728U) != 0U) { qla2xxx_get_vpd_field(vha, (char *)"SN", (char *)(& eiter->a.serial_num), 32UL); } else { sn = (uint32_t )(((((int )ha->serial0 & 31) << 16) | ((int )ha->serial2 << 8)) | (int )ha->serial1); snprintf((char *)(& eiter->a.serial_num), 32UL, "%c%05d", sn / 100000U + 65U, sn % 100000U); } tmp___4 = strlen((char const *)(& eiter->a.serial_num)); alen = (int )tmp___4; alen = (4 - (alen & 3)) + alen; tmp___5 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___5; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8358, "Serial no. 
= %s.\n", (uint8_t *)(& eiter->a.serial_num)); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 1024U; snprintf((char *)(& eiter->a.model), 17UL, "%s", (uint8_t *)(& ha->model_number)); tmp___6 = strlen((char const *)(& eiter->a.model)); alen = (int )tmp___6; alen = (4 - (alen & 3)) + alen; tmp___7 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___7; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8359, "Model Name = %s.\n", (uint8_t *)(& eiter->a.model)); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 1280U; snprintf((char *)(& eiter->a.model_desc), 80UL, "%s", (char *)(& ha->model_desc)); tmp___8 = strlen((char const *)(& eiter->a.model_desc)); alen = (int )tmp___8; alen = (4 - (alen & 3)) + alen; tmp___9 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___9; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8360, "Model Desc = %s.\n", (uint8_t *)(& eiter->a.model_desc)); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 1536U; if ((ha->device_type & 134217728U) == 0U) { snprintf((char *)(& eiter->a.hw_version), 16UL, "HW:%s", (uint8_t *)(& ha->adapter_id)); } else { tmp___11 = qla2xxx_get_vpd_field(vha, (char *)"MN", (char *)(& eiter->a.hw_version), 16UL); if (tmp___11 != 0) { } else { tmp___10 = qla2xxx_get_vpd_field(vha, (char *)"EC", (char *)(& eiter->a.hw_version), 16UL); if (tmp___10 != 0) { } else { snprintf((char *)(& eiter->a.hw_version), 16UL, "HW:%s", (uint8_t *)(& ha->adapter_id)); } } } tmp___12 = strlen((char const *)(& eiter->a.hw_version)); alen = (int )tmp___12; alen = (4 - (alen & 3)) + alen; tmp___13 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___13; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8361, "Hardware ver = %s.\n", (uint8_t *)(& eiter->a.hw_version)); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 1792U; snprintf((char *)(& eiter->a.driver_version), 32UL, "%s", (char *)(& qla2x00_version_str)); tmp___14 = strlen((char const *)(& eiter->a.driver_version)); alen = (int )tmp___14; alen = (4 - (alen & 3)) + alen; tmp___15 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___15; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8362, "Driver ver = %s.\n", (uint8_t *)(& eiter->a.driver_version)); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 2048U; snprintf((char *)(& eiter->a.orom_version), 16UL, "%d.%02d", (int )ha->bios_revision[1], (int )ha->bios_revision[0]); tmp___16 = strlen((char const *)(& eiter->a.orom_version)); alen = (int )tmp___16; alen = (4 - (alen & 3)) + alen; tmp___17 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___17; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8363, "Optrom version = %d.%02d.\n", (int )eiter->a.orom_version[1], (int )eiter->a.orom_version[0]); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 2304U; (*((ha->isp_ops)->fw_version_str))(vha, (char *)(& eiter->a.fw_version), 32UL); tmp___18 = strlen((char const *)(& eiter->a.fw_version)); alen = (int )tmp___18; alen = (4 - (alen & 3)) + alen; tmp___19 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___19; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8364, "Firmware vers = %s.\n", (uint8_t *)(& eiter->a.fw_version)); eiter = (struct 
ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 2560U; p_sysid = utsname(); if ((unsigned long )p_sysid != (unsigned long )((struct new_utsname *)0)) { snprintf((char *)(& eiter->a.os_version), 128UL, "%s %s %s", (char *)(& p_sysid->sysname), (char *)(& p_sysid->release), (char *)(& p_sysid->version)); } else { snprintf((char *)(& eiter->a.os_version), 128UL, "%s %s", (char *)"Linux", (char *)(& ((struct fc_host_attrs *)(vha->host)->shost_data)->system_hostname)); } tmp___20 = strlen((char const *)(& eiter->a.os_version)); alen = (int )tmp___20; alen = (4 - (alen & 3)) + alen; tmp___21 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___21; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8366, "OS Name and Version = %s.\n", (uint8_t *)(& eiter->a.os_version)); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 2816U; eiter->a.max_ct_len = (uint32_t )((ha->device_type & 134217728U) != 0U ? icb24->frame_payload_size : (ha->init_cb)->frame_payload_size); tmp___22 = __fswab32(eiter->a.max_ct_len); eiter->a.max_ct_len = tmp___22; eiter->len = 2048U; size = size + 8U; ql_dbg(268435456U, vha, 8367, "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 3072U; qla2x00_get_sym_node_name(vha, (uint8_t *)(& eiter->a.sym_name), 256UL); tmp___23 = strlen((char const *)(& eiter->a.sym_name)); alen = (int )tmp___23; alen = (4 - (alen & 3)) + alen; tmp___24 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___24; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8368, "Symbolic Name = %s.\n", (uint8_t *)(& eiter->a.sym_name)); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 3328U; eiter->a.vendor_id = 1997537280U; eiter->len = 2048U; size = size + 8U; ql_dbg(268435456U, vha, 8369, "Vendor Id = %x.\n", eiter->a.vendor_id); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 3584U; eiter->a.num_ports = 16777216U; eiter->len = 2048U; size = size + 8U; ql_dbg(268435456U, vha, 8370, "Port Num = %x.\n", eiter->a.num_ports); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 3840U; memcpy((void *)(& eiter->a.fabric_name), (void const *)(& vha->fabric_node_name), 8UL); eiter->len = 3072U; size = size + 12U; tmp___25 = wwn_to_u64((u8 *)(& eiter->a.fabric_name)); ql_dbg(268435456U, vha, 8371, "Fabric Name = %016llx.\n", tmp___25); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 4096U; snprintf((char *)(& eiter->a.bios_name), 32UL, "BIOS %d.%02d", (int )ha->bios_revision[1], (int )ha->bios_revision[0]); tmp___26 = strlen((char const *)(& eiter->a.bios_name)); alen = (int )tmp___26; alen = (4 - (alen & 3)) + alen; tmp___27 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___27; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8372, "BIOS Name = %s\n", (uint8_t *)(& eiter->a.bios_name)); eiter = (struct ct_fdmiv2_hba_attr *)entries + (unsigned long )size; eiter->type = 57344U; snprintf((char *)(& eiter->a.vendor_indentifer), 8UL, "%s", (char *)"QLGC"); tmp___28 = strlen((char const *)(& eiter->a.vendor_indentifer)); alen = (int )tmp___28; alen = (4 - (alen & 3)) + alen; tmp___29 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___29; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8369, "Vendor Identifier = 
%s.\n", (uint8_t *)(& eiter->a.vendor_indentifer)); qla2x00_update_ms_fdmi_iocb(vha, size + 16U); tmp___30 = wwn_to_u64((u8 *)(& ct_req->req.rhba2.hba_identifier)); ql_dbg(268435456U, vha, 8373, "RHBA identifier = %016llx.\n", tmp___30); ql_dump_buffer(268566528U, vha, 8374, (uint8_t *)entries, size); rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8375, "RHBA issue IOCB failed (%d).\n", rval); } else { tmp___31 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA"); if (tmp___31 != 0) { rval = 258; if ((unsigned int )ct_rsp->header.reason_code == 9U && (unsigned int )ct_rsp->header.explanation_code == 16U) { ql_dbg(268435456U, vha, 8376, "HBA already registered.\n"); rval = 265; } else { ql_dbg(268435456U, vha, 8214, "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n", (int )ct_rsp->header.reason_code, (int )ct_rsp->header.explanation_code); } } else { ql_dbg(268435456U, vha, 8377, "RHBA FDMI V2 exiting normally.\n"); } } return (rval); } } static int qla2x00_fdmi_dhba(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *tmp ; int tmp___0 ; { ha = vha->hw; tmp = (*((ha->isp_ops)->prep_ms_fdmi_iocb))(vha, 24U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, 768, 16); ct_rsp = & (ha->ct_sns)->p.rsp; memcpy((void *)(& ct_req->req.dhba.port_name), (void const *)(& vha->port_name), 8UL); ql_dbg(268435456U, vha, 8246, "DHBA portname = %8phN.\n", (uint8_t *)(& ct_req->req.dhba.port_name)); rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8247, "DHBA issue IOCB failed (%d).\n", rval); } else { tmp___0 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA"); if (tmp___0 != 0) { rval = 258; } else { ql_dbg(268435456U, vha, 8248, "DHBA exiting normally.\n"); } } return (rval); } } static int qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha ) { int rval ; int alen ; uint32_t size ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *entries ; struct ct_fdmiv2_port_attr *eiter ; struct init_cb_24xx *icb24 ; struct new_utsname *p_sysid ; void *tmp ; __u32 tmp___0 ; size_t tmp___1 ; __u16 tmp___2 ; size_t tmp___3 ; __u16 tmp___4 ; u64 tmp___5 ; u64 tmp___6 ; size_t tmp___7 ; __u16 tmp___8 ; u64 tmp___9 ; __u32 tmp___10 ; int tmp___11 ; { ha = vha->hw; icb24 = (struct init_cb_24xx *)ha->init_cb; p_sysid = (struct new_utsname *)0; tmp = (*((ha->isp_ops)->prep_ms_fdmi_iocb))(vha, 0U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, 529, 16); ct_rsp = & (ha->ct_sns)->p.rsp; memcpy((void *)(& ct_req->req.rpa2.port_name), (void const *)(& vha->port_name), 8UL); size = 12U; ct_req->req.rpa2.attrs.count = 268435456U; entries = (void *)(& ct_req->req.rpa2.port_name); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 256U; eiter->len = 9216U; eiter->a.fc4_types[2] = 1U; size = size + 36U; ql_dbg(268435456U, vha, 8378, "FC4_TYPES=%02x %02x.\n", (int )eiter->a.fc4_types[2], (int )eiter->a.fc4_types[1]); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 512U; eiter->len = 2048U; if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { eiter->a.sup_speed = 67108864U; } else if ((ha->device_type & 
524288U) != 0U || (ha->device_type & 1048576U) != 0U) { eiter->a.sup_speed = 1879048192U; } else if ((ha->device_type & 32768U) != 0U) { eiter->a.sup_speed = 939524096U; } else if ((ha->device_type & 2048U) != 0U) { eiter->a.sup_speed = 452984832U; } else if ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) { eiter->a.sup_speed = 184549376U; } else if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { eiter->a.sup_speed = 50331648U; } else { eiter->a.sup_speed = 16777216U; } size = size + 8U; ql_dbg(268435456U, vha, 8379, "Supported Port Speed = %x.\n", eiter->a.sup_speed); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 768U; eiter->len = 2048U; switch ((int )ha->link_data_rate) { case 0: eiter->a.cur_speed = 16777216U; goto ldv_66113; case 1: eiter->a.cur_speed = 33554432U; goto ldv_66113; case 3: eiter->a.cur_speed = 134217728U; goto ldv_66113; case 4: eiter->a.cur_speed = 268435456U; goto ldv_66113; case 19: eiter->a.cur_speed = 67108864U; goto ldv_66113; case 5: eiter->a.cur_speed = 536870912U; goto ldv_66113; case 6: eiter->a.cur_speed = 1073741824U; goto ldv_66113; default: eiter->a.cur_speed = 8388608U; goto ldv_66113; } ldv_66113: size = size + 8U; ql_dbg(268435456U, vha, 8380, "Current_Speed = %x.\n", eiter->a.cur_speed); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 1024U; eiter->len = 2048U; eiter->a.max_frame_size = (uint32_t )((ha->device_type & 134217728U) != 0U ? icb24->frame_payload_size : (ha->init_cb)->frame_payload_size); tmp___0 = __fswab32(eiter->a.max_frame_size); eiter->a.max_frame_size = tmp___0; size = size + 8U; ql_dbg(268435456U, vha, 8380, "Max_Frame_Size = %x.\n", eiter->a.max_frame_size); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 1280U; tmp___1 = strlen("qla2xxx"); alen = (int )tmp___1; snprintf((char *)(& eiter->a.os_dev_name), 32UL, "%s:host%lu", (char *)"qla2xxx", vha->host_no); alen = (4 - (alen & 3)) + alen; tmp___2 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___2; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8382, "OS_Device_Name = %s.\n", (uint8_t *)(& eiter->a.os_dev_name)); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 1536U; p_sysid = utsname(); if ((unsigned long )p_sysid != (unsigned long )((struct new_utsname *)0)) { snprintf((char *)(& eiter->a.host_name), 256UL, "%s", (char *)(& p_sysid->nodename)); } else { snprintf((char *)(& eiter->a.host_name), 256UL, "%s", (char *)(& ((struct fc_host_attrs *)(vha->host)->shost_data)->system_hostname)); } tmp___3 = strlen((char const *)(& eiter->a.host_name)); alen = (int )tmp___3; alen = (4 - (alen & 3)) + alen; tmp___4 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___4; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8253, "HostName=%s.\n", (uint8_t *)(& eiter->a.host_name)); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 1792U; memcpy((void *)(& eiter->a.node_name), (void const *)(& vha->node_name), 8UL); eiter->len = 3072U; size = size + 12U; tmp___5 = wwn_to_u64((u8 *)(& eiter->a.node_name)); ql_dbg(268435456U, vha, 8384, "Node Name = %016llx.\n", tmp___5); eiter = (struct 
ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 2048U; memcpy((void *)(& eiter->a.port_name), (void const *)(& vha->port_name), 8UL); eiter->len = 3072U; size = size + 12U; tmp___6 = wwn_to_u64((u8 *)(& eiter->a.port_name)); ql_dbg(268435456U, vha, 8385, "Port Name = %016llx.\n", tmp___6); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 2304U; qla2x00_get_sym_node_name(vha, (uint8_t *)(& eiter->a.port_sym_name), 128UL); tmp___7 = strlen((char const *)(& eiter->a.port_sym_name)); alen = (int )tmp___7; alen = (4 - (alen & 3)) + alen; tmp___8 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___8; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8386, "port symbolic name = %s\n", (uint8_t *)(& eiter->a.port_sym_name)); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 2560U; eiter->a.port_type = 2130706432U; eiter->len = 2048U; size = size + 8U; ql_dbg(268435456U, vha, 8387, "Port Type = %x.\n", eiter->a.port_type); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 2816U; eiter->a.port_supported_cos = 134217728U; eiter->len = 2048U; size = size + 8U; ql_dbg(268435456U, vha, 8388, "Supported COS = %08x\n", eiter->a.port_supported_cos); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 3072U; memcpy((void *)(& eiter->a.fabric_name), (void const *)(& vha->fabric_node_name), 8UL); eiter->len = 3072U; size = size + 12U; tmp___9 = wwn_to_u64((u8 *)(& eiter->a.fabric_name)); ql_dbg(268435456U, vha, 8389, "Fabric Name = %016llx.\n", tmp___9); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 3328U; eiter->a.port_fc4_type[0] = 0U; eiter->a.port_fc4_type[1] = 0U; eiter->a.port_fc4_type[2] = 1U; eiter->a.port_fc4_type[3] = 0U; eiter->len = 9216U; size = size + 36U; ql_dbg(268435456U, vha, 8390, "Port Active FC4 Type = %02x %02x.\n", (int )eiter->a.port_fc4_type[2], (int )eiter->a.port_fc4_type[1]); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 257U; eiter->a.port_state = 16777216U; eiter->len = 2048U; size = size + 8U; ql_dbg(268435456U, vha, 8391, "Port State = %x.\n", eiter->a.port_state); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 513U; eiter->a.num_ports = 16777216U; eiter->len = 2048U; size = size + 8U; ql_dbg(268435456U, vha, 8392, "Number of ports = %x.\n", eiter->a.num_ports); eiter = (struct ct_fdmiv2_port_attr *)entries + (unsigned long )size; eiter->type = 769U; tmp___10 = __fswab32(vha->d_id.b24); eiter->a.port_id = tmp___10; eiter->len = 2048U; size = size + 8U; ql_dbg(268435456U, vha, 8392, "Port Id = %x.\n", eiter->a.port_id); qla2x00_update_ms_fdmi_iocb(vha, size + 16U); ql_dbg(268435456U, vha, 8254, "RPA portname= %8phN size=%d.\n", (uint8_t *)(& ct_req->req.rpa.port_name), size); ql_dump_buffer(268566528U, vha, 8394, (uint8_t *)entries, size); rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8395, "RPA FDMI v2 issue IOCB failed (%d).\n", rval); } else { tmp___11 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA"); if (tmp___11 != 0) { rval = 258; if ((unsigned int )ct_rsp->header.reason_code == 9U && (unsigned int )ct_rsp->header.explanation_code == 16U) { ql_dbg(268435456U, vha, 8398, "RPA FDMI v2 already registered\n"); rval = 265; } else { ql_dbg(268435456U, vha, 8224, "RPA FDMI v2 failed, CT Reason code: 0x%x, CT 
Explanation 0x%x\n", (int )ct_rsp->header.reason_code, (int )ct_rsp->header.explanation_code); } } else { ql_dbg(268435456U, vha, 8396, "RPA FDMI V2 exiting normally.\n"); } } return (rval); } } int qla2x00_fdmi_register(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; { rval = 258; ha = vha->hw; if (((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) || (ha->device_type & 131072U) != 0U) { return (258); } else { } rval = qla2x00_mgmt_svr_login(vha); if (rval != 0) { return (rval); } else { } rval = qla2x00_fdmiv2_rhba(vha); if (rval != 0) { if (rval != 265) { goto try_fdmi; } else { } rval = qla2x00_fdmi_dhba(vha); if (rval != 0) { goto try_fdmi; } else { } rval = qla2x00_fdmiv2_rhba(vha); if (rval != 0) { goto try_fdmi; } else { } } else { } rval = qla2x00_fdmiv2_rpa(vha); if (rval != 0) { goto try_fdmi; } else { } goto out; try_fdmi: rval = qla2x00_fdmi_rhba(vha); if (rval != 0) { if (rval != 265) { return (rval); } else { } rval = qla2x00_fdmi_dhba(vha); if (rval != 0) { return (rval); } else { } rval = qla2x00_fdmi_rhba(vha); if (rval != 0) { return (rval); } else { } } else { } rval = qla2x00_fdmi_rpa(vha); out: ; return (rval); } } int qla2x00_gfpn_id(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; uint16_t i ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *tmp ; int tmp___0 ; { rval = 0; ha = vha->hw; if ((ha->device_type & 67108864U) == 0U) { return (258); } else { } i = 0U; goto ldv_66140; ldv_66139: tmp = (*((ha->isp_ops)->prep_ms_iocb))(vha, 20U, 24U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 284, 24); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.port_id.port_id[0] = (list + (unsigned long )i)->d_id.b.domain; ct_req->req.port_id.port_id[1] = (list + (unsigned long )i)->d_id.b.area; ct_req->req.port_id.port_id[2] = (list + (unsigned long )i)->d_id.b.al_pa; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8227, "GFPN_ID issue IOCB failed (%d).\n", rval); goto ldv_66138; } else { tmp___0 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GFPN_ID"); if (tmp___0 != 0) { rval = 258; goto ldv_66138; } else { memcpy((void *)(& (list + (unsigned long )i)->fabric_port_name), (void const *)(& ct_rsp->rsp.gfpn_id.port_name), 8UL); } } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_66138; } else { } i = (uint16_t )((int )i + 1); ldv_66140: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_66139; } else { } ldv_66138: ; return (rval); } } __inline static void *qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) { struct ct_entry_24xx *ct_pkt ; struct qla_hw_data *ha ; { ha = vha->hw; ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; memset((void *)ct_pkt, 0, 64UL); ct_pkt->entry_type = 41U; ct_pkt->entry_count = 1U; ct_pkt->nport_handle = vha->mgmt_svr_loop_id; ct_pkt->timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; ct_pkt->cmd_dsd_count = 1U; ct_pkt->rsp_dsd_count = 1U; ct_pkt->rsp_byte_count = rsp_size; ct_pkt->cmd_byte_count = req_size; ct_pkt->dseg_0_address[0] = (unsigned int )ha->ct_sns_dma; ct_pkt->dseg_0_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; ct_pkt->dseg_1_address[0] = (unsigned int )ha->ct_sns_dma; ct_pkt->dseg_1_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; ct_pkt->vp_index = (uint8_t )vha->vp_idx; return ((void 
*)ct_pkt); } } __inline static struct ct_sns_req *qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p , uint16_t cmd , uint16_t rsp_size ) { __u16 tmp ; __u16 tmp___0 ; { memset((void *)p, 0, 8208UL); p->p.req.header.revision = 1U; p->p.req.header.gs_type = 250U; p->p.req.header.gs_subtype = 1U; tmp = __fswab16((int )cmd); p->p.req.command = tmp; tmp___0 = __fswab16((int )((__u16 )(((int )rsp_size + -16) / 4))); p->p.req.max_rsp_size = tmp___0; return (& p->p.req); } } int qla2x00_gpsc(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; uint16_t i ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *tmp ; __u16 tmp___0 ; __u16 tmp___1 ; __u16 tmp___2 ; { ha = vha->hw; if ((ha->device_type & 67108864U) == 0U) { return (258); } else { } if (*((unsigned long *)ha + 2UL) == 0UL) { return (258); } else { } rval = qla2x00_mgmt_svr_login(vha); if (rval != 0) { return (rval); } else { } i = 0U; goto ldv_66173; ldv_66172: tmp = qla24xx_prep_ms_fm_iocb(vha, 24U, 20U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, 295, 20); ct_rsp = & (ha->ct_sns)->p.rsp; memcpy((void *)(& ct_req->req.gpsc.port_name), (void const *)(& (list + (unsigned long )i)->fabric_port_name), 8UL); rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8281, "GPSC issue IOCB failed (%d).\n", rval); } else { rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GPSC"); if (rval != 0) { if (rval == 1 && ((unsigned int )ct_rsp->header.reason_code == 1U || (unsigned int )ct_rsp->header.reason_code == 11U)) { ql_dbg(268435456U, vha, 8282, "GPSC command unsupported, disabling query.\n"); ha->flags.gpsc_supported = 0U; rval = 258; goto ldv_66163; } else { } rval = 258; } else { tmp___0 = __fswab16((int )ct_rsp->rsp.gpsc.speed); switch ((int )tmp___0) { case 32768: (list + (unsigned long )i)->fp_speed = 0U; goto ldv_66165; case 16384: (list + (unsigned long )i)->fp_speed = 1U; goto ldv_66165; case 8192: (list + (unsigned long )i)->fp_speed = 3U; goto ldv_66165; case 4096: (list + (unsigned long )i)->fp_speed = 19U; goto ldv_66165; case 2048: (list + (unsigned long )i)->fp_speed = 4U; goto ldv_66165; case 1024: (list + (unsigned long )i)->fp_speed = 5U; goto ldv_66165; case 256: (list + (unsigned long )i)->fp_speed = 6U; goto ldv_66165; } ldv_66165: tmp___1 = __fswab16((int )ct_rsp->rsp.gpsc.speed); tmp___2 = __fswab16((int )ct_rsp->rsp.gpsc.speeds); ql_dbg(268435456U, vha, 8283, "GPSC ext entry - fpn %8phN speeds=%04x speed=%04x.\n", (uint8_t *)(& (list + (unsigned long )i)->fabric_port_name), (int )tmp___2, (int )tmp___1); } } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_66163; } else { } i = (uint16_t )((int )i + 1); ldv_66173: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_66172; } else { } ldv_66163: ; return (rval); } } void qla2x00_gff_id(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; uint16_t i ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; struct qla_hw_data *ha ; uint8_t fcp_scsi_features ; void *tmp ; int tmp___0 ; { ha = vha->hw; fcp_scsi_features = 0U; i = 0U; goto ldv_66188; ldv_66187: (list + (unsigned long )i)->fc4_type = 255U; if ((ha->device_type & 134217728U) == 0U) { goto ldv_66185; } else { } tmp = (*((ha->isp_ops)->prep_ms_iocb))(vha, 20U, 144U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 287, 144); ct_rsp = & (ha->ct_sns)->p.rsp; 
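/* GFF_ID (Get FC-4 Features) query for the current entry: the 3-byte port ID
 * is loaded into the request next, and on an accepted response the low nibble
 * of fc4_features[7] decides whether the port is flagged as an FCP/SCSI
 * device (fc4_type = 8) or not (fc4_type = 0). Entries start out as 255,
 * presumably meaning "type unknown", and the loop stops at the first entry
 * whose d_id.b.rsvd_1 marker is non-zero. */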
ct_req->req.port_id.port_id[0] = (list + (unsigned long )i)->d_id.b.domain; ct_req->req.port_id.port_id[1] = (list + (unsigned long )i)->d_id.b.area; ct_req->req.port_id.port_id[2] = (list + (unsigned long )i)->d_id.b.al_pa; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8284, "GFF_ID issue IOCB failed (%d).\n", rval); } else { tmp___0 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GFF_ID"); if (tmp___0 != 0) { ql_dbg(268435456U, vha, 8285, "GFF_ID IOCB status had a failure status code.\n"); } else { fcp_scsi_features = ct_rsp->rsp.gff_id.fc4_features[7]; fcp_scsi_features = (unsigned int )fcp_scsi_features & 15U; if ((unsigned int )fcp_scsi_features != 0U) { (list + (unsigned long )i)->fc4_type = 8U; } else { (list + (unsigned long )i)->fc4_type = 0U; } } } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_66186; } else { } ldv_66185: i = (uint16_t )((int )i + 1); ldv_66188: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_66187; } else { } ldv_66186: ; return; } } void activate_pending_timer_19(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_19 == (unsigned long )timer) { if (ldv_timer_state_19 == 2 || pending_flag != 0) { ldv_timer_list_19 = timer; ldv_timer_list_19->data = data; ldv_timer_state_19 = 1; } else { } return; } else { } reg_timer_19(timer); ldv_timer_list_19->data = data; return; } } void choose_timer_19(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_19 = 2; return; } } int reg_timer_19(struct timer_list *timer ) { { ldv_timer_list_19 = timer; ldv_timer_state_19 = 1; return (0); } } void disable_suitable_timer_19(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_19) { ldv_timer_state_19 = 0; return; } else { } return; } } bool ldv_queue_work_on_129(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_130(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_131(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_132(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_133(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int 
ldv_scsi_add_host_with_dma_134(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } void ldv__builtin_va_end(__builtin_va_list * ) ; void ldv__builtin_va_start(__builtin_va_list * ) ; extern int printk(char const * , ...) ; bool ldv_queue_work_on_145(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_147(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_146(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_149(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_148(struct workqueue_struct *ldv_func_arg1 ) ; int reg_timer_20(struct timer_list *timer ) ; void disable_suitable_timer_20(struct timer_list *timer ) ; void activate_pending_timer_20(struct timer_list *timer , unsigned long data , int pending_flag ) ; void choose_timer_20(struct timer_list *timer ) ; int ldv_scsi_add_host_with_dma_150(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; void qla2xxx_dump_post_process(scsi_qla_host_t *vha , int rval ) ; int qla27xx_dump_mpi_ram(struct qla_hw_data *ha , uint32_t addr , uint32_t *ram , uint32_t ram_dwords , void **nxt ) ; int qla24xx_dump_ram(struct qla_hw_data *ha , uint32_t addr , uint32_t *ram , uint32_t ram_dwords , void **nxt ) ; void qla24xx_pause_risc(struct device_reg_24xx *reg , struct qla_hw_data *ha ) ; int qla24xx_soft_reset(struct qla_hw_data *ha ) ; static uint32_t ql_dbg_offset = 2048U; __inline static void qla2xxx_prep_dump(struct qla_hw_data *ha , struct qla2xxx_fw_dump *fw_dump ) { __u32 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; __u32 tmp___5 ; __u32 tmp___6 ; { tmp = __fswab32((__u32 )ha->fw_major_version); fw_dump->fw_major_version = tmp; tmp___0 = __fswab32((__u32 )ha->fw_minor_version); fw_dump->fw_minor_version = tmp___0; tmp___1 = __fswab32((__u32 )ha->fw_subminor_version); fw_dump->fw_subminor_version = tmp___1; tmp___2 = __fswab32((__u32 )ha->fw_attributes); fw_dump->fw_attributes = tmp___2; tmp___3 = __fswab32((__u32 )(ha->pdev)->vendor); fw_dump->vendor = tmp___3; tmp___4 = __fswab32((__u32 )(ha->pdev)->device); fw_dump->device = tmp___4; tmp___5 = __fswab32((__u32 )(ha->pdev)->subsystem_vendor); fw_dump->subsystem_vendor = tmp___5; tmp___6 = __fswab32((__u32 )(ha->pdev)->subsystem_device); fw_dump->subsystem_device = tmp___6; return; } } __inline static void *qla2xxx_copy_queues(struct qla_hw_data *ha , void *ptr ) { struct req_que *req ; struct rsp_que *rsp ; { req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); memcpy(ptr, (void const *)req->ring, (unsigned long )req->length * 64UL); ptr = ptr + (unsigned long )req->length * 64UL; memcpy(ptr, (void const *)rsp->ring, (unsigned long )rsp->length * 64UL); return (ptr + (unsigned long )rsp->length * 64UL); } } int qla27xx_dump_mpi_ram(struct qla_hw_data *ha , uint32_t addr , uint32_t *ram , uint32_t ram_dwords , void **nxt ) { int rval ; uint32_t cnt ; uint32_t stat ; uint32_t timer ; uint32_t dwords ; uint32_t idx ; 
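/* The loop below reads MPI RAM in chunks sized to the gid_list DMA buffer:
 * mailbox0 is loaded with command 5, mailboxes 1/8 with the RAM address,
 * 2/3 and 6/7 with the DMA address halves, 4/5 with the dword count, then an
 * HCCR write (presumably set-host-interrupt) kicks off the command and
 * host_status is polled, bounded by the timer countdown, for a
 * mailbox-completion status. Each completed chunk is copied into ram[],
 * byte-swapped except on what appear to be 82xx-type parts, and rval becomes
 * 258 (the generic failure status) if no completion arrives. */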
uint16_t mb0 ; uint16_t mb1 ; struct device_reg_24xx *reg ; dma_addr_t dump_dma ; uint32_t *dump ; int tmp ; __u32 tmp___0 ; int tmp___1 ; { reg = & (ha->iobase)->isp24; dump_dma = ha->gid_list_dma; dump = (uint32_t *)ha->gid_list; rval = 0; mb0 = 0U; writew(5, (void volatile *)(& reg->mailbox0)); clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); tmp = qla2x00_gid_list_size(ha); dwords = (uint32_t )(tmp / 4); cnt = 0U; goto ldv_65787; ldv_65786: ; if (cnt + dwords > ram_dwords) { dwords = ram_dwords - cnt; } else { } writew((int )((unsigned short )addr), (void volatile *)(& reg->mailbox1)); writew((int )((unsigned short )(addr >> 16)), (void volatile *)(& reg->mailbox8)); writew((int )((unsigned short )((unsigned int )dump_dma >> 16)), (void volatile *)(& reg->mailbox2)); writew((int )((unsigned short )dump_dma), (void volatile *)(& reg->mailbox3)); writew((int )((unsigned short )((unsigned int )(dump_dma >> 32ULL) >> 16)), (void volatile *)(& reg->mailbox6)); writew((int )((unsigned short )(dump_dma >> 32ULL)), (void volatile *)(& reg->mailbox7)); writew((int )((unsigned short )(dwords >> 16)), (void volatile *)(& reg->mailbox4)); writew((int )((unsigned short )dwords), (void volatile *)(& reg->mailbox5)); writew(0, (void volatile *)(& reg->mailbox9)); writel(1342177280U, (void volatile *)(& reg->hccr)); ha->flags.mbox_int = 0U; timer = 6000000U; goto ldv_65782; ldv_65781: stat = readl((void const volatile *)(& reg->host_status)); if ((stat & 32768U) != 0U) { stat = stat & 255U; if (((stat == 1U || stat == 2U) || stat == 16U) || stat == 17U) { set_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); mb0 = readw((void const volatile *)(& reg->mailbox0)); mb1 = readw((void const volatile *)(& reg->mailbox1)); writel(2684354560U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); goto ldv_65780; } else { } writel(2684354560U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); } else { } __const_udelay(21475UL); timer = timer - 1U; ldv_65782: ; if (timer != 0U) { goto ldv_65781; } else { } ldv_65780: ha->flags.mbox_int = 1U; tmp___1 = test_and_clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (tmp___1 != 0) { rval = (int )mb0 & 16383; idx = 0U; goto ldv_65784; ldv_65783: ; if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { *(ram + (unsigned long )(cnt + idx)) = *(dump + (unsigned long )idx); } else { tmp___0 = __fswab32(*(dump + (unsigned long )idx)); *(ram + (unsigned long )(cnt + idx)) = tmp___0; } idx = idx + 1U; ldv_65784: ; if (idx < dwords) { goto ldv_65783; } else { } } else { rval = 258; } cnt = cnt + dwords; addr = addr + dwords; ldv_65787: ; if (cnt < ram_dwords && rval == 0) { goto ldv_65786; } else { } *nxt = rval == 0 ? 
(void *)ram + (unsigned long )cnt : (void *)0; return (rval); } } int qla24xx_dump_ram(struct qla_hw_data *ha , uint32_t addr , uint32_t *ram , uint32_t ram_dwords , void **nxt ) { int rval ; uint32_t cnt ; uint32_t stat ; uint32_t timer ; uint32_t dwords ; uint32_t idx ; uint16_t mb0 ; struct device_reg_24xx *reg ; dma_addr_t dump_dma ; uint32_t *dump ; int tmp ; __u32 tmp___0 ; int tmp___1 ; { reg = & (ha->iobase)->isp24; dump_dma = ha->gid_list_dma; dump = (uint32_t *)ha->gid_list; rval = 0; mb0 = 0U; writew(12, (void volatile *)(& reg->mailbox0)); clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); tmp = qla2x00_gid_list_size(ha); dwords = (uint32_t )(tmp / 4); cnt = 0U; goto ldv_65813; ldv_65812: ; if (cnt + dwords > ram_dwords) { dwords = ram_dwords - cnt; } else { } writew((int )((unsigned short )addr), (void volatile *)(& reg->mailbox1)); writew((int )((unsigned short )(addr >> 16)), (void volatile *)(& reg->mailbox8)); writew((int )((unsigned short )((unsigned int )dump_dma >> 16)), (void volatile *)(& reg->mailbox2)); writew((int )((unsigned short )dump_dma), (void volatile *)(& reg->mailbox3)); writew((int )((unsigned short )((unsigned int )(dump_dma >> 32ULL) >> 16)), (void volatile *)(& reg->mailbox6)); writew((int )((unsigned short )(dump_dma >> 32ULL)), (void volatile *)(& reg->mailbox7)); writew((int )((unsigned short )(dwords >> 16)), (void volatile *)(& reg->mailbox4)); writew((int )((unsigned short )dwords), (void volatile *)(& reg->mailbox5)); writel(1342177280U, (void volatile *)(& reg->hccr)); ha->flags.mbox_int = 0U; timer = 6000000U; goto ldv_65808; ldv_65807: stat = readl((void const volatile *)(& reg->host_status)); if ((stat & 32768U) != 0U) { stat = stat & 255U; if (((stat == 1U || stat == 2U) || stat == 16U) || stat == 17U) { set_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); mb0 = readw((void const volatile *)(& reg->mailbox0)); writel(2684354560U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); goto ldv_65806; } else { } writel(2684354560U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); } else { } __const_udelay(21475UL); timer = timer - 1U; ldv_65808: ; if (timer != 0U) { goto ldv_65807; } else { } ldv_65806: ha->flags.mbox_int = 1U; tmp___1 = test_and_clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (tmp___1 != 0) { rval = (int )mb0 & 16383; idx = 0U; goto ldv_65810; ldv_65809: ; if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { *(ram + (unsigned long )(cnt + idx)) = *(dump + (unsigned long )idx); } else { tmp___0 = __fswab32(*(dump + (unsigned long )idx)); *(ram + (unsigned long )(cnt + idx)) = tmp___0; } idx = idx + 1U; ldv_65810: ; if (idx < dwords) { goto ldv_65809; } else { } } else { rval = 258; } cnt = cnt + dwords; addr = addr + dwords; ldv_65813: ; if (cnt < ram_dwords && rval == 0) { goto ldv_65812; } else { } *nxt = rval == 0 ? 
(void *)ram + (unsigned long )cnt : (void *)0; return (rval); } } static int qla24xx_dump_memory(struct qla_hw_data *ha , uint32_t *code_ram , uint32_t cram_size , void **nxt ) { int rval ; { rval = qla24xx_dump_ram(ha, 131072U, code_ram, cram_size / 4U, nxt); if (rval != 0) { return (rval); } else { } set_bit(4L, (unsigned long volatile *)(& ha->fw_dump_cap_flags)); rval = qla24xx_dump_ram(ha, 1048576U, (uint32_t *)*nxt, ha->fw_memory_size - 1048575U, nxt); if (rval == 0) { set_bit(5L, (unsigned long volatile *)(& ha->fw_dump_cap_flags)); } else { } return (rval); } } static uint32_t *qla24xx_read_window(struct device_reg_24xx *reg , uint32_t iobase , uint32_t count , uint32_t *buf ) { uint32_t *dmp_reg ; uint32_t *tmp ; uint32_t *tmp___0 ; unsigned int tmp___1 ; __u32 tmp___2 ; uint32_t tmp___3 ; { writel(iobase, (void volatile *)(& reg->iobase_addr)); dmp_reg = & reg->iobase_window; goto ldv_65830; ldv_65829: tmp = buf; buf = buf + 1; tmp___0 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___1 = readl((void const volatile *)tmp___0); tmp___2 = __fswab32(tmp___1); *tmp = tmp___2; ldv_65830: tmp___3 = count; count = count - 1U; if (tmp___3 != 0U) { goto ldv_65829; } else { } return (buf); } } void qla24xx_pause_risc(struct device_reg_24xx *reg , struct qla_hw_data *ha ) { unsigned int tmp ; { writel(805306368U, (void volatile *)(& reg->hccr)); __const_udelay(429500UL); tmp = readl((void const volatile *)(& reg->host_status)); if ((tmp & 256U) != 0U) { set_bit(0L, (unsigned long volatile *)(& ha->fw_dump_cap_flags)); } else { } return; } } int qla24xx_soft_reset(struct qla_hw_data *ha ) { int rval ; uint32_t cnt ; uint16_t wd ; struct device_reg_24xx *reg ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; unsigned int tmp___2 ; unsigned short tmp___3 ; { rval = 0; reg = & (ha->iobase)->isp24; writel(65584U, (void volatile *)(& reg->ctrl_status)); cnt = 0U; goto ldv_65845; ldv_65844: tmp = readl((void const volatile *)(& reg->ctrl_status)); if ((tmp & 131072U) == 0U) { goto ldv_65843; } else { } __const_udelay(42950UL); cnt = cnt + 1U; ldv_65845: ; if (cnt <= 29999U) { goto ldv_65844; } else { } ldv_65843: tmp___0 = readl((void const volatile *)(& reg->ctrl_status)); if ((tmp___0 & 131072U) == 0U) { set_bit(1L, (unsigned long volatile *)(& ha->fw_dump_cap_flags)); } else { } writel(65585U, (void volatile *)(& reg->ctrl_status)); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & wd); __const_udelay(429500UL); cnt = 0U; goto ldv_65848; ldv_65847: tmp___1 = readl((void const volatile *)(& reg->ctrl_status)); if ((tmp___1 & 1U) == 0U) { goto ldv_65846; } else { } __const_udelay(42950UL); cnt = cnt + 1U; ldv_65848: ; if (cnt <= 29999U) { goto ldv_65847; } else { } ldv_65846: tmp___2 = readl((void const volatile *)(& reg->ctrl_status)); if ((tmp___2 & 1U) == 0U) { set_bit(2L, (unsigned long volatile *)(& ha->fw_dump_cap_flags)); } else { } writel(536870912U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); cnt = 10000U; goto ldv_65850; ldv_65849: ; if (cnt != 0U) { __const_udelay(42950UL); } else { rval = 256; } cnt = cnt - 1U; ldv_65850: tmp___3 = readw((void const volatile *)(& reg->mailbox0)); if ((unsigned int )tmp___3 != 0U && rval == 0) { goto ldv_65849; } else { } if (rval == 0) { set_bit(3L, (unsigned long volatile *)(& ha->fw_dump_cap_flags)); } else { } return (rval); } } static int qla2xxx_dump_ram(struct qla_hw_data *ha , uint32_t addr , uint16_t *ram , uint32_t ram_words , void **nxt ) { int rval ; uint32_t cnt ; uint32_t stat ; uint32_t 
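/* qla2xxx_dump_ram(): legacy ISP2100/2200/2300 RAM-dump helper. It writes 12 (0x0C, presumably the
 * dump-RISC-RAM mailbox command) to mailbox0, then transfers RAM in chunks of qla2x00_gid_list_size()/2
 * sixteen-bit words: each chunk is DMA'd into ha->gid_list, completion is polled via host_status, and
 * every word is byte-swapped with __fswab16() into the caller's buffer. On success *nxt points just
 * past the copied words; on timeout rval is set to 258. */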
timer ; uint32_t words ; uint32_t idx ; uint16_t mb0 ; struct device_reg_2xxx *reg ; dma_addr_t dump_dma ; uint16_t *dump ; int tmp ; __u16 tmp___0 ; int tmp___1 ; { reg = & (ha->iobase)->isp; dump_dma = ha->gid_list_dma; dump = (uint16_t *)ha->gid_list; rval = 0; mb0 = 0U; writew(12, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); tmp = qla2x00_gid_list_size(ha); words = (uint32_t )(tmp / 2); cnt = 0U; goto ldv_65876; ldv_65875: ; if (cnt + words > ram_words) { words = ram_words - cnt; } else { } writew((int )((unsigned short )addr), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void volatile *)(& reg->u.isp2100.mailbox0) + 1U : (void volatile *)(& reg->u.isp2300.mailbox0) + 1U); writew((int )((unsigned short )(addr >> 16)), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void volatile *)(& reg->u_end.isp2200.mailbox8) : (void volatile *)(& reg->u.isp2300.mailbox0) + 8U); writew((int )((unsigned short )((unsigned int )dump_dma >> 16)), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void volatile *)(& reg->u.isp2100.mailbox0) + 2U : (void volatile *)(& reg->u.isp2300.mailbox0) + 2U); writew((int )((unsigned short )dump_dma), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void volatile *)(& reg->u.isp2100.mailbox0) + 3U : (void volatile *)(& reg->u.isp2300.mailbox0) + 3U); writew((int )((unsigned short )((unsigned int )(dump_dma >> 32ULL) >> 16)), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void volatile *)(& reg->u.isp2100.mailbox0) + 6U : (void volatile *)(& reg->u.isp2300.mailbox0) + 6U); writew((int )((unsigned short )(dump_dma >> 32ULL)), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void volatile *)(& reg->u.isp2100.mailbox0) + 7U : (void volatile *)(& reg->u.isp2300.mailbox0) + 7U); writew((int )((unsigned short )words), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void volatile *)(& reg->u.isp2100.mailbox0) + 4U : (void volatile *)(& reg->u.isp2300.mailbox0) + 4U); writew(20480, (void volatile *)(& reg->hccr)); timer = 6000000U; goto ldv_65871; ldv_65870: stat = readl((void const volatile *)(& reg->u.isp2300.host_status)); if ((stat & 32768U) != 0U) { stat = stat & 255U; if (stat == 1U || stat == 2U) { set_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); mb0 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); writew(0, (void volatile *)(& reg->semaphore)); writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); goto ldv_65869; } else if (stat == 16U || stat == 17U) { set_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); mb0 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); goto ldv_65869; } else { } writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); } else { } __const_udelay(21475UL); timer = timer - 1U; ldv_65871: ; if (timer != 0U) { goto ldv_65870; } else { } ldv_65869: tmp___1 = test_and_clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (tmp___1 != 0) { rval = (int )mb0 & 16383; idx = 0U; goto ldv_65873; ldv_65872: tmp___0 = __fswab16((int )*(dump + (unsigned long )idx)); *(ram + (unsigned long )(cnt + idx)) = tmp___0; idx = idx + 1U; ldv_65873: ; if (idx < words) { goto ldv_65872; } else { } } else { rval = 258; } cnt = cnt + words; addr = addr + words; ldv_65876: ; if (cnt < ram_words && rval == 0) { goto ldv_65875; } else { } *nxt = rval == 0 ? (void *)ram + (unsigned long )cnt : (void *)0; return (rval); } } __inline static void qla2xxx_read_window(struct device_reg_2xxx *reg , uint32_t count , uint16_t *buf ) { uint16_t *dmp_reg ; uint16_t *tmp ; uint16_t *tmp___0 ; unsigned short tmp___1 ; __u16 tmp___2 ; uint32_t tmp___3 ; { dmp_reg = & reg->u.isp2300.fb_cmd; goto ldv_65885; ldv_65884: tmp = buf; buf = buf + 1; tmp___0 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___1 = readw((void const volatile *)tmp___0); tmp___2 = __fswab16((int )tmp___1); *tmp = tmp___2; ldv_65885: tmp___3 = count; count = count - 1U; if (tmp___3 != 0U) { goto ldv_65884; } else { } return; } } __inline static void *qla24xx_copy_eft(struct qla_hw_data *ha , void *ptr ) { __u32 tmp ; __u32 tmp___0 ; { if ((unsigned long )ha->eft == (unsigned long )((void *)0)) { return (ptr); } else { } tmp = __fswab32((ha->fw_dump)->eft_size); memcpy(ptr, (void const *)ha->eft, (size_t )tmp); tmp___0 = __fswab32((ha->fw_dump)->eft_size); return (ptr + (unsigned long )tmp___0); } } __inline static void *qla25xx_copy_fce(struct qla_hw_data *ha , void *ptr , uint32_t **last_chain ) { uint32_t cnt ; uint32_t *iter_reg ; struct qla2xxx_fce_chain *fcec ; __u32 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; uint32_t *tmp___3 ; __u32 tmp___4 ; __u32 tmp___5 ; __u32 tmp___6 ; { fcec = (struct qla2xxx_fce_chain *)ptr; if ((unsigned long )ha->fce == (unsigned long )((void *)0)) { return (ptr); } else { } *last_chain = & fcec->type; fcec->type = 4042981247U; tmp = __fswab32(ha->fce_bufs * 1024U + 52U); fcec->chain_size = tmp; tmp___0 = __fswab32(ha->fce_bufs * 1024U); fcec->size = tmp___0; tmp___1 = __fswab32((unsigned int )ha->fce_dma); fcec->addr_l = tmp___1; tmp___2 = __fswab32((unsigned int )(ha->fce_dma >> 32ULL)); fcec->addr_h = tmp___2; iter_reg = (uint32_t *)(& fcec->eregs); cnt = 0U; goto ldv_65900; ldv_65899: tmp___3 = iter_reg; iter_reg = iter_reg + 1; tmp___4 = __fswab32((__u32 )ha->fce_mb[cnt]); *tmp___3 = tmp___4; cnt = cnt + 1U; ldv_65900: ; if (cnt <= 7U) { goto ldv_65899; } else { } tmp___5 = __fswab32(fcec->size); memcpy((void *)iter_reg, (void const *)ha->fce, (size_t )tmp___5); tmp___6 = __fswab32(fcec->size); return ((void *)iter_reg + (unsigned long )tmp___6); } } __inline static void *qla2xxx_copy_atioqueues(struct qla_hw_data *ha , void *ptr , uint32_t **last_chain ) { struct qla2xxx_mqueue_chain *q ; struct qla2xxx_mqueue_header *qh ; uint32_t num_queues ; int que ; struct __anonstruct_aq_521 aq ; struct __anonstruct_aqp_522 *aqp ; __u32 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; { if ((unsigned long )ha->tgt.atio_ring == (unsigned long )((struct atio *)0)) { return (ptr); } else { } 
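/* Append the ATIO queue as an optional dump "chain": write a chain-type tag plus byte-swapped
 * chain/queue sizes into a qla2xxx_mqueue_chain and qla2xxx_mqueue_header pair, then memcpy the raw
 * ring contents (64 bytes per entry) directly behind the header. qla25xx_copy_mqueues() below reuses
 * the same pattern for the per-queue request and response rings. */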
num_queues = 1U; aqp = & aq; aqp->length = (int )ha->tgt.atio_q_length; aqp->ring = (void *)ha->tgt.atio_ring; que = 0; goto ldv_65917; ldv_65916: q = (struct qla2xxx_mqueue_chain *)ptr; *last_chain = & q->type; q->type = 4076535679U; tmp = __fswab32((__u32 )((unsigned long )aqp->length) * 64U + 20U); q->chain_size = tmp; ptr = ptr + 8UL; qh = (struct qla2xxx_mqueue_header *)ptr; qh->queue = 50331648U; tmp___0 = __fswab32((__u32 )que); qh->number = tmp___0; tmp___1 = __fswab32((__u32 )((unsigned long )aqp->length) * 64U); qh->size = tmp___1; ptr = ptr + 12UL; memcpy(ptr, (void const *)aqp->ring, (unsigned long )aqp->length * 64UL); ptr = ptr + (unsigned long )aqp->length * 64UL; que = que + 1; ldv_65917: ; if ((uint32_t )que < num_queues) { goto ldv_65916; } else { } return (ptr); } } __inline static void *qla25xx_copy_mqueues(struct qla_hw_data *ha , void *ptr , uint32_t **last_chain ) { struct qla2xxx_mqueue_chain *q ; struct qla2xxx_mqueue_header *qh ; struct req_que *req ; struct rsp_que *rsp ; int que ; __u32 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; { if ((unsigned int )ha->mqenable == 0U) { return (ptr); } else { } que = 1; goto ldv_65931; ldv_65930: req = *(ha->req_q_map + (unsigned long )que); if ((unsigned long )req == (unsigned long )((struct req_que *)0)) { goto ldv_65929; } else { } q = (struct qla2xxx_mqueue_chain *)ptr; *last_chain = & q->type; q->type = 4076535679U; tmp = __fswab32((__u32 )req->length * 64U + 20U); q->chain_size = tmp; ptr = ptr + 8UL; qh = (struct qla2xxx_mqueue_header *)ptr; qh->queue = 16777216U; tmp___0 = __fswab32((__u32 )que); qh->number = tmp___0; tmp___1 = __fswab32((__u32 )req->length * 64U); qh->size = tmp___1; ptr = ptr + 12UL; memcpy(ptr, (void const *)req->ring, (unsigned long )req->length * 64UL); ptr = ptr + (unsigned long )req->length * 64UL; que = que + 1; ldv_65931: ; if ((int )ha->max_req_queues > que) { goto ldv_65930; } else { } ldv_65929: que = 1; goto ldv_65934; ldv_65933: rsp = *(ha->rsp_q_map + (unsigned long )que); if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { goto ldv_65932; } else { } q = (struct qla2xxx_mqueue_chain *)ptr; *last_chain = & q->type; q->type = 4076535679U; tmp___2 = __fswab32((__u32 )rsp->length * 64U + 20U); q->chain_size = tmp___2; ptr = ptr + 8UL; qh = (struct qla2xxx_mqueue_header *)ptr; qh->queue = 33554432U; tmp___3 = __fswab32((__u32 )que); qh->number = tmp___3; tmp___4 = __fswab32((__u32 )rsp->length * 64U); qh->size = tmp___4; ptr = ptr + 12UL; memcpy(ptr, (void const *)rsp->ring, (unsigned long )rsp->length * 64UL); ptr = ptr + (unsigned long )rsp->length * 64UL; que = que + 1; ldv_65934: ; if ((int )ha->max_rsp_queues > que) { goto ldv_65933; } else { } ldv_65932: ; return (ptr); } } __inline static void *qla25xx_copy_mq(struct qla_hw_data *ha , void *ptr , uint32_t **last_chain ) { uint32_t cnt ; uint32_t que_idx ; uint8_t que_cnt ; struct qla2xxx_mq_chain *mq ; device_reg_t *reg ; __u32 tmp ; unsigned int tmp___0 ; __u32 tmp___1 ; unsigned int tmp___2 ; __u32 tmp___3 ; unsigned int tmp___4 ; __u32 tmp___5 ; unsigned int tmp___6 ; __u32 tmp___7 ; { mq = (struct qla2xxx_mq_chain *)ptr; if (((unsigned int )ha->mqenable == 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { return (ptr); } else { } mq = (struct qla2xxx_mq_chain *)ptr; *last_chain = & mq->type; mq->type = 4059758463U; mq->chain_size = 201457664U; que_cnt = (uint8_t )((int 
)ha->max_req_queues > (int )ha->max_rsp_queues ? ha->max_req_queues : ha->max_rsp_queues); tmp = __fswab32((__u32 )que_cnt); mq->count = tmp; cnt = 0U; goto ldv_65946; ldv_65945: reg = ((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) ? ha->mqiobase + (unsigned long )(cnt * 4096U) : ha->iobase; que_idx = cnt * 4U; tmp___0 = readl((void const volatile *)(& reg->isp25mq.req_q_in)); tmp___1 = __fswab32(tmp___0); mq->qregs[que_idx] = tmp___1; tmp___2 = readl((void const volatile *)(& reg->isp25mq.req_q_out)); tmp___3 = __fswab32(tmp___2); mq->qregs[que_idx + 1U] = tmp___3; tmp___4 = readl((void const volatile *)(& reg->isp25mq.rsp_q_in)); tmp___5 = __fswab32(tmp___4); mq->qregs[que_idx + 2U] = tmp___5; tmp___6 = readl((void const volatile *)(& reg->isp25mq.rsp_q_out)); tmp___7 = __fswab32(tmp___6); mq->qregs[que_idx + 3U] = tmp___7; cnt = cnt + 1U; ldv_65946: ; if ((uint32_t )que_cnt > cnt) { goto ldv_65945; } else { } return (ptr + 524UL); } } void qla2xxx_dump_post_process(scsi_qla_host_t *vha , int rval ) { struct qla_hw_data *ha ; { ha = vha->hw; if (rval != 0) { ql_log(1U, vha, 53248, "Failed to dump firmware (%x), dump status flags (0x%lx).\n", rval, ha->fw_dump_cap_flags); ha->fw_dumped = 0; } else { ql_log(2U, vha, 53249, "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n", vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags); ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, 0U); } return; } } void qla2300_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { int rval ; uint32_t cnt ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; uint16_t *dmp_reg ; unsigned long flags ; struct qla2300_fw_dump *fw ; void *nxt ; struct scsi_qla_host *base_vha ; void *tmp ; raw_spinlock_t *tmp___0 ; unsigned short tmp___1 ; __u16 tmp___2 ; unsigned short tmp___3 ; uint16_t *tmp___4 ; unsigned short tmp___5 ; __u16 tmp___6 ; uint16_t *tmp___7 ; unsigned short tmp___8 ; __u16 tmp___9 ; uint16_t *tmp___10 ; unsigned short tmp___11 ; __u16 tmp___12 ; uint16_t *tmp___13 ; unsigned short tmp___14 ; __u16 tmp___15 ; unsigned short tmp___16 ; unsigned short tmp___17 ; { ha = vha->hw; reg = & (ha->iobase)->isp; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; flags = 0UL; if (hardware_locked == 0) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 53250, "No buffer available for dump.\n"); goto qla2300_fw_dump_failed; } else { } if (ha->fw_dumped != 0) { ql_log(1U, vha, 53251, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto qla2300_fw_dump_failed; } else { } fw = & (ha->fw_dump)->isp.isp23; qla2xxx_prep_dump(ha, ha->fw_dump); rval = 0; tmp___1 = readw((void const volatile *)(& reg->hccr)); tmp___2 = __fswab16((int )tmp___1); fw->hccr = tmp___2; writew(8192, (void volatile *)(& reg->hccr)); if ((ha->device_type & 4U) != 0U) { cnt = 30000U; goto ldv_65971; ldv_65970: ; if (cnt != 0U) { __const_udelay(429500UL); } else { rval = 256; } cnt = cnt - 1U; ldv_65971: tmp___3 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___3 & 32) == 0 && rval == 0) { goto ldv_65970; } else { } } else { readw((void const volatile *)(& reg->hccr)); __const_udelay(42950UL); } if (rval == 0) { dmp_reg = & reg->flash_address; cnt = 0U; goto ldv_65974; 
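/* The goto/label pairs below are CIL's lowering of the driver's register-capture loops: each one
 * walks a block of 16-bit registers (PBIU, RISC host, mailbox, DMA, RISC hardware/GP and frame-buffer
 * banks), byte-swaps the values with __fswab16(), and stores them in the qla2300_fw_dump structure
 * before the RISC RAM itself is dumped. */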
ldv_65973: tmp___4 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___5 = readw((void const volatile *)tmp___4); tmp___6 = __fswab16((int )tmp___5); fw->pbiu_reg[cnt] = tmp___6; cnt = cnt + 1U; ldv_65974: ; if (cnt <= 7U) { goto ldv_65973; } else { } dmp_reg = & reg->u.isp2300.req_q_in; cnt = 0U; goto ldv_65977; ldv_65976: tmp___7 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___8 = readw((void const volatile *)tmp___7); tmp___9 = __fswab16((int )tmp___8); fw->risc_host_reg[cnt] = tmp___9; cnt = cnt + 1U; ldv_65977: ; if (cnt <= 7U) { goto ldv_65976; } else { } dmp_reg = & reg->u.isp2300.mailbox0; cnt = 0U; goto ldv_65980; ldv_65979: tmp___10 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___11 = readw((void const volatile *)tmp___10); tmp___12 = __fswab16((int )tmp___11); fw->mailbox_reg[cnt] = tmp___12; cnt = cnt + 1U; ldv_65980: ; if (cnt <= 31U) { goto ldv_65979; } else { } writew(64, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 32U, (uint16_t *)(& fw->resp_dma_reg)); writew(80, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 48U, (uint16_t *)(& fw->dma_reg)); writew(0, (void volatile *)(& reg->ctrl_status)); dmp_reg = & reg->risc_hw; cnt = 0U; goto ldv_65983; ldv_65982: tmp___13 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___14 = readw((void const volatile *)tmp___13); tmp___15 = __fswab16((int )tmp___14); fw->risc_hdw_reg[cnt] = tmp___15; cnt = cnt + 1U; ldv_65983: ; if (cnt <= 15U) { goto ldv_65982; } else { } writew(8192, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp0_reg)); writew(8704, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp1_reg)); writew(9216, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp2_reg)); writew(9728, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp3_reg)); writew(10240, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp4_reg)); writew(10752, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp5_reg)); writew(11264, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp6_reg)); writew(11776, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp7_reg)); writew(16, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 64U, (uint16_t *)(& fw->frame_buf_hdw_reg)); writew(32, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 64U, (uint16_t *)(& fw->fpm_b0_reg)); writew(48, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 64U, (uint16_t *)(& fw->fpm_b1_reg)); writew(1, (void volatile *)(& reg->ctrl_status)); cnt = 0U; goto ldv_65987; ldv_65986: tmp___16 = readw((void const volatile *)(& reg->ctrl_status)); if (((int )tmp___16 & 1) == 0) { goto ldv_65985; } else { } __const_udelay(42950UL); cnt = cnt + 1U; ldv_65987: ; if (cnt <= 29999U) { goto ldv_65986; } else { } ldv_65985: ; } else { } if ((ha->device_type & 4U) == 0U) { cnt = 30000U; goto ldv_65989; ldv_65988: ; if (cnt != 0U) { __const_udelay(429500UL); } else { rval = 256; } cnt = cnt - 1U; ldv_65989: tmp___17 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); if ((unsigned int )tmp___17 != 0U && rval == 0) { goto ldv_65988; } else { } } else { } if (rval == 0) { rval = qla2xxx_dump_ram(ha, 2048U, (uint16_t *)(& fw->risc_ram), 63488U, & nxt); } else { } if (rval == 0) { rval = qla2xxx_dump_ram(ha, 65536U, (uint16_t *)(& fw->stack_ram), 4096U, & nxt); } else { } if (rval == 0) { rval = qla2xxx_dump_ram(ha, 69632U, (uint16_t *)(& fw->data_ram), ha->fw_memory_size - 69631U, & nxt); } else { } if (rval == 0) { qla2xxx_copy_queues(ha, nxt); } else { } qla2xxx_dump_post_process(base_vha, rval); qla2300_fw_dump_failed: ; if (hardware_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } return; } } void qla2100_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { int rval ; uint32_t cnt ; uint32_t timer ; uint16_t risc_address ; uint16_t mb0 ; uint16_t mb2 ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; uint16_t *dmp_reg ; unsigned long flags ; struct qla2100_fw_dump *fw ; struct scsi_qla_host *base_vha ; void *tmp ; raw_spinlock_t *tmp___0 ; unsigned short tmp___1 ; __u16 tmp___2 ; unsigned short tmp___3 ; uint16_t *tmp___4 ; unsigned short tmp___5 ; __u16 tmp___6 ; uint16_t *tmp___7 ; unsigned short tmp___8 ; __u16 tmp___9 ; uint16_t *tmp___10 ; unsigned short tmp___11 ; __u16 tmp___12 ; uint16_t *tmp___13 ; unsigned short tmp___14 ; __u16 tmp___15 ; unsigned short tmp___16 ; unsigned short tmp___17 ; unsigned short tmp___18 ; unsigned short tmp___19 ; unsigned short tmp___20 ; __u16 tmp___21 ; int tmp___22 ; { ha = vha->hw; reg = & (ha->iobase)->isp; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; risc_address = 0U; mb2 = 0U; mb0 = mb2; flags = 0UL; if (hardware_locked == 0) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 53252, "No buffer available for dump.\n"); goto qla2100_fw_dump_failed; } else { } if (ha->fw_dumped != 0) { ql_log(1U, vha, 53253, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto qla2100_fw_dump_failed; } else { } fw = & (ha->fw_dump)->isp.isp21; qla2xxx_prep_dump(ha, ha->fw_dump); rval = 0; tmp___1 = readw((void const volatile *)(& reg->hccr)); tmp___2 = __fswab16((int )tmp___1); fw->hccr = tmp___2; writew(8192, (void volatile *)(& reg->hccr)); cnt = 30000U; goto ldv_66012; ldv_66011: ; if (cnt != 0U) { __const_udelay(429500UL); } else { rval = 256; } cnt = cnt - 1U; ldv_66012: tmp___3 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___3 & 32) == 0 && rval == 0) { goto ldv_66011; } else { } if (rval == 0) { dmp_reg = & reg->flash_address; cnt = 0U; goto ldv_66015; ldv_66014: tmp___4 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___5 = readw((void const volatile *)tmp___4); tmp___6 = __fswab16((int )tmp___5); fw->pbiu_reg[cnt] = tmp___6; cnt = cnt + 1U; ldv_66015: ; if (cnt <= 7U) { goto ldv_66014; } else { } dmp_reg = & reg->u.isp2100.mailbox0; cnt = 0U; goto ldv_66018; ldv_66017: ; if (cnt == 8U) { dmp_reg = & reg->u_end.isp2200.mailbox8; } else { } tmp___7 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___8 = readw((void const volatile *)tmp___7); tmp___9 = __fswab16((int )tmp___8); fw->mailbox_reg[cnt] = tmp___9; cnt = cnt + 1U; ldv_66018: ; if ((uint32_t )ha->mbx_count > cnt) { goto ldv_66017; } else { } dmp_reg = (uint16_t *)(& reg->u.isp2100.unused_2); cnt = 0U; goto ldv_66021; ldv_66020: tmp___10 = dmp_reg; dmp_reg = dmp_reg + 
1; tmp___11 = readw((void const volatile *)tmp___10); tmp___12 = __fswab16((int )tmp___11); fw->dma_reg[cnt] = tmp___12; cnt = cnt + 1U; ldv_66021: ; if (cnt <= 47U) { goto ldv_66020; } else { } writew(0, (void volatile *)(& reg->ctrl_status)); dmp_reg = & reg->risc_hw; cnt = 0U; goto ldv_66024; ldv_66023: tmp___13 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___14 = readw((void const volatile *)tmp___13); tmp___15 = __fswab16((int )tmp___14); fw->risc_hdw_reg[cnt] = tmp___15; cnt = cnt + 1U; ldv_66024: ; if (cnt <= 15U) { goto ldv_66023; } else { } writew(8192, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp0_reg)); writew(8448, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp1_reg)); writew(8704, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp2_reg)); writew(8960, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp3_reg)); writew(9216, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp4_reg)); writew(9472, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp5_reg)); writew(9728, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp6_reg)); writew(9984, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp7_reg)); writew(16, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->frame_buf_hdw_reg)); writew(32, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 64U, (uint16_t *)(& fw->fpm_b0_reg)); writew(48, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 64U, (uint16_t *)(& fw->fpm_b1_reg)); writew(1, (void volatile *)(& reg->ctrl_status)); } else { } cnt = 30000U; goto ldv_66027; ldv_66026: ; if (cnt != 0U) { __const_udelay(429500UL); } else { rval = 256; } cnt = cnt - 1U; ldv_66027: tmp___16 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); if ((unsigned int )tmp___16 != 0U && rval == 0) { goto ldv_66026; } else { } if (rval == 0) { if ((ha->device_type & 2U) != 0U) { goto _L; } else if ((int )ha->device_type & 1) { tmp___18 = readw((void const volatile *)(& reg->mctr)); if (((int )tmp___18 & 3) != 0) { _L: /* CIL Label */ writew(8192, (void volatile *)(& reg->hccr)); cnt = 30000U; goto ldv_66030; ldv_66029: ; if (cnt != 0U) { __const_udelay(429500UL); } else { rval = 256; } cnt = cnt - 1U; ldv_66030: tmp___17 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___17 & 32) == 0 && rval == 0) { goto ldv_66029; } else { } if (rval == 0) { if ((int )ha->device_type & 1) { writew(241, (void volatile *)(& reg->mctr)); } else { writew(242, (void volatile *)(& reg->mctr)); } readw((void const volatile *)(& reg->mctr)); writew(12288, (void volatile *)(& reg->hccr)); } else { } } else { } } else { } } else { } if (rval == 0) { risc_address = 4096U; writew(5, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); } else { } cnt = 0U; goto ldv_66036; ldv_66035: writew((int )risc_address, (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
(void volatile *)(& reg->u.isp2100.mailbox0) + 1U : (void volatile *)(& reg->u.isp2300.mailbox0) + 1U); writew(20480, (void volatile *)(& reg->hccr)); timer = 6000000U; goto ldv_66034; ldv_66033: tmp___20 = readw((void const volatile *)(& reg->istatus)); if (((int )tmp___20 & 8) != 0) { tmp___19 = readw((void const volatile *)(& reg->semaphore)); if ((int )tmp___19 & 1) { set_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); mb0 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); mb2 = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 2U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 2U); writew(0, (void volatile *)(& reg->semaphore)); writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); goto ldv_66032; } else { } writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); } else { } __const_udelay(21475UL); timer = timer - 1U; ldv_66034: ; if (timer != 0U) { goto ldv_66033; } else { } ldv_66032: tmp___22 = test_and_clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (tmp___22 != 0) { rval = (int )mb0 & 16383; tmp___21 = __fswab16((int )mb2); fw->risc_ram[cnt] = tmp___21; } else { rval = 258; } cnt = cnt + 1U; risc_address = (uint16_t )((int )risc_address + 1); ldv_66036: ; if (cnt <= 61439U && rval == 0) { goto ldv_66035; } else { } if (rval == 0) { qla2xxx_copy_queues(ha, (void *)(& fw->risc_ram) + (unsigned long )cnt); } else { } qla2xxx_dump_post_process(base_vha, rval); qla2100_fw_dump_failed: ; if (hardware_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } return; } } void qla24xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { int rval ; uint32_t cnt ; uint32_t risc_address ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; uint32_t *dmp_reg ; uint32_t *iter_reg ; uint16_t *mbx_reg ; unsigned long flags ; struct qla24xx_fw_dump *fw ; uint32_t ext_mem_cnt ; void *nxt ; void *nxt_chain ; uint32_t *last_chain ; struct scsi_qla_host *base_vha ; void *tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; __u32 tmp___2 ; uint32_t *tmp___3 ; unsigned int tmp___4 ; __u32 tmp___5 ; unsigned int tmp___6 ; __u32 tmp___7 ; unsigned int tmp___8 ; __u32 tmp___9 ; unsigned int tmp___10 ; __u32 tmp___11 ; unsigned int tmp___12 ; __u32 tmp___13 ; unsigned int tmp___14 ; __u32 tmp___15 ; unsigned int tmp___16 ; __u32 tmp___17 ; unsigned int tmp___18 ; __u32 tmp___19 ; uint16_t *tmp___20 ; unsigned short tmp___21 ; __u16 tmp___22 ; uint32_t *tmp___23 ; uint32_t *tmp___24 ; unsigned int tmp___25 ; __u32 tmp___26 ; uint32_t *tmp___27 ; uint32_t *tmp___28 ; unsigned int tmp___29 ; __u32 tmp___30 ; uint32_t *tmp___31 ; uint32_t *tmp___32 ; unsigned int tmp___33 ; __u32 tmp___34 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; last_chain = (uint32_t *)0U; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return; } else { } ext_mem_cnt = 0U; risc_address = ext_mem_cnt; flags = 0UL; ha->fw_dump_cap_flags = 0UL; if (hardware_locked == 0) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 53254, "No buffer available for dump.\n"); goto qla24xx_fw_dump_failed; } 
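/* With a dump buffer available, the ISP24xx path captures host_status, pauses the RISC, snapshots the
 * host, shadow and mailbox registers plus each register window, soft-resets the chip, then dumps code
 * RAM and external memory and appends the queue/EFT/ATIO chains. */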
else { } if (ha->fw_dumped != 0) { ql_log(1U, vha, 53255, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto qla24xx_fw_dump_failed; } else { } fw = & (ha->fw_dump)->isp.isp24; qla2xxx_prep_dump(ha, ha->fw_dump); tmp___1 = readl((void const volatile *)(& reg->host_status)); tmp___2 = __fswab32(tmp___1); fw->host_status = tmp___2; qla24xx_pause_risc(reg, ha); dmp_reg = & reg->flash_addr; cnt = 0U; goto ldv_66062; ldv_66061: tmp___3 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___4 = readl((void const volatile *)tmp___3); tmp___5 = __fswab32(tmp___4); fw->host_reg[cnt] = tmp___5; cnt = cnt + 1U; ldv_66062: ; if (cnt <= 31U) { goto ldv_66061; } else { } writel(0U, (void volatile *)(& reg->ictrl)); readl((void const volatile *)(& reg->ictrl)); writel(3952U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(2952790016U, (void volatile *)(& reg->iobase_select)); tmp___6 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___7 = __fswab32(tmp___6); fw->shadow_reg[0] = tmp___7; writel(2953838592U, (void volatile *)(& reg->iobase_select)); tmp___8 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___9 = __fswab32(tmp___8); fw->shadow_reg[1] = tmp___9; writel(2954887168U, (void volatile *)(& reg->iobase_select)); tmp___10 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___11 = __fswab32(tmp___10); fw->shadow_reg[2] = tmp___11; writel(2955935744U, (void volatile *)(& reg->iobase_select)); tmp___12 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___13 = __fswab32(tmp___12); fw->shadow_reg[3] = tmp___13; writel(2956984320U, (void volatile *)(& reg->iobase_select)); tmp___14 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___15 = __fswab32(tmp___14); fw->shadow_reg[4] = tmp___15; writel(2958032896U, (void volatile *)(& reg->iobase_select)); tmp___16 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___17 = __fswab32(tmp___16); fw->shadow_reg[5] = tmp___17; writel(2959081472U, (void volatile *)(& reg->iobase_select)); tmp___18 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___19 = __fswab32(tmp___18); fw->shadow_reg[6] = tmp___19; mbx_reg = & reg->mailbox0; cnt = 0U; goto ldv_66065; ldv_66064: tmp___20 = mbx_reg; mbx_reg = mbx_reg + 1; tmp___21 = readw((void const volatile *)tmp___20); tmp___22 = __fswab16((int )tmp___21); fw->mailbox_reg[cnt] = tmp___22; cnt = cnt + 1U; ldv_66065: ; if (cnt <= 31U) { goto ldv_66064; } else { } iter_reg = (uint32_t *)(& fw->xseq_gp_reg); iter_reg = qla24xx_read_window(reg, 48896U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48928U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48960U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48992U, 16U, iter_reg); qla24xx_read_window(reg, 49008U, 16U, iter_reg); qla24xx_read_window(reg, 49120U, 16U, (uint32_t *)(& fw->xseq_0_reg)); qla24xx_read_window(reg, 49136U, 16U, (uint32_t *)(& fw->xseq_1_reg)); iter_reg = (uint32_t *)(& fw->rseq_gp_reg); iter_reg = qla24xx_read_window(reg, 65280U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65296U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65312U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65328U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65344U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65360U, 16U, iter_reg); 
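/* qla24xx_read_window() (defined above) selects a register window through iobase_addr and copies
 * 'count' byte-swapped dwords out of iobase_window; the long run of calls here and below walks the
 * sequencer, DMA, RISC GP, LMC, FPM and frame-buffer banks window by window into the dump image. */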
iter_reg = qla24xx_read_window(reg, 65376U, 16U, iter_reg); qla24xx_read_window(reg, 65392U, 16U, iter_reg); qla24xx_read_window(reg, 65488U, 16U, (uint32_t *)(& fw->rseq_0_reg)); qla24xx_read_window(reg, 65504U, 16U, (uint32_t *)(& fw->rseq_1_reg)); qla24xx_read_window(reg, 65520U, 16U, (uint32_t *)(& fw->rseq_2_reg)); qla24xx_read_window(reg, 28928U, 16U, (uint32_t *)(& fw->cmd_dma_reg)); iter_reg = (uint32_t *)(& fw->req0_dma_reg); iter_reg = qla24xx_read_window(reg, 29184U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_66068; ldv_66067: tmp___23 = iter_reg; iter_reg = iter_reg + 1; tmp___24 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___25 = readl((void const volatile *)tmp___24); tmp___26 = __fswab32(tmp___25); *tmp___23 = tmp___26; cnt = cnt + 1U; ldv_66068: ; if (cnt <= 6U) { goto ldv_66067; } else { } iter_reg = (uint32_t *)(& fw->resp0_dma_reg); iter_reg = qla24xx_read_window(reg, 29440U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_66071; ldv_66070: tmp___27 = iter_reg; iter_reg = iter_reg + 1; tmp___28 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___29 = readl((void const volatile *)tmp___28); tmp___30 = __fswab32(tmp___29); *tmp___27 = tmp___30; cnt = cnt + 1U; ldv_66071: ; if (cnt <= 6U) { goto ldv_66070; } else { } iter_reg = (uint32_t *)(& fw->req1_dma_reg); iter_reg = qla24xx_read_window(reg, 29696U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_66074; ldv_66073: tmp___31 = iter_reg; iter_reg = iter_reg + 1; tmp___32 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___33 = readl((void const volatile *)tmp___32); tmp___34 = __fswab32(tmp___33); *tmp___31 = tmp___34; cnt = cnt + 1U; ldv_66074: ; if (cnt <= 6U) { goto ldv_66073; } else { } iter_reg = (uint32_t *)(& fw->xmt0_dma_reg); iter_reg = qla24xx_read_window(reg, 30208U, 16U, iter_reg); qla24xx_read_window(reg, 30224U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt1_dma_reg); iter_reg = qla24xx_read_window(reg, 30240U, 16U, iter_reg); qla24xx_read_window(reg, 30256U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt2_dma_reg); iter_reg = qla24xx_read_window(reg, 30272U, 16U, iter_reg); qla24xx_read_window(reg, 30288U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt3_dma_reg); iter_reg = qla24xx_read_window(reg, 30304U, 16U, iter_reg); qla24xx_read_window(reg, 30320U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt4_dma_reg); iter_reg = qla24xx_read_window(reg, 30336U, 16U, iter_reg); qla24xx_read_window(reg, 30352U, 16U, iter_reg); qla24xx_read_window(reg, 30368U, 16U, (uint32_t *)(& fw->xmt_data_dma_reg)); iter_reg = (uint32_t *)(& fw->rcvt0_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30464U, 16U, iter_reg); qla24xx_read_window(reg, 30480U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rcvt1_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30496U, 16U, iter_reg); qla24xx_read_window(reg, 30512U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->risc_gp_reg); iter_reg = qla24xx_read_window(reg, 3840U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3856U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3872U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3888U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3904U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3920U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3936U, 16U, iter_reg); qla24xx_read_window(reg, 3952U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->lmc_reg); iter_reg = qla24xx_read_window(reg, 12288U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12304U, 16U, iter_reg); iter_reg = 
qla24xx_read_window(reg, 12320U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12336U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12352U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12368U, 16U, iter_reg); qla24xx_read_window(reg, 12384U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fpm_hdw_reg); iter_reg = qla24xx_read_window(reg, 16384U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16400U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16416U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16432U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16448U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16464U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16480U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16496U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16512U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16528U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16544U, 16U, iter_reg); qla24xx_read_window(reg, 16560U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fb_hdw_reg); iter_reg = qla24xx_read_window(reg, 24576U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24592U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24608U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24624U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24640U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24832U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24880U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24976U, 16U, iter_reg); qla24xx_read_window(reg, 25008U, 16U, iter_reg); rval = qla24xx_soft_reset(ha); if (rval != 0) { goto qla24xx_fw_dump_failed_0; } else { } rval = qla24xx_dump_memory(ha, (uint32_t *)(& fw->code_ram), 32768U, & nxt); if (rval != 0) { goto qla24xx_fw_dump_failed_0; } else { } nxt = qla2xxx_copy_queues(ha, nxt); qla24xx_copy_eft(ha, nxt); nxt_chain = (void *)ha->fw_dump + (unsigned long )ha->chain_offset; nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, & last_chain); if ((unsigned long )last_chain != (unsigned long )((uint32_t *)0U)) { (ha->fw_dump)->version = (ha->fw_dump)->version | 128U; *last_chain = *last_chain | 128U; } else { } ha->fw_dump_len = (uint32_t )((long )nxt_chain) - (uint32_t )((long )ha->fw_dump); qla24xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla24xx_fw_dump_failed: ; if (hardware_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } return; } } void qla25xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { int rval ; uint32_t cnt ; uint32_t risc_address ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; uint32_t *dmp_reg ; uint32_t *iter_reg ; uint16_t *mbx_reg ; unsigned long flags ; struct qla25xx_fw_dump *fw ; uint32_t ext_mem_cnt ; void *nxt ; void *nxt_chain ; uint32_t *last_chain ; struct scsi_qla_host *base_vha ; void *tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; __u32 tmp___2 ; uint32_t *tmp___3 ; unsigned int tmp___4 ; __u32 tmp___5 ; uint32_t *tmp___6 ; unsigned int tmp___7 ; __u32 tmp___8 ; unsigned int tmp___9 ; __u32 tmp___10 ; unsigned int tmp___11 ; __u32 tmp___12 ; uint32_t *tmp___13 ; unsigned int tmp___14 ; __u32 tmp___15 ; unsigned int tmp___16 ; __u32 tmp___17 ; unsigned int tmp___18 ; __u32 tmp___19 ; unsigned int tmp___20 ; __u32 tmp___21 ; unsigned int tmp___22 ; __u32 tmp___23 ; unsigned int tmp___24 ; __u32 tmp___25 ; unsigned int 
tmp___26 ; __u32 tmp___27 ; unsigned int tmp___28 ; __u32 tmp___29 ; unsigned int tmp___30 ; __u32 tmp___31 ; unsigned int tmp___32 ; __u32 tmp___33 ; unsigned int tmp___34 ; __u32 tmp___35 ; unsigned int tmp___36 ; __u32 tmp___37 ; unsigned int tmp___38 ; __u32 tmp___39 ; uint16_t *tmp___40 ; unsigned short tmp___41 ; __u16 tmp___42 ; uint32_t *tmp___43 ; uint32_t *tmp___44 ; unsigned int tmp___45 ; __u32 tmp___46 ; uint32_t *tmp___47 ; uint32_t *tmp___48 ; unsigned int tmp___49 ; __u32 tmp___50 ; uint32_t *tmp___51 ; uint32_t *tmp___52 ; unsigned int tmp___53 ; __u32 tmp___54 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; last_chain = (uint32_t *)0U; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; ext_mem_cnt = 0U; risc_address = ext_mem_cnt; flags = 0UL; ha->fw_dump_cap_flags = 0UL; if (hardware_locked == 0) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 53256, "No buffer available for dump.\n"); goto qla25xx_fw_dump_failed; } else { } if (ha->fw_dumped != 0) { ql_log(1U, vha, 53257, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto qla25xx_fw_dump_failed; } else { } fw = & (ha->fw_dump)->isp.isp25; qla2xxx_prep_dump(ha, ha->fw_dump); (ha->fw_dump)->version = 33554432U; tmp___1 = readl((void const volatile *)(& reg->host_status)); tmp___2 = __fswab32(tmp___1); fw->host_status = tmp___2; qla24xx_pause_risc(reg, ha); iter_reg = (uint32_t *)(& fw->host_risc_reg); iter_reg = qla24xx_read_window(reg, 28672U, 16U, iter_reg); qla24xx_read_window(reg, 28688U, 16U, iter_reg); writel(31744U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(1U, (void volatile *)(& reg->iobase_window)); dmp_reg = & reg->iobase_c4; tmp___3 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___4 = readl((void const volatile *)tmp___3); tmp___5 = __fswab32(tmp___4); fw->pcie_regs[0] = tmp___5; tmp___6 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___7 = readl((void const volatile *)tmp___6); tmp___8 = __fswab32(tmp___7); fw->pcie_regs[1] = tmp___8; tmp___9 = readl((void const volatile *)dmp_reg); tmp___10 = __fswab32(tmp___9); fw->pcie_regs[2] = tmp___10; tmp___11 = readl((void const volatile *)(& reg->iobase_window)); tmp___12 = __fswab32(tmp___11); fw->pcie_regs[3] = tmp___12; writel(0U, (void volatile *)(& reg->iobase_window)); readl((void const volatile *)(& reg->iobase_window)); dmp_reg = & reg->flash_addr; cnt = 0U; goto ldv_66101; ldv_66100: tmp___13 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___14 = readl((void const volatile *)tmp___13); tmp___15 = __fswab32(tmp___14); fw->host_reg[cnt] = tmp___15; cnt = cnt + 1U; ldv_66101: ; if (cnt <= 31U) { goto ldv_66100; } else { } writel(0U, (void volatile *)(& reg->ictrl)); readl((void const volatile *)(& reg->ictrl)); writel(3952U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(2952790016U, (void volatile *)(& reg->iobase_select)); tmp___16 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___17 = __fswab32(tmp___16); fw->shadow_reg[0] = tmp___17; writel(2953838592U, (void volatile *)(& reg->iobase_select)); tmp___18 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___19 = __fswab32(tmp___18); fw->shadow_reg[1] = tmp___19; writel(2954887168U, (void volatile *)(& reg->iobase_select)); tmp___20 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___21 = 
__fswab32(tmp___20); fw->shadow_reg[2] = tmp___21; writel(2955935744U, (void volatile *)(& reg->iobase_select)); tmp___22 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___23 = __fswab32(tmp___22); fw->shadow_reg[3] = tmp___23; writel(2956984320U, (void volatile *)(& reg->iobase_select)); tmp___24 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___25 = __fswab32(tmp___24); fw->shadow_reg[4] = tmp___25; writel(2958032896U, (void volatile *)(& reg->iobase_select)); tmp___26 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___27 = __fswab32(tmp___26); fw->shadow_reg[5] = tmp___27; writel(2959081472U, (void volatile *)(& reg->iobase_select)); tmp___28 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___29 = __fswab32(tmp___28); fw->shadow_reg[6] = tmp___29; writel(2960130048U, (void volatile *)(& reg->iobase_select)); tmp___30 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___31 = __fswab32(tmp___30); fw->shadow_reg[7] = tmp___31; writel(2961178624U, (void volatile *)(& reg->iobase_select)); tmp___32 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___33 = __fswab32(tmp___32); fw->shadow_reg[8] = tmp___33; writel(2962227200U, (void volatile *)(& reg->iobase_select)); tmp___34 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___35 = __fswab32(tmp___34); fw->shadow_reg[9] = tmp___35; writel(2963275776U, (void volatile *)(& reg->iobase_select)); tmp___36 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___37 = __fswab32(tmp___36); fw->shadow_reg[10] = tmp___37; writel(16U, (void volatile *)(& reg->iobase_addr)); tmp___38 = readl((void const volatile *)(& reg->iobase_window)); tmp___39 = __fswab32(tmp___38); fw->risc_io_reg = tmp___39; mbx_reg = & reg->mailbox0; cnt = 0U; goto ldv_66104; ldv_66103: tmp___40 = mbx_reg; mbx_reg = mbx_reg + 1; tmp___41 = readw((void const volatile *)tmp___40); tmp___42 = __fswab16((int )tmp___41); fw->mailbox_reg[cnt] = tmp___42; cnt = cnt + 1U; ldv_66104: ; if (cnt <= 31U) { goto ldv_66103; } else { } iter_reg = (uint32_t *)(& fw->xseq_gp_reg); iter_reg = qla24xx_read_window(reg, 48896U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48928U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48960U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48992U, 16U, iter_reg); qla24xx_read_window(reg, 49008U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xseq_0_reg); iter_reg = qla24xx_read_window(reg, 49088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 49104U, 16U, iter_reg); qla24xx_read_window(reg, 49120U, 16U, iter_reg); qla24xx_read_window(reg, 49136U, 16U, (uint32_t *)(& fw->xseq_1_reg)); iter_reg = (uint32_t *)(& fw->rseq_gp_reg); iter_reg = qla24xx_read_window(reg, 65280U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65296U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65312U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65328U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65344U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65360U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65376U, 16U, iter_reg); qla24xx_read_window(reg, 65392U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rseq_0_reg); iter_reg = qla24xx_read_window(reg, 65472U, 16U, iter_reg); qla24xx_read_window(reg, 65488U, 16U, iter_reg); qla24xx_read_window(reg, 65504U, 16U, (uint32_t *)(& 
fw->rseq_1_reg)); qla24xx_read_window(reg, 65520U, 16U, (uint32_t *)(& fw->rseq_2_reg)); iter_reg = (uint32_t *)(& fw->aseq_gp_reg); iter_reg = qla24xx_read_window(reg, 45056U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45072U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45104U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45120U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45136U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45152U, 16U, iter_reg); qla24xx_read_window(reg, 45168U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->aseq_0_reg); iter_reg = qla24xx_read_window(reg, 45248U, 16U, iter_reg); qla24xx_read_window(reg, 45264U, 16U, iter_reg); qla24xx_read_window(reg, 45280U, 16U, (uint32_t *)(& fw->aseq_1_reg)); qla24xx_read_window(reg, 45296U, 16U, (uint32_t *)(& fw->aseq_2_reg)); qla24xx_read_window(reg, 28928U, 16U, (uint32_t *)(& fw->cmd_dma_reg)); iter_reg = (uint32_t *)(& fw->req0_dma_reg); iter_reg = qla24xx_read_window(reg, 29184U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_66107; ldv_66106: tmp___43 = iter_reg; iter_reg = iter_reg + 1; tmp___44 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___45 = readl((void const volatile *)tmp___44); tmp___46 = __fswab32(tmp___45); *tmp___43 = tmp___46; cnt = cnt + 1U; ldv_66107: ; if (cnt <= 6U) { goto ldv_66106; } else { } iter_reg = (uint32_t *)(& fw->resp0_dma_reg); iter_reg = qla24xx_read_window(reg, 29440U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_66110; ldv_66109: tmp___47 = iter_reg; iter_reg = iter_reg + 1; tmp___48 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___49 = readl((void const volatile *)tmp___48); tmp___50 = __fswab32(tmp___49); *tmp___47 = tmp___50; cnt = cnt + 1U; ldv_66110: ; if (cnt <= 6U) { goto ldv_66109; } else { } iter_reg = (uint32_t *)(& fw->req1_dma_reg); iter_reg = qla24xx_read_window(reg, 29696U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_66113; ldv_66112: tmp___51 = iter_reg; iter_reg = iter_reg + 1; tmp___52 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___53 = readl((void const volatile *)tmp___52); tmp___54 = __fswab32(tmp___53); *tmp___51 = tmp___54; cnt = cnt + 1U; ldv_66113: ; if (cnt <= 6U) { goto ldv_66112; } else { } iter_reg = (uint32_t *)(& fw->xmt0_dma_reg); iter_reg = qla24xx_read_window(reg, 30208U, 16U, iter_reg); qla24xx_read_window(reg, 30224U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt1_dma_reg); iter_reg = qla24xx_read_window(reg, 30240U, 16U, iter_reg); qla24xx_read_window(reg, 30256U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt2_dma_reg); iter_reg = qla24xx_read_window(reg, 30272U, 16U, iter_reg); qla24xx_read_window(reg, 30288U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt3_dma_reg); iter_reg = qla24xx_read_window(reg, 30304U, 16U, iter_reg); qla24xx_read_window(reg, 30320U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt4_dma_reg); iter_reg = qla24xx_read_window(reg, 30336U, 16U, iter_reg); qla24xx_read_window(reg, 30352U, 16U, iter_reg); qla24xx_read_window(reg, 30368U, 16U, (uint32_t *)(& fw->xmt_data_dma_reg)); iter_reg = (uint32_t *)(& fw->rcvt0_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30464U, 16U, iter_reg); qla24xx_read_window(reg, 30480U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rcvt1_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30496U, 16U, iter_reg); qla24xx_read_window(reg, 30512U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->risc_gp_reg); iter_reg = qla24xx_read_window(reg, 3840U, 16U, iter_reg); iter_reg = 
qla24xx_read_window(reg, 3856U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3872U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3888U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3904U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3920U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3936U, 16U, iter_reg); qla24xx_read_window(reg, 3952U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->lmc_reg); iter_reg = qla24xx_read_window(reg, 12288U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12304U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12320U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12336U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12352U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12368U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12384U, 16U, iter_reg); qla24xx_read_window(reg, 12400U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fpm_hdw_reg); iter_reg = qla24xx_read_window(reg, 16384U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16400U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16416U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16432U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16448U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16464U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16480U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16496U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16512U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16528U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16544U, 16U, iter_reg); qla24xx_read_window(reg, 16560U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fb_hdw_reg); iter_reg = qla24xx_read_window(reg, 24576U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24592U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24608U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24624U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24640U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24832U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24880U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25008U, 16U, iter_reg); qla24xx_read_window(reg, 28416U, 16U, iter_reg); nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + (unsigned long )ha->chain_offset, & last_chain); rval = qla24xx_soft_reset(ha); if (rval != 0) { goto qla25xx_fw_dump_failed_0; } else { } rval = qla24xx_dump_memory(ha, (uint32_t *)(& fw->code_ram), 32768U, & nxt); if (rval != 0) { goto qla25xx_fw_dump_failed_0; } else { } nxt = qla2xxx_copy_queues(ha, nxt); qla24xx_copy_eft(ha, nxt); nxt_chain = qla25xx_copy_fce(ha, nxt_chain, & last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, & last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, & last_chain); if ((unsigned long )last_chain != (unsigned long )((uint32_t *)0U)) { (ha->fw_dump)->version = (ha->fw_dump)->version | 128U; *last_chain = *last_chain | 128U; } else { } ha->fw_dump_len = (uint32_t )((long )nxt_chain) - (uint32_t )((long )ha->fw_dump); qla25xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla25xx_fw_dump_failed: ; if (hardware_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } return; } } void qla81xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { int rval ; uint32_t cnt ; uint32_t risc_address ; 
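/* qla81xx_fw_dump(): ISP81xx variant of the same dump template. It pauses the RISC, captures the
 * PCIe/host/shadow/mailbox registers and the per-bank windows, presumably followed (past the code
 * shown here) by the same soft-reset, RAM dump and optional-chain handling as the 24xx/25xx paths. */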
struct qla_hw_data *ha ; struct device_reg_24xx *reg ; uint32_t *dmp_reg ; uint32_t *iter_reg ; uint16_t *mbx_reg ; unsigned long flags ; struct qla81xx_fw_dump *fw ; uint32_t ext_mem_cnt ; void *nxt ; void *nxt_chain ; uint32_t *last_chain ; struct scsi_qla_host *base_vha ; void *tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; __u32 tmp___2 ; uint32_t *tmp___3 ; unsigned int tmp___4 ; __u32 tmp___5 ; uint32_t *tmp___6 ; unsigned int tmp___7 ; __u32 tmp___8 ; unsigned int tmp___9 ; __u32 tmp___10 ; unsigned int tmp___11 ; __u32 tmp___12 ; uint32_t *tmp___13 ; unsigned int tmp___14 ; __u32 tmp___15 ; unsigned int tmp___16 ; __u32 tmp___17 ; unsigned int tmp___18 ; __u32 tmp___19 ; unsigned int tmp___20 ; __u32 tmp___21 ; unsigned int tmp___22 ; __u32 tmp___23 ; unsigned int tmp___24 ; __u32 tmp___25 ; unsigned int tmp___26 ; __u32 tmp___27 ; unsigned int tmp___28 ; __u32 tmp___29 ; unsigned int tmp___30 ; __u32 tmp___31 ; unsigned int tmp___32 ; __u32 tmp___33 ; unsigned int tmp___34 ; __u32 tmp___35 ; unsigned int tmp___36 ; __u32 tmp___37 ; unsigned int tmp___38 ; __u32 tmp___39 ; uint16_t *tmp___40 ; unsigned short tmp___41 ; __u16 tmp___42 ; uint32_t *tmp___43 ; uint32_t *tmp___44 ; unsigned int tmp___45 ; __u32 tmp___46 ; uint32_t *tmp___47 ; uint32_t *tmp___48 ; unsigned int tmp___49 ; __u32 tmp___50 ; uint32_t *tmp___51 ; uint32_t *tmp___52 ; unsigned int tmp___53 ; __u32 tmp___54 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; last_chain = (uint32_t *)0U; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; ext_mem_cnt = 0U; risc_address = ext_mem_cnt; flags = 0UL; ha->fw_dump_cap_flags = 0UL; if (hardware_locked == 0) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 53258, "No buffer available for dump.\n"); goto qla81xx_fw_dump_failed; } else { } if (ha->fw_dumped != 0) { ql_log(1U, vha, 53259, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto qla81xx_fw_dump_failed; } else { } fw = & (ha->fw_dump)->isp.isp81; qla2xxx_prep_dump(ha, ha->fw_dump); tmp___1 = readl((void const volatile *)(& reg->host_status)); tmp___2 = __fswab32(tmp___1); fw->host_status = tmp___2; qla24xx_pause_risc(reg, ha); iter_reg = (uint32_t *)(& fw->host_risc_reg); iter_reg = qla24xx_read_window(reg, 28672U, 16U, iter_reg); qla24xx_read_window(reg, 28688U, 16U, iter_reg); writel(31744U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(1U, (void volatile *)(& reg->iobase_window)); dmp_reg = & reg->iobase_c4; tmp___3 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___4 = readl((void const volatile *)tmp___3); tmp___5 = __fswab32(tmp___4); fw->pcie_regs[0] = tmp___5; tmp___6 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___7 = readl((void const volatile *)tmp___6); tmp___8 = __fswab32(tmp___7); fw->pcie_regs[1] = tmp___8; tmp___9 = readl((void const volatile *)dmp_reg); tmp___10 = __fswab32(tmp___9); fw->pcie_regs[2] = tmp___10; tmp___11 = readl((void const volatile *)(& reg->iobase_window)); tmp___12 = __fswab32(tmp___11); fw->pcie_regs[3] = tmp___12; writel(0U, (void volatile *)(& reg->iobase_window)); readl((void const volatile *)(& reg->iobase_window)); dmp_reg = & reg->flash_addr; cnt = 0U; goto ldv_66140; ldv_66139: tmp___13 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___14 = readl((void const volatile *)tmp___13); tmp___15 = __fswab32(tmp___14); fw->host_reg[cnt] = 
tmp___15; cnt = cnt + 1U; ldv_66140: ; if (cnt <= 31U) { goto ldv_66139; } else { } writel(0U, (void volatile *)(& reg->ictrl)); readl((void const volatile *)(& reg->ictrl)); writel(3952U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(2952790016U, (void volatile *)(& reg->iobase_select)); tmp___16 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___17 = __fswab32(tmp___16); fw->shadow_reg[0] = tmp___17; writel(2953838592U, (void volatile *)(& reg->iobase_select)); tmp___18 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___19 = __fswab32(tmp___18); fw->shadow_reg[1] = tmp___19; writel(2954887168U, (void volatile *)(& reg->iobase_select)); tmp___20 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___21 = __fswab32(tmp___20); fw->shadow_reg[2] = tmp___21; writel(2955935744U, (void volatile *)(& reg->iobase_select)); tmp___22 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___23 = __fswab32(tmp___22); fw->shadow_reg[3] = tmp___23; writel(2956984320U, (void volatile *)(& reg->iobase_select)); tmp___24 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___25 = __fswab32(tmp___24); fw->shadow_reg[4] = tmp___25; writel(2958032896U, (void volatile *)(& reg->iobase_select)); tmp___26 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___27 = __fswab32(tmp___26); fw->shadow_reg[5] = tmp___27; writel(2959081472U, (void volatile *)(& reg->iobase_select)); tmp___28 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___29 = __fswab32(tmp___28); fw->shadow_reg[6] = tmp___29; writel(2960130048U, (void volatile *)(& reg->iobase_select)); tmp___30 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___31 = __fswab32(tmp___30); fw->shadow_reg[7] = tmp___31; writel(2961178624U, (void volatile *)(& reg->iobase_select)); tmp___32 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___33 = __fswab32(tmp___32); fw->shadow_reg[8] = tmp___33; writel(2962227200U, (void volatile *)(& reg->iobase_select)); tmp___34 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___35 = __fswab32(tmp___34); fw->shadow_reg[9] = tmp___35; writel(2963275776U, (void volatile *)(& reg->iobase_select)); tmp___36 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___37 = __fswab32(tmp___36); fw->shadow_reg[10] = tmp___37; writel(16U, (void volatile *)(& reg->iobase_addr)); tmp___38 = readl((void const volatile *)(& reg->iobase_window)); tmp___39 = __fswab32(tmp___38); fw->risc_io_reg = tmp___39; mbx_reg = & reg->mailbox0; cnt = 0U; goto ldv_66143; ldv_66142: tmp___40 = mbx_reg; mbx_reg = mbx_reg + 1; tmp___41 = readw((void const volatile *)tmp___40); tmp___42 = __fswab16((int )tmp___41); fw->mailbox_reg[cnt] = tmp___42; cnt = cnt + 1U; ldv_66143: ; if (cnt <= 31U) { goto ldv_66142; } else { } iter_reg = (uint32_t *)(& fw->xseq_gp_reg); iter_reg = qla24xx_read_window(reg, 48896U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48928U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48960U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48992U, 16U, iter_reg); qla24xx_read_window(reg, 49008U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xseq_0_reg); iter_reg = qla24xx_read_window(reg, 49088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 49104U, 16U, iter_reg); qla24xx_read_window(reg, 49120U, 16U, iter_reg); 
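/* Sequencer register capture. Each qla24xx_read_window(reg, base, count, dest) call
 * copies one `count'-dword window selected by `base' into the dump image and returns
 * the advanced destination pointer; where the return value is dropped, the call fills
 * the last window of the current fw->*_reg array before iter_reg is re-aimed at the
 * next one. Conceptually (a sketch only -- the generated code keeps every window
 * explicit) a run of consecutive windows is just:
 *
 *   for (i = 0U; i < n_windows; i = i + 1U)
 *       iter_reg = qla24xx_read_window(reg, base + (i << 4), 16U, iter_reg);
 */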
qla24xx_read_window(reg, 49136U, 16U, (uint32_t *)(& fw->xseq_1_reg)); iter_reg = (uint32_t *)(& fw->rseq_gp_reg); iter_reg = qla24xx_read_window(reg, 65280U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65296U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65312U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65328U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65344U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65360U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65376U, 16U, iter_reg); qla24xx_read_window(reg, 65392U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rseq_0_reg); iter_reg = qla24xx_read_window(reg, 65472U, 16U, iter_reg); qla24xx_read_window(reg, 65488U, 16U, iter_reg); qla24xx_read_window(reg, 65504U, 16U, (uint32_t *)(& fw->rseq_1_reg)); qla24xx_read_window(reg, 65520U, 16U, (uint32_t *)(& fw->rseq_2_reg)); iter_reg = (uint32_t *)(& fw->aseq_gp_reg); iter_reg = qla24xx_read_window(reg, 45056U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45072U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45104U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45120U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45136U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45152U, 16U, iter_reg); qla24xx_read_window(reg, 45168U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->aseq_0_reg); iter_reg = qla24xx_read_window(reg, 45248U, 16U, iter_reg); qla24xx_read_window(reg, 45264U, 16U, iter_reg); qla24xx_read_window(reg, 45280U, 16U, (uint32_t *)(& fw->aseq_1_reg)); qla24xx_read_window(reg, 45296U, 16U, (uint32_t *)(& fw->aseq_2_reg)); qla24xx_read_window(reg, 28928U, 16U, (uint32_t *)(& fw->cmd_dma_reg)); iter_reg = (uint32_t *)(& fw->req0_dma_reg); iter_reg = qla24xx_read_window(reg, 29184U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_66146; ldv_66145: tmp___43 = iter_reg; iter_reg = iter_reg + 1; tmp___44 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___45 = readl((void const volatile *)tmp___44); tmp___46 = __fswab32(tmp___45); *tmp___43 = tmp___46; cnt = cnt + 1U; ldv_66146: ; if (cnt <= 6U) { goto ldv_66145; } else { } iter_reg = (uint32_t *)(& fw->resp0_dma_reg); iter_reg = qla24xx_read_window(reg, 29440U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_66149; ldv_66148: tmp___47 = iter_reg; iter_reg = iter_reg + 1; tmp___48 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___49 = readl((void const volatile *)tmp___48); tmp___50 = __fswab32(tmp___49); *tmp___47 = tmp___50; cnt = cnt + 1U; ldv_66149: ; if (cnt <= 6U) { goto ldv_66148; } else { } iter_reg = (uint32_t *)(& fw->req1_dma_reg); iter_reg = qla24xx_read_window(reg, 29696U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_66152; ldv_66151: tmp___51 = iter_reg; iter_reg = iter_reg + 1; tmp___52 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___53 = readl((void const volatile *)tmp___52); tmp___54 = __fswab32(tmp___53); *tmp___51 = tmp___54; cnt = cnt + 1U; ldv_66152: ; if (cnt <= 6U) { goto ldv_66151; } else { } iter_reg = (uint32_t *)(& fw->xmt0_dma_reg); iter_reg = qla24xx_read_window(reg, 30208U, 16U, iter_reg); qla24xx_read_window(reg, 30224U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt1_dma_reg); iter_reg = qla24xx_read_window(reg, 30240U, 16U, iter_reg); qla24xx_read_window(reg, 30256U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt2_dma_reg); iter_reg = qla24xx_read_window(reg, 30272U, 16U, iter_reg); qla24xx_read_window(reg, 30288U, 16U, iter_reg); iter_reg = (uint32_t *)(& 
fw->xmt3_dma_reg); iter_reg = qla24xx_read_window(reg, 30304U, 16U, iter_reg); qla24xx_read_window(reg, 30320U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt4_dma_reg); iter_reg = qla24xx_read_window(reg, 30336U, 16U, iter_reg); qla24xx_read_window(reg, 30352U, 16U, iter_reg); qla24xx_read_window(reg, 30368U, 16U, (uint32_t *)(& fw->xmt_data_dma_reg)); iter_reg = (uint32_t *)(& fw->rcvt0_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30464U, 16U, iter_reg); qla24xx_read_window(reg, 30480U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rcvt1_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30496U, 16U, iter_reg); qla24xx_read_window(reg, 30512U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->risc_gp_reg); iter_reg = qla24xx_read_window(reg, 3840U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3856U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3872U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3888U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3904U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3920U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3936U, 16U, iter_reg); qla24xx_read_window(reg, 3952U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->lmc_reg); iter_reg = qla24xx_read_window(reg, 12288U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12304U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12320U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12336U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12352U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12368U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12384U, 16U, iter_reg); qla24xx_read_window(reg, 12400U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fpm_hdw_reg); iter_reg = qla24xx_read_window(reg, 16384U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16400U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16416U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16432U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16448U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16464U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16480U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16496U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16512U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16528U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16544U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16560U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16576U, 16U, iter_reg); qla24xx_read_window(reg, 16592U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fb_hdw_reg); iter_reg = qla24xx_read_window(reg, 24576U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24592U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24608U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24624U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24640U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24832U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24880U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25008U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25024U, 16U, iter_reg); qla24xx_read_window(reg, 28416U, 16U, iter_reg); nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + (unsigned long )ha->chain_offset, & last_chain); rval = qla24xx_soft_reset(ha); if (rval != 0) { goto 
qla81xx_fw_dump_failed_0; } else { } rval = qla24xx_dump_memory(ha, (uint32_t *)(& fw->code_ram), 32768U, & nxt); if (rval != 0) { goto qla81xx_fw_dump_failed_0; } else { } nxt = qla2xxx_copy_queues(ha, nxt); qla24xx_copy_eft(ha, nxt); nxt_chain = qla25xx_copy_fce(ha, nxt_chain, & last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, & last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, & last_chain); if ((unsigned long )last_chain != (unsigned long )((uint32_t *)0U)) { (ha->fw_dump)->version = (ha->fw_dump)->version | 128U; *last_chain = *last_chain | 128U; } else { } ha->fw_dump_len = (uint32_t )((long )nxt_chain) - (uint32_t )((long )ha->fw_dump); qla81xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla81xx_fw_dump_failed: ; if (hardware_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } return; } } void qla83xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { int rval ; uint32_t cnt ; uint32_t reg_data ; uint32_t risc_address ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; uint32_t *dmp_reg ; uint32_t *iter_reg ; uint16_t *mbx_reg ; unsigned long flags ; struct qla83xx_fw_dump *fw ; uint32_t ext_mem_cnt ; void *nxt ; void *nxt_chain ; uint32_t *last_chain ; struct scsi_qla_host *base_vha ; void *tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; __u32 tmp___2 ; uint32_t *tmp___3 ; unsigned int tmp___4 ; __u32 tmp___5 ; uint32_t *tmp___6 ; unsigned int tmp___7 ; __u32 tmp___8 ; unsigned int tmp___9 ; __u32 tmp___10 ; unsigned int tmp___11 ; __u32 tmp___12 ; uint32_t *tmp___13 ; unsigned int tmp___14 ; __u32 tmp___15 ; unsigned int tmp___16 ; __u32 tmp___17 ; unsigned int tmp___18 ; __u32 tmp___19 ; unsigned int tmp___20 ; __u32 tmp___21 ; unsigned int tmp___22 ; __u32 tmp___23 ; unsigned int tmp___24 ; __u32 tmp___25 ; unsigned int tmp___26 ; __u32 tmp___27 ; unsigned int tmp___28 ; __u32 tmp___29 ; unsigned int tmp___30 ; __u32 tmp___31 ; unsigned int tmp___32 ; __u32 tmp___33 ; unsigned int tmp___34 ; __u32 tmp___35 ; unsigned int tmp___36 ; __u32 tmp___37 ; unsigned int tmp___38 ; __u32 tmp___39 ; uint16_t *tmp___40 ; unsigned short tmp___41 ; __u16 tmp___42 ; uint32_t *tmp___43 ; uint32_t *tmp___44 ; unsigned int tmp___45 ; __u32 tmp___46 ; uint32_t *tmp___47 ; uint32_t *tmp___48 ; unsigned int tmp___49 ; __u32 tmp___50 ; uint32_t *tmp___51 ; uint32_t *tmp___52 ; unsigned int tmp___53 ; __u32 tmp___54 ; unsigned short tmp___55 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; last_chain = (uint32_t *)0U; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; ext_mem_cnt = 0U; risc_address = ext_mem_cnt; flags = 0UL; ha->fw_dump_cap_flags = 0UL; if (hardware_locked == 0) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 53260, "No buffer available for dump!!!\n"); goto qla83xx_fw_dump_failed; } else { } if (ha->fw_dumped != 0) { ql_log(1U, vha, 53261, "Firmware has been previously dumped (%p) -- ignoring request...\n", ha->fw_dump); goto qla83xx_fw_dump_failed; } else { } fw = & (ha->fw_dump)->isp.isp83; qla2xxx_prep_dump(ha, ha->fw_dump); tmp___1 = readl((void const volatile *)(& reg->host_status)); tmp___2 = __fswab32(tmp___1); fw->host_status = tmp___2; qla24xx_pause_risc(reg, ha); writel(24576U, (void volatile *)(& reg->iobase_addr)); dmp_reg = & reg->iobase_window; reg_data = readl((void const volatile *)dmp_reg); writel(0U, (void 
volatile *)dmp_reg); dmp_reg = (uint32_t *)(& reg->unused_4_1); reg_data = readl((void const volatile *)dmp_reg); writel(0U, (void volatile *)dmp_reg); writel(24592U, (void volatile *)(& reg->iobase_addr)); dmp_reg = (uint32_t *)(& reg->unused_4_1) + 2UL; reg_data = readl((void const volatile *)dmp_reg); writel(0U, (void volatile *)dmp_reg); writel(3952U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(1610612736U, (void volatile *)(& reg->iobase_select)); iter_reg = (uint32_t *)(& fw->host_risc_reg); iter_reg = qla24xx_read_window(reg, 28672U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28688U, 16U, iter_reg); qla24xx_read_window(reg, 28736U, 16U, iter_reg); writel(31744U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(1U, (void volatile *)(& reg->iobase_window)); dmp_reg = & reg->iobase_c4; tmp___3 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___4 = readl((void const volatile *)tmp___3); tmp___5 = __fswab32(tmp___4); fw->pcie_regs[0] = tmp___5; tmp___6 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___7 = readl((void const volatile *)tmp___6); tmp___8 = __fswab32(tmp___7); fw->pcie_regs[1] = tmp___8; tmp___9 = readl((void const volatile *)dmp_reg); tmp___10 = __fswab32(tmp___9); fw->pcie_regs[2] = tmp___10; tmp___11 = readl((void const volatile *)(& reg->iobase_window)); tmp___12 = __fswab32(tmp___11); fw->pcie_regs[3] = tmp___12; writel(0U, (void volatile *)(& reg->iobase_window)); readl((void const volatile *)(& reg->iobase_window)); dmp_reg = & reg->flash_addr; cnt = 0U; goto ldv_66180; ldv_66179: tmp___13 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___14 = readl((void const volatile *)tmp___13); tmp___15 = __fswab32(tmp___14); fw->host_reg[cnt] = tmp___15; cnt = cnt + 1U; ldv_66180: ; if (cnt <= 31U) { goto ldv_66179; } else { } writel(0U, (void volatile *)(& reg->ictrl)); readl((void const volatile *)(& reg->ictrl)); writel(3952U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(2952790016U, (void volatile *)(& reg->iobase_select)); tmp___16 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___17 = __fswab32(tmp___16); fw->shadow_reg[0] = tmp___17; writel(2953838592U, (void volatile *)(& reg->iobase_select)); tmp___18 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___19 = __fswab32(tmp___18); fw->shadow_reg[1] = tmp___19; writel(2954887168U, (void volatile *)(& reg->iobase_select)); tmp___20 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___21 = __fswab32(tmp___20); fw->shadow_reg[2] = tmp___21; writel(2955935744U, (void volatile *)(& reg->iobase_select)); tmp___22 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___23 = __fswab32(tmp___22); fw->shadow_reg[3] = tmp___23; writel(2956984320U, (void volatile *)(& reg->iobase_select)); tmp___24 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___25 = __fswab32(tmp___24); fw->shadow_reg[4] = tmp___25; writel(2958032896U, (void volatile *)(& reg->iobase_select)); tmp___26 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___27 = __fswab32(tmp___26); fw->shadow_reg[5] = tmp___27; writel(2959081472U, (void volatile *)(& reg->iobase_select)); tmp___28 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___29 = __fswab32(tmp___28); fw->shadow_reg[6] = tmp___29; writel(2960130048U, (void volatile *)(& reg->iobase_select)); tmp___30 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___31 = __fswab32(tmp___30); fw->shadow_reg[7] = tmp___31; 
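/* Shadow RISC registers (ISP83xx path, same idiom as the ISP81xx code above):
 * writing 0xB0000000U | (n << 20) to iobase_select picks shadow slot n, and the
 * byte-swapped iobase_sdata value is stored as fw->shadow_reg[n]; slots 8-10
 * follow below. A sketch of the unrolled pattern:
 *
 *   for (n = 0U; n <= 10U; n = n + 1U) {
 *       writel(2952790016U | (n << 20), (void volatile *)(& reg->iobase_select));
 *       fw->shadow_reg[n] = __fswab32(readl((void const volatile *)(& reg->iobase_sdata)));
 *   }
 */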
writel(2961178624U, (void volatile *)(& reg->iobase_select)); tmp___32 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___33 = __fswab32(tmp___32); fw->shadow_reg[8] = tmp___33; writel(2962227200U, (void volatile *)(& reg->iobase_select)); tmp___34 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___35 = __fswab32(tmp___34); fw->shadow_reg[9] = tmp___35; writel(2963275776U, (void volatile *)(& reg->iobase_select)); tmp___36 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___37 = __fswab32(tmp___36); fw->shadow_reg[10] = tmp___37; writel(16U, (void volatile *)(& reg->iobase_addr)); tmp___38 = readl((void const volatile *)(& reg->iobase_window)); tmp___39 = __fswab32(tmp___38); fw->risc_io_reg = tmp___39; mbx_reg = & reg->mailbox0; cnt = 0U; goto ldv_66183; ldv_66182: tmp___40 = mbx_reg; mbx_reg = mbx_reg + 1; tmp___41 = readw((void const volatile *)tmp___40); tmp___42 = __fswab16((int )tmp___41); fw->mailbox_reg[cnt] = tmp___42; cnt = cnt + 1U; ldv_66183: ; if (cnt <= 31U) { goto ldv_66182; } else { } iter_reg = (uint32_t *)(& fw->xseq_gp_reg); iter_reg = qla24xx_read_window(reg, 48640U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48656U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48672U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48688U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48704U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48720U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48736U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48752U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48896U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48928U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48960U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48992U, 16U, iter_reg); qla24xx_read_window(reg, 49008U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xseq_0_reg); iter_reg = qla24xx_read_window(reg, 49088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 49104U, 16U, iter_reg); qla24xx_read_window(reg, 49120U, 16U, iter_reg); qla24xx_read_window(reg, 49136U, 16U, (uint32_t *)(& fw->xseq_1_reg)); qla24xx_read_window(reg, 48880U, 16U, (uint32_t *)(& fw->xseq_2_reg)); iter_reg = (uint32_t *)(& fw->rseq_gp_reg); iter_reg = qla24xx_read_window(reg, 65024U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65040U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65056U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65072U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65104U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65120U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65136U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65280U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65296U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65312U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65328U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65344U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65360U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65376U, 16U, iter_reg); qla24xx_read_window(reg, 65392U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rseq_0_reg); iter_reg = qla24xx_read_window(reg, 65472U, 16U, iter_reg); qla24xx_read_window(reg, 65488U, 16U, iter_reg); 
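/* The ISP83xx dump carries more sequencer state than the ISP81xx variant above:
 * note the additional xseq_2_reg window (48880U / 0xBEF0) captured just above and
 * the rseq_3_reg (65264U / 0xFEF0) and aseq_3_reg (45552U / 0xB1F0) windows that
 * follow. */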
qla24xx_read_window(reg, 65504U, 16U, (uint32_t *)(& fw->rseq_1_reg)); qla24xx_read_window(reg, 65520U, 16U, (uint32_t *)(& fw->rseq_2_reg)); qla24xx_read_window(reg, 65264U, 16U, (uint32_t *)(& fw->rseq_3_reg)); iter_reg = (uint32_t *)(& fw->aseq_gp_reg); iter_reg = qla24xx_read_window(reg, 45056U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45072U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45104U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45120U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45136U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45152U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45168U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45312U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45328U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45344U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45360U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45376U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45392U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45408U, 16U, iter_reg); qla24xx_read_window(reg, 45424U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->aseq_0_reg); iter_reg = qla24xx_read_window(reg, 45248U, 16U, iter_reg); qla24xx_read_window(reg, 45264U, 16U, iter_reg); qla24xx_read_window(reg, 45280U, 16U, (uint32_t *)(& fw->aseq_1_reg)); qla24xx_read_window(reg, 45296U, 16U, (uint32_t *)(& fw->aseq_2_reg)); qla24xx_read_window(reg, 45552U, 16U, (uint32_t *)(& fw->aseq_3_reg)); iter_reg = (uint32_t *)(& fw->cmd_dma_reg); iter_reg = qla24xx_read_window(reg, 28928U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28960U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28976U, 16U, iter_reg); qla24xx_read_window(reg, 29168U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->req0_dma_reg); iter_reg = qla24xx_read_window(reg, 29184U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_66186; ldv_66185: tmp___43 = iter_reg; iter_reg = iter_reg + 1; tmp___44 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___45 = readl((void const volatile *)tmp___44); tmp___46 = __fswab32(tmp___45); *tmp___43 = tmp___46; cnt = cnt + 1U; ldv_66186: ; if (cnt <= 6U) { goto ldv_66185; } else { } iter_reg = (uint32_t *)(& fw->resp0_dma_reg); iter_reg = qla24xx_read_window(reg, 29440U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_66189; ldv_66188: tmp___47 = iter_reg; iter_reg = iter_reg + 1; tmp___48 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___49 = readl((void const volatile *)tmp___48); tmp___50 = __fswab32(tmp___49); *tmp___47 = tmp___50; cnt = cnt + 1U; ldv_66189: ; if (cnt <= 6U) { goto ldv_66188; } else { } iter_reg = (uint32_t *)(& fw->req1_dma_reg); iter_reg = qla24xx_read_window(reg, 29696U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_66192; ldv_66191: tmp___51 = iter_reg; iter_reg = iter_reg + 1; tmp___52 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___53 = readl((void const volatile *)tmp___52); tmp___54 = __fswab32(tmp___53); *tmp___51 = tmp___54; cnt = cnt + 1U; ldv_66192: ; if (cnt <= 6U) { goto ldv_66191; } else { } iter_reg = (uint32_t *)(& fw->xmt0_dma_reg); iter_reg = qla24xx_read_window(reg, 30208U, 16U, iter_reg); qla24xx_read_window(reg, 30224U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt1_dma_reg); iter_reg = qla24xx_read_window(reg, 30240U, 16U, iter_reg); qla24xx_read_window(reg, 30256U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt2_dma_reg); iter_reg = qla24xx_read_window(reg, 30272U, 16U, 
iter_reg); qla24xx_read_window(reg, 30288U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt3_dma_reg); iter_reg = qla24xx_read_window(reg, 30304U, 16U, iter_reg); qla24xx_read_window(reg, 30320U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt4_dma_reg); iter_reg = qla24xx_read_window(reg, 30336U, 16U, iter_reg); qla24xx_read_window(reg, 30352U, 16U, iter_reg); qla24xx_read_window(reg, 30368U, 16U, (uint32_t *)(& fw->xmt_data_dma_reg)); iter_reg = (uint32_t *)(& fw->rcvt0_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30464U, 16U, iter_reg); qla24xx_read_window(reg, 30480U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rcvt1_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30496U, 16U, iter_reg); qla24xx_read_window(reg, 30512U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->risc_gp_reg); iter_reg = qla24xx_read_window(reg, 3840U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3856U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3872U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3888U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3904U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3920U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3936U, 16U, iter_reg); qla24xx_read_window(reg, 3952U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->lmc_reg); iter_reg = qla24xx_read_window(reg, 12288U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12304U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12320U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12336U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12352U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12368U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12384U, 16U, iter_reg); qla24xx_read_window(reg, 12400U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fpm_hdw_reg); iter_reg = qla24xx_read_window(reg, 16384U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16400U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16416U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16432U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16448U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16464U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16480U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16496U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16512U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16528U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16544U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16560U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16576U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16592U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16608U, 16U, iter_reg); qla24xx_read_window(reg, 16624U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rq0_array_reg); iter_reg = qla24xx_read_window(reg, 23552U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23568U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23584U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23600U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23616U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23632U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23648U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23664U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23680U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23696U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23712U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23728U, 16U, iter_reg); iter_reg = 
qla24xx_read_window(reg, 23744U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23760U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23776U, 16U, iter_reg); qla24xx_read_window(reg, 23792U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rq1_array_reg); iter_reg = qla24xx_read_window(reg, 23808U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23824U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23840U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23856U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23872U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23888U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23904U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23920U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23936U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23952U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23968U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23984U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24000U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24016U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24032U, 16U, iter_reg); qla24xx_read_window(reg, 24048U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rp0_array_reg); iter_reg = qla24xx_read_window(reg, 24064U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24080U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24096U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24112U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24128U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24144U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24160U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24176U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24192U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24208U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24224U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24240U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24256U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24272U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24288U, 16U, iter_reg); qla24xx_read_window(reg, 24304U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rp1_array_reg); iter_reg = qla24xx_read_window(reg, 24320U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24336U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24352U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24368U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24384U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24400U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24416U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24432U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24448U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24464U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24480U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24496U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24512U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24528U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24544U, 16U, iter_reg); qla24xx_read_window(reg, 24560U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->at0_array_reg); iter_reg = qla24xx_read_window(reg, 28800U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28816U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28832U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28848U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28864U, 16U, iter_reg); 
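/* at0_array and frame-buffer hardware windows. Once the fixed register windows are
 * captured, the code below chains in the multi-queue shadow area via
 * qla25xx_copy_mq(), soft-resets the RISC (falling back to the HCCR "bigger hammer"
 * reset if that fails) and then dumps code RAM, external memory and the
 * request/response/ATIO queues. */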
iter_reg = qla24xx_read_window(reg, 28880U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28896U, 16U, iter_reg); qla24xx_read_window(reg, 28912U, 16U, iter_reg); qla24xx_read_window(reg, 30720U, 16U, (uint32_t *)(& fw->queue_control_reg)); iter_reg = (uint32_t *)(& fw->fb_hdw_reg); iter_reg = qla24xx_read_window(reg, 24576U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24592U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24608U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24624U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24640U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24672U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24688U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24832U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24880U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25008U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25024U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25904U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25920U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25936U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25952U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25968U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25984U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 26000U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 26016U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 26032U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 26048U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 26064U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 26080U, 16U, iter_reg); qla24xx_read_window(reg, 28416U, 16U, iter_reg); nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + (unsigned long )ha->chain_offset, & last_chain); rval = qla24xx_soft_reset(ha); if (rval != 0) { ql_log(1U, vha, 53262, "SOFT RESET FAILED, forcing continuation of dump!!!\n"); rval = 0; ql_log(1U, vha, 53263, "try a bigger hammer!!!\n"); writel(268435456U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); writel(1073741824U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); writel(536870912U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); cnt = 30000U; goto ldv_66195; ldv_66194: __const_udelay(21475UL); cnt = cnt - 1U; ldv_66195: ; if (cnt != 0U) { tmp___55 = readw((void const volatile *)(& reg->mailbox0)); if ((unsigned int )tmp___55 != 0U) { goto ldv_66194; } else { goto ldv_66196; } } else { } ldv_66196: ; if (cnt == 0U) { nxt = (void *)(& fw->code_ram); nxt = nxt + 36864UL; nxt = nxt + (unsigned long )(ha->fw_memory_size - 1048575U); goto copy_queue; } else { set_bit(3L, (unsigned long volatile *)(& ha->fw_dump_cap_flags)); ql_log(1U, vha, 53264, "bigger hammer success?\n"); } } else { } rval = qla24xx_dump_memory(ha, (uint32_t *)(& fw->code_ram), 36864U, & nxt); if (rval != 0) { goto qla83xx_fw_dump_failed_0; } else { } copy_queue: nxt = qla2xxx_copy_queues(ha, nxt); qla24xx_copy_eft(ha, nxt); nxt_chain = qla25xx_copy_fce(ha, nxt_chain, & last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, & last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, & last_chain); if ((unsigned long )last_chain != (unsigned long )((uint32_t *)0U)) { (ha->fw_dump)->version = 
(ha->fw_dump)->version | 128U; *last_chain = *last_chain | 128U; } else { } ha->fw_dump_len = (uint32_t )((long )nxt_chain) - (uint32_t )((long )ha->fw_dump); qla83xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla83xx_fw_dump_failed: ; if (hardware_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } return; } } __inline static int ql_mask_match(uint32_t level ) { { if (ql2xextended_error_logging == 1) { ql2xextended_error_logging = 507510784; } else { } return ((level & (uint32_t )ql2xextended_error_logging) == level); } } void ql_dbg(uint32_t level , scsi_qla_host_t *vha , int32_t id , char const *fmt , ...) { va_list va ; struct va_format vaf ; int tmp ; struct pci_dev const *pdev ; char const *tmp___0 ; { tmp = ql_mask_match(level); if (tmp == 0) { return; } else { } ldv__builtin_va_start((va_list *)(& va)); vaf.fmt = fmt; vaf.va = & va; if ((unsigned long )vha != (unsigned long )((scsi_qla_host_t *)0)) { pdev = (struct pci_dev const *)(vha->hw)->pdev; tmp___0 = dev_name(& pdev->dev); printk("\f%s [%s]-%04x:%ld: %pV", (char *)"qla2xxx", tmp___0, (uint32_t )id + ql_dbg_offset, vha->host_no, & vaf); } else { printk("\f%s [%s]-%04x: : %pV", (char *)"qla2xxx", (char *)"0000:00:00.0", (uint32_t )id + ql_dbg_offset, & vaf); } ldv__builtin_va_end((va_list *)(& va)); return; } } void ql_dbg_pci(uint32_t level , struct pci_dev *pdev , int32_t id , char const *fmt , ...) { va_list va ; struct va_format vaf ; int tmp ; char const *tmp___0 ; { if ((unsigned long )pdev == (unsigned long )((struct pci_dev *)0)) { return; } else { } tmp = ql_mask_match(level); if (tmp == 0) { return; } else { } ldv__builtin_va_start((va_list *)(& va)); vaf.fmt = fmt; vaf.va = & va; tmp___0 = dev_name((struct device const *)(& pdev->dev)); printk("\f%s [%s]-%04x: : %pV", (char *)"qla2xxx", tmp___0, (uint32_t )id + ql_dbg_offset, & vaf); ldv__builtin_va_end((va_list *)(& va)); return; } } void ql_log(uint32_t level , scsi_qla_host_t *vha , int32_t id , char const *fmt , ...) { va_list va ; struct va_format vaf ; char pbuf[128U] ; struct pci_dev const *pdev ; char const *tmp ; { if ((uint32_t )ql_errlev < level) { return; } else { } if ((unsigned long )vha != (unsigned long )((scsi_qla_host_t *)0)) { pdev = (struct pci_dev const *)(vha->hw)->pdev; tmp = dev_name(& pdev->dev); snprintf((char *)(& pbuf), 128UL, "%s [%s]-%04x:%ld: ", (char *)"qla2xxx", tmp, id, vha->host_no); } else { snprintf((char *)(& pbuf), 128UL, "%s [%s]-%04x: : ", (char *)"qla2xxx", (char *)"0000:00:00.0", id); } pbuf[127UL] = 0; ldv__builtin_va_start((va_list *)(& va)); vaf.fmt = fmt; vaf.va = & va; switch (level) { case 0U: printk("\n%s%pV", (char *)(& pbuf), & vaf); goto ldv_66230; case 1U: printk("\v%s%pV", (char *)(& pbuf), & vaf); goto ldv_66230; case 2U: printk("\f%s%pV", (char *)(& pbuf), & vaf); goto ldv_66230; default: printk("\016%s%pV", (char *)(& pbuf), & vaf); goto ldv_66230; } ldv_66230: ldv__builtin_va_end((va_list *)(& va)); return; } } void ql_log_pci(uint32_t level , struct pci_dev *pdev , int32_t id , char const *fmt , ...) 
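/* ql_log_pci(): pci_dev-keyed variant of ql_log(). Messages whose level is above the
 * global ql_errlev are dropped; otherwise the message is prefixed with the driver
 * name and the PCI device name and handed to printk() with a severity prefix chosen
 * from `level'. */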
{ va_list va ; struct va_format vaf ; char pbuf[128U] ; char const *tmp ; { if ((unsigned long )pdev == (unsigned long )((struct pci_dev *)0)) { return; } else { } if ((uint32_t )ql_errlev < level) { return; } else { } tmp = dev_name((struct device const *)(& pdev->dev)); snprintf((char *)(& pbuf), 128UL, "%s [%s]-%04x: : ", (char *)"qla2xxx", tmp, id); pbuf[127UL] = 0; ldv__builtin_va_start((va_list *)(& va)); vaf.fmt = fmt; vaf.va = & va; switch (level) { case 0U: printk("\n%s%pV", (char *)(& pbuf), & vaf); goto ldv_66244; case 1U: printk("\v%s%pV", (char *)(& pbuf), & vaf); goto ldv_66244; case 2U: printk("\f%s%pV", (char *)(& pbuf), & vaf); goto ldv_66244; default: printk("\016%s%pV", (char *)(& pbuf), & vaf); goto ldv_66244; } ldv_66244: ldv__builtin_va_end((va_list *)(& va)); return; } } void ql_dump_regs(uint32_t level , scsi_qla_host_t *vha , int32_t id ) { int i ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; struct device_reg_24xx *reg24 ; struct device_reg_82xx *reg82 ; uint16_t *mbx_reg ; int tmp ; uint16_t *tmp___0 ; unsigned short tmp___1 ; { ha = vha->hw; reg = & (ha->iobase)->isp; reg24 = & (ha->iobase)->isp24; reg82 = & (ha->iobase)->isp82; tmp = ql_mask_match(level); if (tmp == 0) { return; } else { } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { mbx_reg = (uint16_t *)(& reg82->mailbox_in); } else if ((ha->device_type & 134217728U) != 0U) { mbx_reg = & reg24->mailbox0; } else { mbx_reg = (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0; } ql_dbg(level, vha, id, "Mailbox registers:\n"); i = 0; goto ldv_66260; ldv_66259: tmp___0 = mbx_reg; mbx_reg = mbx_reg + 1; tmp___1 = readw((void const volatile *)tmp___0); ql_dbg(level, vha, id, "mbox[%d] 0x%04x\n", i, (int )tmp___1); i = i + 1; ldv_66260: ; if (i <= 5) { goto ldv_66259; } else { } return; } } void ql_dump_buffer(uint32_t level , scsi_qla_host_t *vha , int32_t id , uint8_t *b , uint32_t size ) { uint32_t cnt ; uint8_t c ; int tmp ; uint8_t *tmp___0 ; { tmp = ql_mask_match(level); if (tmp == 0) { return; } else { } ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh Fh\n"); ql_dbg(level, vha, id, "--------------------------------------------------------------\n"); ql_dbg(level, vha, id, " "); cnt = 0U; goto ldv_66272; ldv_66271: tmp___0 = b; b = b + 1; c = *tmp___0; printk("%02x", (unsigned int )c); cnt = cnt + 1U; if ((cnt & 15U) == 0U) { printk("\n"); } else { printk(" "); } ldv_66272: ; if (cnt < size) { goto ldv_66271; } else { } if ((cnt & 15U) != 0U) { ql_dbg(level, vha, id, "\n"); } else { } return; } } int reg_timer_20(struct timer_list *timer ) { { ldv_timer_list_20 = timer; ldv_timer_state_20 = 1; return (0); } } void disable_suitable_timer_20(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_20) { ldv_timer_state_20 = 0; return; } else { } return; } } void activate_pending_timer_20(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_20 == (unsigned long )timer) { if (ldv_timer_state_20 == 2 || pending_flag != 0) { ldv_timer_list_20 = timer; ldv_timer_list_20->data = data; ldv_timer_state_20 = 1; } else { } return; } else { } reg_timer_20(timer); ldv_timer_list_20->data = data; return; } } void choose_timer_20(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_20 = 2; return; } } bool ldv_queue_work_on_145(int ldv_func_arg1 , struct 
workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_146(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_147(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_148(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_149(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_150(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } extern void ___might_sleep(char const * , int , int ) ; extern int scnprintf(char * , size_t , char const * , ...) 
; extern int strncmp(char const * , char const * , __kernel_size_t ) ; bool ldv_queue_work_on_161(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_163(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_162(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_165(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_164(struct workqueue_struct *ldv_func_arg1 ) ; __inline static void outw(unsigned short value , int port ) { { __asm__ volatile ("outw %w0, %w1": : "a" (value), "Nd" (port)); return; } } __inline static unsigned short inw(int port ) { unsigned short value ; { __asm__ volatile ("inw %w1, %w0": "=a" (value): "Nd" (port)); return (value); } } extern int _cond_resched(void) ; void choose_timer_21(struct timer_list *timer ) ; int ldv_scsi_add_host_with_dma_166(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; extern void scsi_unblock_requests(struct Scsi_Host * ) ; extern void scsi_block_requests(struct Scsi_Host * ) ; extern struct fc_vport *fc_vport_create(struct Scsi_Host * , int , struct fc_vport_identifiers * ) ; int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha , struct qla_fcp_prio_cfg *pri_cfg , uint8_t flag ) ; static void qla2x00_lock_nvram_access(struct qla_hw_data *ha ) { uint16_t data ; struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; if (((ha->device_type & 1U) == 0U && (ha->device_type & 2U) == 0U) && (ha->device_type & 4U) == 0U) { data = readw((void const volatile *)(& reg->nvram)); goto ldv_65910; ldv_65909: __const_udelay(429500UL); data = readw((void const volatile *)(& reg->nvram)); ldv_65910: ; if ((int )((short )data) < 0) { goto ldv_65909; } else { } writew(1, (void volatile *)(& reg->u.isp2300.host_semaphore)); readw((void const volatile *)(& reg->u.isp2300.host_semaphore)); __const_udelay(21475UL); data = readw((void const volatile *)(& reg->u.isp2300.host_semaphore)); goto ldv_65913; ldv_65912: __const_udelay(429500UL); writew(1, (void volatile *)(& reg->u.isp2300.host_semaphore)); readw((void const volatile *)(& reg->u.isp2300.host_semaphore)); __const_udelay(21475UL); data = readw((void const volatile *)(& reg->u.isp2300.host_semaphore)); ldv_65913: ; if (((int )data & 1) == 0) { goto ldv_65912; } else { } } else { } return; } } static void qla2x00_unlock_nvram_access(struct qla_hw_data *ha ) { struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; if (((ha->device_type & 1U) == 0U && (ha->device_type & 2U) == 0U) && (ha->device_type & 4U) == 0U) { writew(0, (void volatile *)(& reg->u.isp2300.host_semaphore)); readw((void const volatile *)(& reg->u.isp2300.host_semaphore)); } else { } return; } } static void qla2x00_nv_write(struct qla_hw_data *ha , uint16_t data ) { struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; writew((int )((unsigned int )data | 16386U), (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); __const_udelay(42950UL); writew((int )((unsigned int )data | 16387U), (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); __const_udelay(42950UL); writew((int )((unsigned int )data | 16386U), (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); 
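/* qla2x00_nv_write(ha, data) bit-bangs a single bit onto the serial NVRAM interface:
 * the three writew() calls above present the data bit with what appear to be the
 * write-enable (0x4000) and chip-select (0x2) lines held, pulse the clock line (0x1)
 * high via the 16387U value, and drop it again; each write is read back and followed
 * by a delay to flush the posted PCI write. A sketch of the idiom (bit names are an
 * assumption, not defined in this file):
 *
 *   writew(data | 0x4002, &reg->nvram);   // data presented, clock low
 *   writew(data | 0x4003, &reg->nvram);   // clock high
 *   writew(data | 0x4002, &reg->nvram);   // clock low again
 */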
__const_udelay(42950UL); return; } } static uint16_t qla2x00_nvram_request(struct qla_hw_data *ha , uint32_t nv_cmd ) { uint8_t cnt ; struct device_reg_2xxx *reg ; uint16_t data ; uint16_t reg_data ; { reg = & (ha->iobase)->isp; data = 0U; nv_cmd = nv_cmd << 5; cnt = 0U; goto ldv_65933; ldv_65932: ; if ((int )nv_cmd < 0) { qla2x00_nv_write(ha, 4); } else { qla2x00_nv_write(ha, 0); } nv_cmd = nv_cmd << 1; cnt = (uint8_t )((int )cnt + 1); ldv_65933: ; if ((unsigned int )cnt <= 10U) { goto ldv_65932; } else { } cnt = 0U; goto ldv_65936; ldv_65935: writew(3, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); __const_udelay(42950UL); data = (int )data << 1U; reg_data = readw((void const volatile *)(& reg->nvram)); if (((int )reg_data & 8) != 0) { data = (uint16_t )((unsigned int )data | 1U); } else { } writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); __const_udelay(42950UL); cnt = (uint8_t )((int )cnt + 1); ldv_65936: ; if ((unsigned int )cnt <= 15U) { goto ldv_65935; } else { } writew(0, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); __const_udelay(42950UL); return (data); } } static uint16_t qla2x00_get_nvram_word(struct qla_hw_data *ha , uint32_t addr ) { uint16_t data ; uint32_t nv_cmd ; { nv_cmd = addr << 16; nv_cmd = nv_cmd | 100663296U; data = qla2x00_nvram_request(ha, nv_cmd); return (data); } } static void qla2x00_nv_deselect(struct qla_hw_data *ha ) { struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; writew(0, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); __const_udelay(42950UL); return; } } static void qla2x00_write_nvram_word(struct qla_hw_data *ha , uint32_t addr , uint16_t data ) { int count ; uint16_t word ; uint32_t nv_cmd ; uint32_t wait_cnt ; struct device_reg_2xxx *reg ; scsi_qla_host_t *vha ; void *tmp ; { reg = & (ha->iobase)->isp; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; qla2x00_nv_write(ha, 4); qla2x00_nv_write(ha, 0); qla2x00_nv_write(ha, 0); word = 0U; goto ldv_65960; ldv_65959: qla2x00_nv_write(ha, 4); word = (uint16_t )((int )word + 1); ldv_65960: ; if ((unsigned int )word <= 7U) { goto ldv_65959; } else { } qla2x00_nv_deselect(ha); nv_cmd = (addr << 16) | 83886080U; nv_cmd = (uint32_t )data | nv_cmd; nv_cmd = nv_cmd << 5; count = 0; goto ldv_65963; ldv_65962: ; if ((int )nv_cmd < 0) { qla2x00_nv_write(ha, 4); } else { qla2x00_nv_write(ha, 0); } nv_cmd = nv_cmd << 1; count = count + 1; ldv_65963: ; if (count <= 26) { goto ldv_65962; } else { } qla2x00_nv_deselect(ha); writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); wait_cnt = 20000U; ldv_65966: wait_cnt = wait_cnt - 1U; if (wait_cnt == 0U) { ql_dbg(8388608U, vha, 28813, "NVRAM didn\'t go ready...\n"); goto ldv_65965; } else { } __const_udelay(42950UL); word = readw((void const volatile *)(& reg->nvram)); if (((int )word & 8) == 0) { goto ldv_65966; } else { } ldv_65965: qla2x00_nv_deselect(ha); qla2x00_nv_write(ha, 4); count = 0; goto ldv_65968; ldv_65967: qla2x00_nv_write(ha, 0); count = count + 1; ldv_65968: ; if (count <= 9) { goto ldv_65967; } else { } qla2x00_nv_deselect(ha); return; } } static int qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha , uint32_t addr , uint16_t data , uint32_t tmo ) { int ret ; int count ; uint16_t word ; uint32_t nv_cmd ; struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; ret = 0; qla2x00_nv_write(ha, 4); qla2x00_nv_write(ha, 0); qla2x00_nv_write(ha, 0); word = 0U; 
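/* Write-enable sequence for the serial EEPROM: clock out what looks like the
 * write-enable opcode (1,0,0 on the data line) followed by eight more set bits, then
 * deselect. The goto/label pairs (ldv_659xx) below are CIL's rendering of the
 * original driver's for- and while-loops; the structure otherwise mirrors
 * qla2x00_write_nvram_word() above, with a caller-supplied timeout `tmo' on the
 * ready poll instead of the fixed 20000-iteration wait. */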
goto ldv_65982; ldv_65981: qla2x00_nv_write(ha, 4); word = (uint16_t )((int )word + 1); ldv_65982: ; if ((unsigned int )word <= 7U) { goto ldv_65981; } else { } qla2x00_nv_deselect(ha); nv_cmd = (addr << 16) | 83886080U; nv_cmd = (uint32_t )data | nv_cmd; nv_cmd = nv_cmd << 5; count = 0; goto ldv_65985; ldv_65984: ; if ((int )nv_cmd < 0) { qla2x00_nv_write(ha, 4); } else { qla2x00_nv_write(ha, 0); } nv_cmd = nv_cmd << 1; count = count + 1; ldv_65985: ; if (count <= 26) { goto ldv_65984; } else { } qla2x00_nv_deselect(ha); writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); ldv_65988: __const_udelay(42950UL); word = readw((void const volatile *)(& reg->nvram)); tmo = tmo - 1U; if (tmo == 0U) { ret = 258; goto ldv_65987; } else { } if (((int )word & 8) == 0) { goto ldv_65988; } else { } ldv_65987: qla2x00_nv_deselect(ha); qla2x00_nv_write(ha, 4); count = 0; goto ldv_65990; ldv_65989: qla2x00_nv_write(ha, 0); count = count + 1; ldv_65990: ; if (count <= 9) { goto ldv_65989; } else { } qla2x00_nv_deselect(ha); return (ret); } } static int qla2x00_clear_nvram_protection(struct qla_hw_data *ha ) { int ret ; int stat ; struct device_reg_2xxx *reg ; uint32_t word ; uint32_t wait_cnt ; uint16_t wprot ; uint16_t wprot_old ; scsi_qla_host_t *vha ; void *tmp ; unsigned short tmp___0 ; { reg = & (ha->iobase)->isp; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ret = 258; wprot_old = qla2x00_get_nvram_word(ha, (uint32_t )ha->nvram_base); stat = qla2x00_write_nvram_word_tmo(ha, (uint32_t )ha->nvram_base, 4660, 100000U); wprot = qla2x00_get_nvram_word(ha, (uint32_t )ha->nvram_base); if (stat != 0 || (unsigned int )wprot != 4660U) { qla2x00_nv_write(ha, 4); qla2x00_nv_write(ha, 0); qla2x00_nv_write(ha, 0); word = 0U; goto ldv_66004; ldv_66003: qla2x00_nv_write(ha, 4); word = word + 1U; ldv_66004: ; if (word <= 7U) { goto ldv_66003; } else { } qla2x00_nv_deselect(ha); qla2x00_nv_write(ha, 8196); qla2x00_nv_write(ha, 8192); qla2x00_nv_write(ha, 8192); word = 0U; goto ldv_66007; ldv_66006: qla2x00_nv_write(ha, 8196); word = word + 1U; ldv_66007: ; if (word <= 7U) { goto ldv_66006; } else { } qla2x00_nv_deselect(ha); qla2x00_nv_write(ha, 8196); qla2x00_nv_write(ha, 8196); qla2x00_nv_write(ha, 8196); word = 0U; goto ldv_66010; ldv_66009: qla2x00_nv_write(ha, 8196); word = word + 1U; ldv_66010: ; if (word <= 7U) { goto ldv_66009; } else { } qla2x00_nv_deselect(ha); writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); wait_cnt = 20000U; ldv_66013: wait_cnt = wait_cnt - 1U; if (wait_cnt == 0U) { ql_dbg(8388608U, vha, 28814, "NVRAM didn\'t go ready...\n"); goto ldv_66012; } else { } __const_udelay(42950UL); tmp___0 = readw((void const volatile *)(& reg->nvram)); word = (uint32_t )tmp___0; if ((word & 8U) == 0U) { goto ldv_66013; } else { } ldv_66012: ; if (wait_cnt != 0U) { ret = 0; } else { } } else { qla2x00_write_nvram_word(ha, (uint32_t )ha->nvram_base, (int )wprot_old); } return (ret); } } static void qla2x00_set_nvram_protection(struct qla_hw_data *ha , int stat ) { struct device_reg_2xxx *reg ; uint32_t word ; uint32_t wait_cnt ; scsi_qla_host_t *vha ; void *tmp ; unsigned short tmp___0 ; { reg = & (ha->iobase)->isp; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; if (stat != 0) { return; } else { } qla2x00_nv_write(ha, 4); qla2x00_nv_write(ha, 0); qla2x00_nv_write(ha, 0); word = 0U; goto ldv_66023; ldv_66022: qla2x00_nv_write(ha, 4); word = word + 1U; ldv_66023: ; if (word <= 7U) { goto ldv_66022; } 
else { } qla2x00_nv_deselect(ha); qla2x00_nv_write(ha, 8196); qla2x00_nv_write(ha, 8192); qla2x00_nv_write(ha, 8192); word = 0U; goto ldv_66026; ldv_66025: qla2x00_nv_write(ha, 8196); word = word + 1U; ldv_66026: ; if (word <= 7U) { goto ldv_66025; } else { } qla2x00_nv_deselect(ha); qla2x00_nv_write(ha, 8196); qla2x00_nv_write(ha, 8192); qla2x00_nv_write(ha, 8196); word = 0U; goto ldv_66029; ldv_66028: qla2x00_nv_write(ha, 8192); word = word + 1U; ldv_66029: ; if (word <= 7U) { goto ldv_66028; } else { } qla2x00_nv_deselect(ha); writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); wait_cnt = 20000U; ldv_66032: wait_cnt = wait_cnt - 1U; if (wait_cnt == 0U) { ql_dbg(8388608U, vha, 28815, "NVRAM didn\'t go ready...\n"); goto ldv_66031; } else { } __const_udelay(42950UL); tmp___0 = readw((void const volatile *)(& reg->nvram)); word = (uint32_t )tmp___0; if ((word & 8U) == 0U) { goto ldv_66032; } else { } ldv_66031: ; return; } } __inline static uint32_t flash_conf_addr(struct qla_hw_data *ha , uint32_t faddr ) { { return (ha->flash_conf_off | faddr); } } __inline static uint32_t flash_data_addr(struct qla_hw_data *ha , uint32_t faddr ) { { return (ha->flash_data_off | faddr); } } __inline static uint32_t nvram_conf_addr(struct qla_hw_data *ha , uint32_t naddr ) { { return (ha->nvram_conf_off | naddr); } } __inline static uint32_t nvram_data_addr(struct qla_hw_data *ha , uint32_t naddr ) { { return (ha->nvram_data_off | naddr); } } static uint32_t qla24xx_read_flash_dword(struct qla_hw_data *ha , uint32_t addr ) { int rval ; uint32_t cnt ; uint32_t data ; struct device_reg_24xx *reg ; unsigned int tmp ; { reg = & (ha->iobase)->isp24; writel(addr & 2147483647U, (void volatile *)(& reg->flash_addr)); rval = 0; cnt = 3000U; goto ldv_66059; ldv_66058: ; if (cnt != 0U) { __const_udelay(42950UL); } else { rval = 256; } ___might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_sup.c", 470, 0); _cond_resched(); cnt = cnt - 1U; ldv_66059: tmp = readl((void const volatile *)(& reg->flash_addr)); if ((int )tmp >= 0 && rval == 0) { goto ldv_66058; } else { } data = 3735936685U; if (rval == 0) { data = readl((void const volatile *)(& reg->flash_data)); } else { } return (data); } } uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *vha , uint32_t *dwptr , uint32_t faddr , uint32_t dwords ) { uint32_t i ; struct qla_hw_data *ha ; uint32_t tmp ; { ha = vha->hw; i = 0U; goto ldv_66070; ldv_66069: tmp = flash_data_addr(ha, faddr); *(dwptr + (unsigned long )i) = qla24xx_read_flash_dword(ha, tmp); i = i + 1U; faddr = faddr + 1U; ldv_66070: ; if (i < dwords) { goto ldv_66069; } else { } return (dwptr); } } static int qla24xx_write_flash_dword(struct qla_hw_data *ha , uint32_t addr , uint32_t data ) { int rval ; uint32_t cnt ; struct device_reg_24xx *reg ; unsigned int tmp ; { reg = & (ha->iobase)->isp24; writel(data, (void volatile *)(& reg->flash_data)); readl((void const volatile *)(& reg->flash_data)); writel(addr | 2147483648U, (void volatile *)(& reg->flash_addr)); rval = 0; cnt = 500000U; goto ldv_66082; ldv_66081: ; if (cnt != 0U) { __const_udelay(42950UL); } else { rval = 256; } ___might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_sup.c", 514, 0); 
_cond_resched(); cnt = cnt - 1U; ldv_66082: tmp = readl((void const volatile *)(& reg->flash_addr)); if ((int )tmp < 0 && rval == 0) { goto ldv_66081; } else { } return (rval); } } static void qla24xx_get_flash_manufacturer(struct qla_hw_data *ha , uint8_t *man_id , uint8_t *flash_id ) { uint32_t ids ; uint32_t tmp ; uint32_t tmp___0 ; { tmp = flash_conf_addr(ha, 939U); ids = qla24xx_read_flash_dword(ha, tmp); *man_id = (unsigned char )ids; *flash_id = (unsigned char )((int )((unsigned short )ids) >> 8); if (ids != 3735936685U && ((unsigned int )*man_id == 0U || (unsigned int )*flash_id == 0U)) { tmp___0 = flash_conf_addr(ha, 159U); ids = qla24xx_read_flash_dword(ha, tmp___0); *man_id = (unsigned char )ids; *flash_id = (unsigned char )((int )((unsigned short )ids) >> 8); } else { } return; } } static int qla2xxx_find_flt_start(scsi_qla_host_t *vha , uint32_t *start ) { char const *loc ; char const *locations[2U] ; uint32_t pcihdr ; uint32_t pcids ; uint32_t *dcode ; uint8_t *buf ; uint8_t *bcode ; uint8_t last_image ; uint16_t cnt ; uint16_t chksum ; uint16_t *wptr ; struct qla_flt_location *fltl ; struct qla_hw_data *ha ; struct req_que *req ; uint16_t *tmp ; { locations[0] = "DEF"; locations[1] = "PCI"; ha = vha->hw; req = *(ha->req_q_map); loc = locations[0]; *start = 0U; if ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) { *start = 70656U; } else if ((ha->device_type & 2048U) != 0U) { *start = 328704U; } else if ((ha->device_type & 8192U) != 0U) { *start = 885760U; } else if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { *start = 1033216U; goto end; } else if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { *start = 1033216U; goto end; } else { } buf = (uint8_t *)req->ring; dcode = (uint32_t *)req->ring; pcihdr = 0U; last_image = 1U; ldv_66109: qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 32U); bcode = buf + ((unsigned long )pcihdr & 3UL); if ((unsigned int )*bcode != 85U || (unsigned int )*(bcode + 1UL) != 170U) { goto end; } else { } pcids = (uint32_t )(((int )*(bcode + 25UL) << 8) | (int )*(bcode + 24UL)) + pcihdr; qla24xx_read_flash_data(vha, dcode, pcids >> 2, 32U); bcode = buf + ((unsigned long )pcihdr & 3UL); if ((((unsigned int )*bcode != 80U || (unsigned int )*(bcode + 1UL) != 67U) || (unsigned int )*(bcode + 2UL) != 73U) || (unsigned int )*(bcode + 3UL) != 82U) { goto end; } else { } last_image = (unsigned int )*(bcode + 21UL) & 128U; pcihdr = (uint32_t )((((int )*(bcode + 17UL) << 8) | (int )*(bcode + 16UL)) * 512) + pcihdr; if ((unsigned int )last_image == 0U) { goto ldv_66109; } else { } fltl = (struct qla_flt_location *)req->ring; qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 4U); if ((((unsigned int )fltl->sig[0] != 81U || (unsigned int )fltl->sig[1] != 70U) || (unsigned int )fltl->sig[2] != 76U) || (unsigned int )fltl->sig[3] != 84U) { goto end; } else { } wptr = (uint16_t *)req->ring; cnt = 8U; chksum = 0U; goto ldv_66112; ldv_66111: tmp = wptr; wptr = wptr + 1; chksum = (int )*tmp + (int )chksum; cnt = (uint16_t )((int )cnt - 1); ldv_66112: ; if ((unsigned int )cnt != 0U) { goto ldv_66111; } else { } if ((unsigned int )chksum != 0U) { ql_log(0U, vha, 69, "Inconsistent FLTL detected: checksum=0x%x.\n", (int )chksum); ql_dump_buffer(1073872896U, vha, 270, buf, 16U); return (258); } else { } loc = 
locations[1]; *start = (uint32_t )((((int )fltl->start_hi << 16) | (int )fltl->start_lo) >> 2); end: ql_dbg(1073741824U, vha, 70, "FLTL[%s] = 0x%x.\n", loc, *start); return (0); } } static void qla2xxx_get_flt_info(scsi_qla_host_t *vha , uint32_t flt_addr ) { char const *loc ; char const *locations[2U] ; uint32_t def_fw[3U] ; uint32_t def_boot[3U] ; uint32_t def_vpd_nvram[3U] ; uint32_t def_vpd0[3U] ; uint32_t def_vpd1[3U] ; uint32_t def_nvram0[3U] ; uint32_t def_nvram1[3U] ; uint32_t def_fdt[3U] ; uint32_t def_npiv_conf0[3U] ; uint32_t def_npiv_conf1[3U] ; uint32_t fcp_prio_cfg0[3U] ; uint32_t fcp_prio_cfg1[3U] ; uint32_t def ; uint16_t *wptr ; uint16_t cnt ; uint16_t chksum ; uint32_t start ; struct qla_flt_header *flt ; struct qla_flt_region *region ; struct qla_hw_data *ha ; struct req_que *req ; uint16_t *tmp ; { locations[0] = "DEF"; locations[1] = "FLT"; def_fw[0] = 131072U; def_fw[1] = 131072U; def_fw[2] = 655360U; def_boot[0] = 0U; def_boot[1] = 0U; def_boot[2] = 524288U; def_vpd_nvram[0] = 294912U; def_vpd_nvram[1] = 294912U; def_vpd_nvram[2] = 851968U; def_vpd0[0] = 0U; def_vpd0[1] = 0U; def_vpd0[2] = 851968U; def_vpd1[0] = 0U; def_vpd1[1] = 0U; def_vpd1[2] = 852992U; def_nvram0[0] = 0U; def_nvram0[1] = 0U; def_nvram0[2] = 852096U; def_nvram1[0] = 0U; def_nvram1[1] = 0U; def_nvram1[2] = 852352U; def_fdt[0] = 69632U; def_fdt[1] = 327680U; def_fdt[2] = 884736U; def_npiv_conf0[0] = 90112U; def_npiv_conf0[1] = 376832U; def_npiv_conf0[2] = 856064U; def_npiv_conf1[0] = 94208U; def_npiv_conf1[1] = 380928U; def_npiv_conf1[2] = 860160U; fcp_prio_cfg0[0] = 65536U; fcp_prio_cfg0[1] = 245760U; fcp_prio_cfg0[2] = 0U; fcp_prio_cfg1[0] = 73728U; fcp_prio_cfg1[1] = 253952U; fcp_prio_cfg1[2] = 0U; ha = vha->hw; req = *(ha->req_q_map); def = 0U; if ((ha->device_type & 2048U) != 0U) { def = 1U; } else if ((ha->device_type & 8192U) != 0U) { def = 2U; } else { } ha->flt_region_fcp_prio = (unsigned int )ha->port_no == 0U ? 
fcp_prio_cfg0[def] : fcp_prio_cfg1[def]; ha->flt_region_flt = flt_addr; wptr = (uint16_t *)req->ring; flt = (struct qla_flt_header *)req->ring; region = (struct qla_flt_region *)flt + 1U; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)req->ring, flt_addr << 2, 4096U); if ((unsigned int )*wptr == 65535U) { goto no_flash_data; } else { } if ((unsigned int )flt->version != 1U) { ql_log(1U, vha, 71, "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n", (int )flt->version, (int )flt->length, (int )flt->checksum); goto no_flash_data; } else { } cnt = (uint16_t )(((unsigned long )flt->length + 8UL) >> 1); chksum = 0U; goto ldv_66143; ldv_66142: tmp = wptr; wptr = wptr + 1; chksum = (int )*tmp + (int )chksum; cnt = (uint16_t )((int )cnt - 1); ldv_66143: ; if ((unsigned int )cnt != 0U) { goto ldv_66142; } else { } if ((unsigned int )chksum != 0U) { ql_log(0U, vha, 72, "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n", (int )flt->version, (int )flt->length, (int )flt->checksum); goto no_flash_data; } else { } loc = locations[1]; cnt = (uint16_t )((unsigned int )flt->length / 16U); goto ldv_66173; ldv_66172: start = region->start >> 2; ql_dbg(1073741824U, vha, 73, "FLT[%02x]: start=0x%x end=0x%x size=0x%x.\n", region->code & 255U, start, region->end >> 2, region->size); switch (region->code & 255U) { case 164U: ; if ((ha->device_type & 65536U) == 0U) { goto ldv_66146; } else { } ha->flt_region_fw = start; goto ldv_66146; case 1U: ; if ((ha->device_type & 65536U) != 0U) { goto ldv_66146; } else { } ha->flt_region_fw = start; goto ldv_66146; case 7U: ha->flt_region_boot = start; goto ldv_66146; case 20U: ; if ((ha->device_type & 65536U) != 0U) { goto ldv_66146; } else { } ha->flt_region_vpd_nvram = start; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { goto ldv_66146; } else { } if ((unsigned int )ha->port_no == 0U) { ha->flt_region_vpd = start; } else { } goto ldv_66146; case 22U: ; if (((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) || (ha->device_type & 65536U) != 0U) { goto ldv_66146; } else { } if ((unsigned int )ha->port_no == 1U) { ha->flt_region_vpd = start; } else { } goto ldv_66146; case 212U: ; if ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U) { goto ldv_66146; } else { } if ((unsigned int )ha->port_no == 2U) { ha->flt_region_vpd = start; } else { } goto ldv_66146; case 214U: ; if ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U) { goto ldv_66146; } else { } if ((unsigned int )ha->port_no == 3U) { ha->flt_region_vpd = start; } else { } goto ldv_66146; case 21U: ; if ((ha->device_type & 65536U) != 0U) { goto ldv_66146; } else { } if ((unsigned int )ha->port_no == 0U) { ha->flt_region_nvram = start; } else { } goto ldv_66146; case 23U: ; if ((ha->device_type & 65536U) != 0U) { goto ldv_66146; } else { } if ((unsigned int )ha->port_no == 1U) { ha->flt_region_nvram = start; } else { } goto ldv_66146; case 213U: ; if ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U) { goto ldv_66146; } else { } if ((unsigned int )ha->port_no == 2U) { ha->flt_region_nvram = start; } else { } goto ldv_66146; case 215U: ; if ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U) { goto ldv_66146; } else { } if ((unsigned int )ha->port_no == 3U) { ha->flt_region_nvram = start; } else { } goto ldv_66146; case 26U: ha->flt_region_fdt = start; goto ldv_66146; case 41U: ; if ((unsigned int )ha->port_no == 0U) { ha->flt_region_npiv_conf = 
start; } else { } goto ldv_66146; case 42U: ; if ((unsigned int )ha->port_no == 1U) { ha->flt_region_npiv_conf = start; } else { } goto ldv_66146; case 47U: ha->flt_region_gold_fw = start; goto ldv_66146; case 135U: ; if ((unsigned int )ha->port_no == 0U) { ha->flt_region_fcp_prio = start; } else { } goto ldv_66146; case 136U: ; if ((unsigned int )ha->port_no == 1U) { ha->flt_region_fcp_prio = start; } else { } goto ldv_66146; case 120U: ha->flt_region_boot = start; goto ldv_66146; case 162U: ; if ((ha->device_type & 262144U) != 0U) { ha->flt_region_boot = start; } else { } goto ldv_66146; case 116U: ha->flt_region_fw = start; goto ldv_66146; case 151U: ; if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { ha->flt_region_fw = start; } else { } goto ldv_66146; case 117U: ha->flt_region_gold_fw = start; goto ldv_66146; case 114U: ha->flt_region_bootload = start; goto ldv_66146; case 129U: ; if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { ha->flt_region_vpd = start; } else { } goto ldv_66146; case 170U: ; if ((ha->device_type & 65536U) == 0U && (ha->device_type & 262144U) == 0U) { goto ldv_66146; } else { } if ((unsigned int )ha->port_no == 0U) { ha->flt_region_nvram = start; } else { } goto ldv_66146; case 172U: ; if ((ha->device_type & 65536U) == 0U && (ha->device_type & 262144U) == 0U) { goto ldv_66146; } else { } if ((unsigned int )ha->port_no == 1U) { ha->flt_region_nvram = start; } else { } goto ldv_66146; } ldv_66146: cnt = (uint16_t )((int )cnt - 1); region = region + 1; ldv_66173: ; if ((unsigned int )cnt != 0U) { goto ldv_66172; } else { } goto done; no_flash_data: loc = locations[0]; ha->flt_region_fw = def_fw[def]; ha->flt_region_boot = def_boot[def]; ha->flt_region_vpd_nvram = def_vpd_nvram[def]; ha->flt_region_vpd = (unsigned int )ha->port_no == 0U ? def_vpd0[def] : def_vpd1[def]; ha->flt_region_nvram = (unsigned int )ha->port_no == 0U ? def_nvram0[def] : def_nvram1[def]; ha->flt_region_fdt = def_fdt[def]; ha->flt_region_npiv_conf = (unsigned int )ha->port_no == 0U ? 
def_npiv_conf0[def] : def_npiv_conf1[def]; done: ql_dbg(1073741824U, vha, 74, "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n", loc, ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram, ha->flt_region_vpd, ha->flt_region_nvram, ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf, ha->flt_region_fcp_prio); return; } } static void qla2xxx_get_fdt_info(scsi_qla_host_t *vha ) { char const *loc ; char const *locations[2U] ; uint16_t cnt ; uint16_t chksum ; uint16_t *wptr ; struct qla_fdt_layout *fdt ; uint8_t man_id ; uint8_t flash_id ; uint16_t mid ; uint16_t fid ; struct qla_hw_data *ha ; struct req_que *req ; uint16_t *tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; { locations[0] = "MID"; locations[1] = "FDT"; mid = 0U; fid = 0U; ha = vha->hw; req = *(ha->req_q_map); wptr = (uint16_t *)req->ring; fdt = (struct qla_fdt_layout *)req->ring; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)req->ring, ha->flt_region_fdt << 2, 4096U); if ((unsigned int )*wptr == 65535U) { goto no_flash_data; } else { } if ((((unsigned int )fdt->sig[0] != 81U || (unsigned int )fdt->sig[1] != 76U) || (unsigned int )fdt->sig[2] != 73U) || (unsigned int )fdt->sig[3] != 68U) { goto no_flash_data; } else { } cnt = 0U; chksum = 0U; goto ldv_66193; ldv_66192: tmp = wptr; wptr = wptr + 1; chksum = (int )*tmp + (int )chksum; cnt = (uint16_t )((int )cnt + 1); ldv_66193: ; if ((unsigned int )cnt <= 63U) { goto ldv_66192; } else { } if ((unsigned int )chksum != 0U) { ql_dbg(1073741824U, vha, 76, "Inconsistent FDT detected: checksum=0x%x id=%c version0x%x.\n", (int )chksum, (int )fdt->sig[0], (int )fdt->version); ql_dump_buffer(1073872896U, vha, 275, (uint8_t *)fdt, 128U); goto no_flash_data; } else { } loc = locations[1]; mid = fdt->man_id; fid = fdt->id; ha->fdt_wrt_disable = (uint32_t )fdt->wrt_disable_bits; ha->fdt_wrt_enable = (uint32_t )fdt->wrt_enable_bits; ha->fdt_wrt_sts_reg_cmd = (uint32_t )fdt->wrt_sts_reg_cmd; if ((ha->device_type & 262144U) != 0U) { ha->fdt_erase_cmd = (uint32_t )fdt->erase_cmd; } else { ha->fdt_erase_cmd = flash_conf_addr(ha, (uint32_t )((int )fdt->erase_cmd | 768)); } ha->fdt_block_size = fdt->block_size; if ((unsigned int )fdt->unprotect_sec_cmd != 0U) { ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, (uint32_t )((int )fdt->unprotect_sec_cmd | 768)); if ((unsigned int )fdt->protect_sec_cmd != 0U) { tmp___0 = flash_conf_addr(ha, (uint32_t )((int )fdt->protect_sec_cmd | 768)); ha->fdt_protect_sec_cmd = tmp___0; } else { tmp___1 = flash_conf_addr(ha, 822U); ha->fdt_protect_sec_cmd = tmp___1; } } else { } goto done; no_flash_data: loc = locations[0]; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { ha->fdt_block_size = 65536U; goto done; } else { } qla24xx_get_flash_manufacturer(ha, & man_id, & flash_id); mid = (uint16_t )man_id; fid = (uint16_t )flash_id; ha->fdt_wrt_disable = 156U; ha->fdt_erase_cmd = flash_conf_addr(ha, 984U); switch ((int )man_id) { case 191: ; if ((unsigned int )flash_id == 142U) { ha->fdt_block_size = 65536U; } else { ha->fdt_block_size = 32768U; } if ((unsigned int )flash_id == 128U) { ha->fdt_erase_cmd = flash_conf_addr(ha, 850U); } else { } goto ldv_66197; case 19: ha->fdt_block_size = 65536U; goto ldv_66197; case 31: ha->fdt_block_size = 4096U; ha->fdt_erase_cmd = flash_conf_addr(ha, 800U); ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 825U); ha->fdt_protect_sec_cmd = flash_conf_addr(ha, 822U); goto ldv_66197; default: ha->fdt_block_size = 65536U; goto 
ldv_66197; } ldv_66197: ; done: ql_dbg(1073741824U, vha, 77, "FDT[%s]: (0x%x/0x%x) erase=0x%x pr=%x wrtd=0x%x blk=0x%x.\n", loc, (int )mid, (int )fid, ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, ha->fdt_wrt_disable, ha->fdt_block_size); return; } } static void qla2xxx_get_idc_param(scsi_qla_host_t *vha ) { uint32_t *wptr ; struct qla_hw_data *ha ; struct req_que *req ; uint32_t *tmp ; { ha = vha->hw; req = *(ha->req_q_map); if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { return; } else { } wptr = (uint32_t *)req->ring; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)req->ring, 4098140U, 8U); if (*wptr == 4294967295U) { ha->fcoe_dev_init_timeout = 30U; ha->fcoe_reset_timeout = 10U; } else { tmp = wptr; wptr = wptr + 1; ha->fcoe_dev_init_timeout = *tmp; ha->fcoe_reset_timeout = *wptr; } ql_dbg(1073741824U, vha, 78, "fcoe_dev_init_timeout=%d fcoe_reset_timeout=%d.\n", ha->fcoe_dev_init_timeout, ha->fcoe_reset_timeout); return; } } int qla2xxx_get_flash_info(scsi_qla_host_t *vha ) { int ret ; uint32_t flt_addr ; struct qla_hw_data *ha ; { ha = vha->hw; if ((((((((ha->device_type & 128U) == 0U && (ha->device_type & 256U) == 0U) && ((ha->device_type & 512U) == 0U && (ha->device_type & 1024U) == 0U)) && (ha->device_type & 4096U) == 0U) && (ha->device_type & 2048U) == 0U) && ((((ha->device_type & 8192U) == 0U && (ha->device_type & 16384U) == 0U) && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U)) && (ha->device_type & 32768U) == 0U) && ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U)) { return (0); } else { } ret = qla2xxx_find_flt_start(vha, & flt_addr); if (ret != 0) { return (ret); } else { } qla2xxx_get_flt_info(vha, flt_addr); qla2xxx_get_fdt_info(vha); qla2xxx_get_idc_param(vha); return (0); } } void qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha ) { void *data ; uint16_t *wptr ; uint16_t cnt ; uint16_t chksum ; int i ; struct qla_npiv_header hdr ; struct qla_npiv_entry *entry ; struct qla_hw_data *ha ; uint16_t *tmp ; uint16_t flags ; struct fc_vport_identifiers vid ; struct fc_vport *vport ; { ha = vha->hw; if (((((((ha->device_type & 128U) == 0U && (ha->device_type & 256U) == 0U) && ((ha->device_type & 512U) == 0U && (ha->device_type & 1024U) == 0U)) && (ha->device_type & 4096U) == 0U) && (ha->device_type & 2048U) == 0U) && ((((ha->device_type & 8192U) == 0U && (ha->device_type & 16384U) == 0U) && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U)) && (ha->device_type & 32768U) == 0U) { return; } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { return; } else { } if ((ha->device_type & 262144U) != 0U) { return; } else { } (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)(& hdr), ha->flt_region_npiv_conf << 2, 16U); if ((unsigned int )hdr.version == 65535U) { return; } else { } if ((unsigned int )hdr.version != 1U) { ql_dbg(8388608U, vha, 28816, "Unsupported NPIV-Config detected: version=0x%x entries=0x%x checksum=0x%x.\n", (int )hdr.version, (int )hdr.entries, (int )hdr.checksum); return; } else { } data = kmalloc(16384UL, 208U); if ((unsigned long )data == (unsigned long )((void *)0)) { ql_log(1U, vha, 28817, "Unable to allocate memory for data.\n"); return; } else { } (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)data, ha->flt_region_npiv_conf << 2, 16384U); cnt = (uint16_t )(((unsigned long )hdr.entries * 24UL + 16UL) >> 1); wptr = (uint16_t *)data; chksum = 0U; goto ldv_66225; ldv_66224: tmp = wptr; wptr = wptr + 1; chksum = (int )*tmp + (int )chksum; cnt = (uint16_t )((int )cnt - 1); 
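/* ldv_66224/ldv_66225 form the checksum loop of qla2xxx_flash_npiv_conf():
   cnt was sized to cover the NPIV-Config header plus all entries in 16-bit
   words, and each word is summed into chksum.  A non-zero 16-bit sum is
   treated below as a corrupt NPIV table and the buffer is freed without
   creating any vports. */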
ldv_66225: ; if ((unsigned int )cnt != 0U) { goto ldv_66224; } else { } if ((unsigned int )chksum != 0U) { ql_dbg(8388608U, vha, 28818, "Inconsistent NPIV-Config detected: version=0x%x entries=0x%x checksum=0x%x.\n", (int )hdr.version, (int )hdr.entries, (int )hdr.checksum); goto done; } else { } entry = (struct qla_npiv_entry *)data + 16U; cnt = hdr.entries; i = 0; goto ldv_66233; ldv_66232: memcpy((void *)ha->npiv_info + (unsigned long )i, (void const *)entry, 24UL); flags = entry->flags; if ((unsigned int )flags == 65535U) { goto ldv_66231; } else { } if (((int )flags & 1) == 0) { goto ldv_66231; } else { } memset((void *)(& vid), 0, 96UL); vid.roles = 2U; vid.vport_type = 7; vid.disable = 0; vid.port_name = wwn_to_u64((u8 *)(& entry->port_name)); vid.node_name = wwn_to_u64((u8 *)(& entry->node_name)); ql_dbg(8388608U, vha, 28819, "NPIV[%02x]: wwpn=%llx wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", (int )cnt, vid.port_name, vid.node_name, (int )entry->vf_id, (int )entry->q_qos, (int )entry->f_qos); if (i <= 31) { vport = fc_vport_create(vha->host, 0, & vid); if ((unsigned long )vport == (unsigned long )((struct fc_vport *)0)) { ql_log(1U, vha, 28820, "NPIV-Config Failed to create vport [%02x]: wwpn=%llx wwnn=%llx.\n", (int )cnt, vid.port_name, vid.node_name); } else { } } else { } ldv_66231: cnt = (uint16_t )((int )cnt - 1); entry = entry + 1; i = i + 1; ldv_66233: ; if ((unsigned int )cnt != 0U) { goto ldv_66232; } else { } done: kfree((void const *)data); return; } } static int qla24xx_unprotect_flash(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct device_reg_24xx *reg ; int tmp ; unsigned int tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; if (*((unsigned long *)ha + 2UL) != 0UL) { tmp = qla81xx_fac_do_write_enable(vha, 1); return (tmp); } else { } tmp___0 = readl((void const volatile *)(& reg->ctrl_status)); writel(tmp___0 | 2U, (void volatile *)(& reg->ctrl_status)); readl((void const volatile *)(& reg->ctrl_status)); if (ha->fdt_wrt_disable == 0U) { goto done; } else { } tmp___1 = flash_conf_addr(ha, 257U); qla24xx_write_flash_dword(ha, tmp___1, 0U); tmp___2 = flash_conf_addr(ha, 257U); qla24xx_write_flash_dword(ha, tmp___2, 0U); done: ; return (0); } } static int qla24xx_protect_flash(scsi_qla_host_t *vha ) { uint32_t cnt ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; int tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; unsigned int tmp___3 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; if (*((unsigned long *)ha + 2UL) != 0UL) { tmp = qla81xx_fac_do_write_enable(vha, 0); return (tmp); } else { } if (ha->fdt_wrt_disable == 0U) { goto skip_wrt_protect; } else { } tmp___0 = flash_conf_addr(ha, 257U); qla24xx_write_flash_dword(ha, tmp___0, ha->fdt_wrt_disable); cnt = 300U; goto ldv_66249; ldv_66248: __const_udelay(42950UL); cnt = cnt - 1U; ldv_66249: ; if (cnt != 0U) { tmp___1 = flash_conf_addr(ha, 5U); tmp___2 = qla24xx_read_flash_dword(ha, tmp___1); if ((int )tmp___2 & 1) { goto ldv_66248; } else { goto ldv_66250; } } else { } ldv_66250: ; skip_wrt_protect: tmp___3 = readl((void const volatile *)(& reg->ctrl_status)); writel(tmp___3 & 4294967293U, (void volatile *)(& reg->ctrl_status)); readl((void const volatile *)(& reg->ctrl_status)); return (0); } } static int qla24xx_erase_sector(scsi_qla_host_t *vha , uint32_t fdata ) { struct qla_hw_data *ha ; uint32_t start ; uint32_t finish ; uint32_t tmp ; uint32_t tmp___0 ; int tmp___1 ; int tmp___2 ; { ha = vha->hw; if (*((unsigned long *)ha + 2UL) != 0UL) { 
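/* FAC branch of qla24xx_erase_sector(): the "*((unsigned long *)ha + 2UL)"
   test above is CIL's lowering of a flags bitfield in struct qla_hw_data
   (apparently flags.fac_supported in the original driver).  When set, the
   sector is erased via the qla81xx_fac_erase_sector() mailbox interface
   using the dword-aligned start/finish addresses computed just below;
   otherwise the legacy path issues fdt_erase_cmd with a byte-swapped sector
   address. */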
start = fdata >> 2; finish = ((ha->fdt_block_size >> 2) + start) - 1U; tmp = flash_data_addr(ha, finish); tmp___0 = flash_data_addr(ha, start); tmp___1 = qla81xx_fac_erase_sector(vha, tmp___0, tmp); return (tmp___1); } else { } tmp___2 = qla24xx_write_flash_dword(ha, ha->fdt_erase_cmd, ((fdata & 65280U) | ((fdata << 16) & 16711680U)) | ((fdata >> 16) & 255U)); return (tmp___2); } } static int qla24xx_write_flash_data(scsi_qla_host_t *vha , uint32_t *dwptr , uint32_t faddr , uint32_t dwords ) { int ret ; uint32_t liter ; uint32_t sec_mask ; uint32_t rest_addr ; uint32_t fdata ; dma_addr_t optrom_dma ; void *optrom ; struct qla_hw_data *ha ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; { optrom = (void *)0; ha = vha->hw; if ((((((ha->device_type & 2048U) != 0U || (ha->device_type & 8192U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) && (faddr & 4095U) == 0U) && dwords > 1024U) { optrom = dma_alloc_attrs(& (ha->pdev)->dev, 4096UL, & optrom_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )optrom == (unsigned long )((void *)0)) { ql_log(1U, vha, 28821, "Unable to allocate memory for optrom burst write (%x KB).\n", 4); } else { } } else { } rest_addr = (ha->fdt_block_size >> 2) - 1U; sec_mask = ~ rest_addr; ret = qla24xx_unprotect_flash(vha); if (ret != 0) { ql_log(1U, vha, 28822, "Unable to unprotect flash for update.\n"); goto done; } else { } liter = 0U; goto ldv_66276; ldv_66275: fdata = (faddr & sec_mask) << 2; if ((faddr & rest_addr) == 0U) { if (ha->fdt_unprotect_sec_cmd != 0U) { qla24xx_write_flash_dword(ha, ha->fdt_unprotect_sec_cmd, ((fdata & 65280U) | ((fdata << 16) & 16711680U)) | ((fdata >> 16) & 255U)); } else { } ret = qla24xx_erase_sector(vha, fdata); if (ret != 0) { ql_dbg(8388608U, vha, 28679, "Unable to erase erase sector: address=%x.\n", faddr); goto ldv_66273; } else { } } else { } if ((unsigned long )optrom != (unsigned long )((void *)0) && liter + 1024U <= dwords) { memcpy(optrom, (void const *)dwptr, 4096UL); tmp = flash_data_addr(ha, faddr); ret = qla2x00_load_ram(vha, optrom_dma, tmp, 1024U); if (ret != 0) { tmp___0 = flash_data_addr(ha, faddr); ql_log(1U, vha, 28823, "Unable to burst-write optrom segment (%x/%x/%llx).\n", ret, tmp___0, optrom_dma); ql_log(1U, vha, 28824, "Reverting to slow-write.\n"); dma_free_attrs(& (ha->pdev)->dev, 4096UL, optrom, optrom_dma, (struct dma_attrs *)0); optrom = (void *)0; } else { liter = liter + 1023U; faddr = faddr + 1023U; dwptr = dwptr + 1023UL; goto ldv_66274; } } else { } tmp___1 = flash_data_addr(ha, faddr); ret = qla24xx_write_flash_dword(ha, tmp___1, *dwptr); if (ret != 0) { ql_dbg(8388608U, vha, 28678, "Unable to program flash address=%x data=%x.\n", faddr, *dwptr); goto ldv_66273; } else { } if (ha->fdt_unprotect_sec_cmd != 0U && (faddr & rest_addr) == rest_addr) { qla24xx_write_flash_dword(ha, ha->fdt_protect_sec_cmd, ((fdata & 65280U) | ((fdata << 16) & 16711680U)) | ((fdata >> 16) & 255U)); } else { } ldv_66274: liter = liter + 1U; faddr = faddr + 1U; dwptr = dwptr + 1; ldv_66276: ; if (liter < dwords) { goto ldv_66275; } else { } ldv_66273: ret = qla24xx_protect_flash(vha); if (ret != 0) { ql_log(1U, vha, 28825, "Unable to protect flash after update.\n"); } else { } done: ; if ((unsigned long )optrom != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 4096UL, optrom, optrom_dma, (struct dma_attrs *)0); } else { } return (ret); } } uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t 
*vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) { uint32_t i ; uint16_t *wptr ; struct qla_hw_data *ha ; { ha = vha->hw; wptr = (uint16_t *)buf; qla2x00_lock_nvram_access(ha); i = 0U; goto ldv_66287; ldv_66286: *(wptr + (unsigned long )i) = qla2x00_get_nvram_word(ha, naddr); i = i + 1U; naddr = naddr + 1U; ldv_66287: ; if (bytes >> 1 > i) { goto ldv_66286; } else { } qla2x00_unlock_nvram_access(ha); return (buf); } } uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) { uint32_t i ; uint32_t *dwptr ; struct qla_hw_data *ha ; uint32_t tmp ; { ha = vha->hw; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return (buf); } else { } dwptr = (uint32_t *)buf; i = 0U; goto ldv_66299; ldv_66298: tmp = nvram_data_addr(ha, naddr); *(dwptr + (unsigned long )i) = qla24xx_read_flash_dword(ha, tmp); i = i + 1U; naddr = naddr + 1U; ldv_66299: ; if (bytes >> 2 > i) { goto ldv_66298; } else { } return (buf); } } int qla2x00_write_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) { int ret ; int stat ; uint32_t i ; uint16_t *wptr ; unsigned long flags ; struct qla_hw_data *ha ; raw_spinlock_t *tmp ; { ha = vha->hw; ret = 0; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); qla2x00_lock_nvram_access(ha); stat = qla2x00_clear_nvram_protection(ha); wptr = (uint16_t *)buf; i = 0U; goto ldv_66317; ldv_66316: qla2x00_write_nvram_word(ha, naddr, (int )*wptr); wptr = wptr + 1; i = i + 1U; naddr = naddr + 1U; ldv_66317: ; if (bytes >> 1 > i) { goto ldv_66316; } else { } qla2x00_set_nvram_protection(ha, stat); qla2x00_unlock_nvram_access(ha); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (ret); } } int qla24xx_write_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) { int ret ; uint32_t i ; uint32_t *dwptr ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; unsigned int tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; unsigned int tmp___4 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; ret = 0; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return (ret); } else { } tmp = readl((void const volatile *)(& reg->ctrl_status)); writel(tmp | 2U, (void volatile *)(& reg->ctrl_status)); readl((void const volatile *)(& reg->ctrl_status)); tmp___0 = nvram_conf_addr(ha, 257U); qla24xx_write_flash_dword(ha, tmp___0, 0U); tmp___1 = nvram_conf_addr(ha, 257U); qla24xx_write_flash_dword(ha, tmp___1, 0U); dwptr = (uint32_t *)buf; i = 0U; goto ldv_66332; ldv_66331: tmp___2 = nvram_data_addr(ha, naddr); ret = qla24xx_write_flash_dword(ha, tmp___2, *dwptr); if (ret != 0) { ql_dbg(8388608U, vha, 28826, "Unable to program nvram address=%x data=%x.\n", naddr, *dwptr); goto ldv_66330; } else { } i = i + 1U; naddr = naddr + 1U; dwptr = dwptr + 1; ldv_66332: ; if (bytes >> 2 > i) { goto ldv_66331; } else { } ldv_66330: tmp___3 = nvram_conf_addr(ha, 257U); qla24xx_write_flash_dword(ha, tmp___3, 140U); tmp___4 = readl((void const volatile *)(& reg->ctrl_status)); writel(tmp___4 & 4294967293U, (void volatile *)(& reg->ctrl_status)); readl((void const volatile *)(& reg->ctrl_status)); return (ret); } } uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) { uint32_t i ; uint32_t *dwptr ; struct qla_hw_data *ha ; uint32_t tmp ; { ha = vha->hw; dwptr = (uint32_t *)buf; i = 0U; goto ldv_66343; ldv_66342: tmp = flash_data_addr(ha, 
ha->flt_region_vpd_nvram | naddr); *(dwptr + (unsigned long )i) = qla24xx_read_flash_dword(ha, tmp); i = i + 1U; naddr = naddr + 1U; ldv_66343: ; if (bytes >> 2 > i) { goto ldv_66342; } else { } return (buf); } } int qla25xx_write_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) { struct qla_hw_data *ha ; uint8_t *dbuf ; void *tmp ; { ha = vha->hw; tmp = vmalloc(65536UL); dbuf = (uint8_t *)tmp; if ((unsigned long )dbuf == (unsigned long )((uint8_t *)0U)) { return (259); } else { } (*((ha->isp_ops)->read_optrom))(vha, dbuf, ha->flt_region_vpd_nvram << 2, 65536U); memcpy((void *)dbuf + (unsigned long )(naddr << 2), (void const *)buf, (size_t )bytes); (*((ha->isp_ops)->write_optrom))(vha, dbuf, ha->flt_region_vpd_nvram << 2, 65536U); vfree((void const *)dbuf); return (0); } } __inline static void qla2x00_flip_colors(struct qla_hw_data *ha , uint16_t *pflags ) { { if ((ha->device_type & 16U) != 0U) { if ((unsigned int )ha->beacon_color_state == 7U) { ha->beacon_color_state = 0U; *pflags = 0U; } else { ha->beacon_color_state = 7U; *pflags = 193U; } } else if ((unsigned int )ha->beacon_color_state == 1U) { ha->beacon_color_state = 0U; *pflags = 0U; } else { ha->beacon_color_state = 1U; *pflags = 64U; } return; } } void qla2x00_beacon_blink(struct scsi_qla_host *vha ) { uint16_t gpio_enable ; uint16_t gpio_data ; uint16_t led_color ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; { led_color = 0U; ha = vha->hw; reg = & (ha->iobase)->isp; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if (ha->pio_address != 0ULL) { gpio_enable = inw((int )((unsigned int )ha->pio_address + 206U)); gpio_data = inw((int )((unsigned int )ha->pio_address + 204U)); } else { gpio_enable = readw((void const volatile *)(& reg->gpioe)); gpio_data = readw((void const volatile *)(& reg->gpiod)); } gpio_enable = (uint16_t )((unsigned int )gpio_enable | 192U); if (ha->pio_address != 0ULL) { outw((int )gpio_enable, (int )((unsigned int )ha->pio_address + 206U)); } else { writew((int )gpio_enable, (void volatile *)(& reg->gpioe)); readw((void const volatile *)(& reg->gpioe)); } qla2x00_flip_colors(ha, & led_color); gpio_data = (unsigned int )gpio_data & 65343U; gpio_data = (uint16_t )((int )gpio_data | (int )led_color); if (ha->pio_address != 0ULL) { outw((int )gpio_data, (int )((unsigned int )ha->pio_address + 204U)); } else { writew((int )gpio_data, (void volatile *)(& reg->gpiod)); readw((void const volatile *)(& reg->gpiod)); } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } int qla2x00_beacon_on(struct scsi_qla_host *vha ) { uint16_t gpio_enable ; uint16_t gpio_data ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; int tmp ; raw_spinlock_t *tmp___0 ; { ha = vha->hw; reg = & (ha->iobase)->isp; ha->fw_options[1] = (unsigned int )ha->fw_options[1] & 65279U; ha->fw_options[1] = (uint16_t )((unsigned int )ha->fw_options[1] | 64U); tmp = qla2x00_set_fw_options(vha, (uint16_t *)(& ha->fw_options)); if (tmp != 0) { ql_log(1U, vha, 28827, "Unable to update fw options (beacon on).\n"); return (258); } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); if (ha->pio_address != 0ULL) { gpio_enable = inw((int )((unsigned int )ha->pio_address + 206U)); gpio_data = inw((int )((unsigned int )ha->pio_address + 204U)); } else { gpio_enable = 
readw((void const volatile *)(& reg->gpioe)); gpio_data = readw((void const volatile *)(& reg->gpiod)); } gpio_enable = (uint16_t )((unsigned int )gpio_enable | 192U); if (ha->pio_address != 0ULL) { outw((int )gpio_enable, (int )((unsigned int )ha->pio_address + 206U)); } else { writew((int )gpio_enable, (void volatile *)(& reg->gpioe)); readw((void const volatile *)(& reg->gpioe)); } gpio_data = (unsigned int )gpio_data & 65343U; if (ha->pio_address != 0ULL) { outw((int )gpio_data, (int )((unsigned int )ha->pio_address + 204U)); } else { writew((int )gpio_data, (void volatile *)(& reg->gpiod)); readw((void const volatile *)(& reg->gpiod)); } spin_unlock_irqrestore(& ha->hardware_lock, flags); ha->beacon_blink_led = 1U; ha->beacon_color_state = 0U; return (0); } } int qla2x00_beacon_off(struct scsi_qla_host *vha ) { int rval ; struct qla_hw_data *ha ; { rval = 0; ha = vha->hw; ha->beacon_blink_led = 0U; if ((ha->device_type & 16U) != 0U) { ha->beacon_color_state = 7U; } else { ha->beacon_color_state = 1U; } (*((ha->isp_ops)->beacon_blink))(vha); ha->fw_options[1] = (unsigned int )ha->fw_options[1] & 65279U; ha->fw_options[1] = (unsigned int )ha->fw_options[1] & 65471U; rval = qla2x00_set_fw_options(vha, (uint16_t *)(& ha->fw_options)); if (rval != 0) { ql_log(1U, vha, 28828, "Unable to update fw options (beacon off).\n"); } else { } return (rval); } } __inline static void qla24xx_flip_colors(struct qla_hw_data *ha , uint16_t *pflags ) { { if ((unsigned int )ha->beacon_color_state == 7U) { ha->beacon_color_state = 0U; *pflags = 0U; } else { ha->beacon_color_state = 7U; *pflags = 20U; } return; } } void qla24xx_beacon_blink(struct scsi_qla_host *vha ) { uint16_t led_color ; uint32_t gpio_data ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; raw_spinlock_t *tmp ; { led_color = 0U; ha = vha->hw; reg = & (ha->iobase)->isp24; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); gpio_data = readl((void const volatile *)(& reg->gpiod)); gpio_data = gpio_data | 1835008U; writel(gpio_data, (void volatile *)(& reg->gpiod)); gpio_data = readl((void const volatile *)(& reg->gpiod)); qla24xx_flip_colors(ha, & led_color); gpio_data = gpio_data & 4294967267U; gpio_data = (uint32_t )led_color | gpio_data; writel(gpio_data, (void volatile *)(& reg->gpiod)); gpio_data = readl((void const volatile *)(& reg->gpiod)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static uint32_t qla83xx_select_led_port(struct qla_hw_data *ha ) { uint32_t led_select_value ; { led_select_value = 0U; if ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) { goto out; } else { } if ((unsigned int )ha->port_no == 0U) { led_select_value = 2102048U; } else { led_select_value = 2102056U; } out: ; return (led_select_value); } } void qla83xx_beacon_blink(struct scsi_qla_host *vha ) { uint32_t led_select_value ; struct qla_hw_data *ha ; uint16_t led_cfg[6U] ; uint16_t orig_led_cfg[6U] ; uint32_t led_10_value ; uint32_t led_43_value ; int rval ; { ha = vha->hw; if ((((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) && (ha->device_type & 8192U) == 0U) && ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U)) { return; } else { } if ((unsigned int )ha->beacon_blink_led == 0U) { return; } else { } if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { qla2x00_write_ram_word(vha, 4099U, 1073742384U); qla2x00_write_ram_word(vha, 4100U, 1073742384U); } else if ((ha->device_type & 32768U) != 
0U) { led_select_value = qla83xx_select_led_port(ha); qla83xx_wr_reg(vha, led_select_value, 1073742384U); qla83xx_wr_reg(vha, led_select_value + 4U, 1073742384U); } else if ((ha->device_type & 65536U) != 0U) { led_select_value = qla83xx_select_led_port(ha); qla83xx_rd_reg(vha, led_select_value, & led_10_value); qla83xx_rd_reg(vha, led_select_value + 16U, & led_43_value); qla83xx_wr_reg(vha, led_select_value, 32784384U); msleep(500U); qla83xx_wr_reg(vha, led_select_value, 1073742324U); msleep(1000U); qla83xx_wr_reg(vha, led_select_value, led_10_value); qla83xx_wr_reg(vha, led_select_value + 16U, led_43_value); } else if ((ha->device_type & 8192U) != 0U) { rval = qla81xx_get_led_config(vha, (uint16_t *)(& orig_led_cfg)); if (rval == 0) { if ((ha->device_type & 8192U) != 0U) { led_cfg[0] = 16384U; led_cfg[1] = 8192U; led_cfg[2] = 0U; led_cfg[3] = 0U; led_cfg[4] = 0U; led_cfg[5] = 0U; } else { led_cfg[0] = 16384U; led_cfg[1] = 16384U; led_cfg[2] = 16384U; led_cfg[3] = 8192U; led_cfg[4] = 0U; led_cfg[5] = 8192U; } rval = qla81xx_set_led_config(vha, (uint16_t *)(& led_cfg)); msleep(1000U); if ((ha->device_type & 8192U) != 0U) { led_cfg[0] = 16384U; led_cfg[1] = 8192U; led_cfg[2] = 0U; } else { led_cfg[0] = 16384U; led_cfg[1] = 8192U; led_cfg[2] = 16384U; led_cfg[3] = 16384U; led_cfg[4] = 0U; led_cfg[5] = 8192U; } rval = qla81xx_set_led_config(vha, (uint16_t *)(& led_cfg)); } else { } qla81xx_set_led_config(vha, (uint16_t *)(& orig_led_cfg)); } else { } return; } } int qla24xx_beacon_on(struct scsi_qla_host *vha ) { uint32_t gpio_data ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; int tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return (0); } else { } if ((ha->device_type & 65536U) != 0U || (ha->device_type & 8192U) != 0U) { goto skip_gpio; } else { } if ((unsigned int )ha->beacon_blink_led == 0U) { ha->fw_options[1] = (uint16_t )((unsigned int )ha->fw_options[1] | 64U); tmp = qla2x00_set_fw_options(vha, (uint16_t *)(& ha->fw_options)); if (tmp != 0) { return (258); } else { } tmp___0 = qla2x00_get_fw_options(vha, (uint16_t *)(& ha->fw_options)); if (tmp___0 != 0) { ql_log(1U, vha, 28681, "Unable to update fw options (beacon on).\n"); return (258); } else { } if ((ha->device_type & 32768U) != 0U || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { goto skip_gpio; } else { } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); gpio_data = readl((void const volatile *)(& reg->gpiod)); gpio_data = gpio_data | 1835008U; writel(gpio_data, (void volatile *)(& reg->gpiod)); readl((void const volatile *)(& reg->gpiod)); spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } ha->beacon_color_state = 0U; skip_gpio: ha->beacon_blink_led = 1U; return (0); } } int qla24xx_beacon_off(struct scsi_qla_host *vha ) { uint32_t gpio_data ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; raw_spinlock_t *tmp ; int tmp___0 ; int tmp___1 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return (0); } else { } ha->beacon_blink_led = 0U; if ((ha->device_type & 32768U) != 0U || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { goto set_fw_options; } else { } if ((ha->device_type & 65536U) != 0U || (ha->device_type & 8192U) != 0U) { return (0); } else { } 
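/* Tail of qla24xx_beacon_off(): beacon_color_state is forced to 7
   (presumably QLA_LED_ALL_ON in the original driver), one final
   beacon_blink cycle is run so the LED is left in a known state, and the
   GPIO LED-control bits are cleared under hardware_lock before the firmware
   options are restored at the set_fw_options label. */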
ha->beacon_color_state = 7U; (*((ha->isp_ops)->beacon_blink))(vha); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); gpio_data = readl((void const volatile *)(& reg->gpiod)); gpio_data = gpio_data & 4293132287U; writel(gpio_data, (void volatile *)(& reg->gpiod)); readl((void const volatile *)(& reg->gpiod)); spin_unlock_irqrestore(& ha->hardware_lock, flags); set_fw_options: ha->fw_options[1] = (unsigned int )ha->fw_options[1] & 65471U; tmp___0 = qla2x00_set_fw_options(vha, (uint16_t *)(& ha->fw_options)); if (tmp___0 != 0) { ql_log(1U, vha, 28749, "Unable to update fw options (beacon on).\n"); return (258); } else { } tmp___1 = qla2x00_get_fw_options(vha, (uint16_t *)(& ha->fw_options)); if (tmp___1 != 0) { ql_log(1U, vha, 28750, "Unable to update fw options (beacon on).\n"); return (258); } else { } return (0); } } static void qla2x00_flash_enable(struct qla_hw_data *ha ) { uint16_t data ; struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; data = readw((void const volatile *)(& reg->ctrl_status)); data = (uint16_t )((unsigned int )data | 2U); writew((int )data, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); return; } } static void qla2x00_flash_disable(struct qla_hw_data *ha ) { uint16_t data ; struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; data = readw((void const volatile *)(& reg->ctrl_status)); data = (unsigned int )data & 65533U; writew((int )data, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); return; } } static uint8_t qla2x00_read_flash_byte(struct qla_hw_data *ha , uint32_t addr ) { uint16_t data ; uint16_t bank_select ; struct device_reg_2xxx *reg ; uint16_t data2 ; { reg = & (ha->iobase)->isp; bank_select = readw((void const volatile *)(& reg->ctrl_status)); if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { bank_select = (unsigned int )bank_select & 65287U; bank_select = ((unsigned int )((uint16_t )(addr >> 12)) & 240U) | (unsigned int )bank_select; bank_select = (uint16_t )((unsigned int )bank_select | 8U); writew((int )bank_select, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); writew((int )((unsigned short )addr), (void volatile *)(& reg->flash_address)); data = readw((void const volatile *)(& reg->flash_data)); return ((uint8_t )data); } else { } if ((addr & 65536U) != 0U && ((int )bank_select & 8) == 0) { bank_select = (uint16_t )((unsigned int )bank_select | 8U); writew((int )bank_select, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); } else if ((addr & 65536U) == 0U && ((int )bank_select & 8) != 0) { bank_select = (unsigned int )bank_select & 65527U; writew((int )bank_select, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); } else { } if (ha->pio_address != 0ULL) { outw((int )((unsigned short )addr), (int )ha->pio_address); ldv_66455: data = inw((int )((unsigned int )ha->pio_address + 2U)); __asm__ volatile ("": : : "memory"); cpu_relax(); data2 = inw((int )((unsigned int )ha->pio_address + 2U)); if ((int )data != (int )data2) { goto ldv_66455; } else { } } else { writew((int )((unsigned short )addr), (void volatile *)(& reg->flash_address)); data = qla2x00_debounce_register___0((uint16_t volatile *)(& reg->flash_data)); } return ((uint8_t )data); } } static void qla2x00_write_flash_byte(struct qla_hw_data *ha , uint32_t addr , uint8_t data ) { uint16_t bank_select ; struct 
device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; bank_select = readw((void const volatile *)(& reg->ctrl_status)); if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { bank_select = (unsigned int )bank_select & 65287U; bank_select = ((unsigned int )((uint16_t )(addr >> 12)) & 240U) | (unsigned int )bank_select; bank_select = (uint16_t )((unsigned int )bank_select | 8U); writew((int )bank_select, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); writew((int )((unsigned short )addr), (void volatile *)(& reg->flash_address)); readw((void const volatile *)(& reg->ctrl_status)); writew((int )data, (void volatile *)(& reg->flash_data)); readw((void const volatile *)(& reg->ctrl_status)); return; } else { } if ((addr & 65536U) != 0U && ((int )bank_select & 8) == 0) { bank_select = (uint16_t )((unsigned int )bank_select | 8U); writew((int )bank_select, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); } else if ((addr & 65536U) == 0U && ((int )bank_select & 8) != 0) { bank_select = (unsigned int )bank_select & 65527U; writew((int )bank_select, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); } else { } if (ha->pio_address != 0ULL) { outw((int )((unsigned short )addr), (int )ha->pio_address); outw((int )data, (int )((unsigned int )ha->pio_address + 2U)); } else { writew((int )((unsigned short )addr), (void volatile *)(& reg->flash_address)); readw((void const volatile *)(& reg->ctrl_status)); writew((int )data, (void volatile *)(& reg->flash_data)); readw((void const volatile *)(& reg->ctrl_status)); } return; } } static int qla2x00_poll_flash(struct qla_hw_data *ha , uint32_t addr , uint8_t poll_data , uint8_t man_id , uint8_t flash_id ) { int status ; uint8_t flash_data ; uint32_t cnt ; { status = 1; poll_data = (unsigned int )poll_data & 128U; cnt = 3000000U; goto ldv_66477; ldv_66476: flash_data = qla2x00_read_flash_byte(ha, addr); if (((int )flash_data & 128) == (int )poll_data) { status = 0; goto ldv_66474; } else { } if ((unsigned int )man_id != 64U && (unsigned int )man_id != 218U) { if (((int )flash_data & 32) != 0 && cnt > 2U) { cnt = 2U; } else { } } else { } __const_udelay(42950UL); __asm__ volatile ("": : : "memory"); ___might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_sup.c", 2087, 0); _cond_resched(); cnt = cnt - 1U; ldv_66477: ; if (cnt != 0U) { goto ldv_66476; } else { } ldv_66474: ; return (status); } } static int qla2x00_program_flash_address(struct qla_hw_data *ha , uint32_t addr , uint8_t data , uint8_t man_id , uint8_t flash_id ) { int tmp ; { if ((ha->device_type & 536870912U) != 0U) { qla2x00_write_flash_byte(ha, 2730U, 170); qla2x00_write_flash_byte(ha, 1365U, 85); qla2x00_write_flash_byte(ha, 2730U, 160); qla2x00_write_flash_byte(ha, addr, (int )data); } else if ((unsigned int )man_id == 218U && (unsigned int )flash_id == 193U) { qla2x00_write_flash_byte(ha, addr, (int )data); if ((addr & 126U) != 0U) { return (0); } else { } } else { qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 160); qla2x00_write_flash_byte(ha, addr, (int )data); } __const_udelay(644250UL); tmp = qla2x00_poll_flash(ha, addr, (int )data, (int )man_id, (int )flash_id); return (tmp); } } static int 
qla2x00_erase_flash(struct qla_hw_data *ha , uint8_t man_id , uint8_t flash_id ) { int tmp ; { if ((ha->device_type & 536870912U) != 0U) { qla2x00_write_flash_byte(ha, 2730U, 170); qla2x00_write_flash_byte(ha, 1365U, 85); qla2x00_write_flash_byte(ha, 2730U, 128); qla2x00_write_flash_byte(ha, 2730U, 170); qla2x00_write_flash_byte(ha, 1365U, 85); qla2x00_write_flash_byte(ha, 2730U, 16); } else { qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 128); qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 16); } __const_udelay(644250UL); tmp = qla2x00_poll_flash(ha, 0U, 128, (int )man_id, (int )flash_id); return (tmp); } } static int qla2x00_erase_flash_sector(struct qla_hw_data *ha , uint32_t addr , uint32_t sec_mask , uint8_t man_id , uint8_t flash_id ) { int tmp ; { qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 128); qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); if ((unsigned int )man_id == 31U && (unsigned int )flash_id == 19U) { qla2x00_write_flash_byte(ha, addr & sec_mask, 16); } else { qla2x00_write_flash_byte(ha, addr & sec_mask, 48); } __const_udelay(644250UL); tmp = qla2x00_poll_flash(ha, addr, 128, (int )man_id, (int )flash_id); return (tmp); } } static void qla2x00_get_flash_manufacturer(struct qla_hw_data *ha , uint8_t *man_id , uint8_t *flash_id ) { { qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 144); *man_id = qla2x00_read_flash_byte(ha, 0U); *flash_id = qla2x00_read_flash_byte(ha, 1U); qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 240); return; } } static void qla2x00_read_flash_data(struct qla_hw_data *ha , uint8_t *tmp_buf , uint32_t saddr , uint32_t length ) { struct device_reg_2xxx *reg ; uint32_t midpoint ; uint32_t ilength ; uint8_t data ; { reg = & (ha->iobase)->isp; midpoint = length / 2U; writew(0, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); ilength = 0U; goto ldv_66514; ldv_66513: ; if (ilength == midpoint) { writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); } else { } data = qla2x00_read_flash_byte(ha, saddr); if (saddr % 100U != 0U) { __const_udelay(42950UL); } else { } *tmp_buf = data; ___might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_sup.c", 2236, 0); _cond_resched(); saddr = saddr + 1U; ilength = ilength + 1U; tmp_buf = tmp_buf + 1; ldv_66514: ; if (ilength < length) { goto ldv_66513; } else { } return; } } __inline static void qla2x00_suspend_hba(struct scsi_qla_host *vha ) { int cnt ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; unsigned short tmp___0 ; { ha = vha->hw; reg = & (ha->iobase)->isp; scsi_block_requests(vha->host); (*((ha->isp_ops)->disable_intrs))(ha); set_bit(3L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writew(8192, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); if (((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) || 
(ha->device_type & 4U) != 0U) { cnt = 0; goto ldv_66528; ldv_66527: tmp___0 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___0 & 32) != 0) { goto ldv_66526; } else { } __const_udelay(429500UL); cnt = cnt + 1; ldv_66528: ; if (cnt <= 29999) { goto ldv_66527; } else { } ldv_66526: ; } else { __const_udelay(42950UL); } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } __inline static void qla2x00_resume_hba(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; clear_bit(3L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); return; } } uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { uint32_t addr ; uint32_t midpoint ; uint8_t *data ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; { ha = vha->hw; reg = & (ha->iobase)->isp; qla2x00_suspend_hba(vha); midpoint = ha->optrom_size / 2U; qla2x00_flash_enable(ha); writew(0, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); addr = offset; data = buf; goto ldv_66545; ldv_66544: ; if (addr == midpoint) { writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); } else { } *data = qla2x00_read_flash_byte(ha, addr); addr = addr + 1U; data = data + 1; ldv_66545: ; if (addr < length) { goto ldv_66544; } else { } qla2x00_flash_disable(ha); qla2x00_resume_hba(vha); return (buf); } } int qla2x00_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { int rval ; uint8_t man_id ; uint8_t flash_id ; uint8_t sec_number ; uint8_t data ; uint16_t wd ; uint32_t addr ; uint32_t liter ; uint32_t sec_mask ; uint32_t rest_addr ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; int tmp ; int tmp___0 ; int tmp___1 ; { ha = vha->hw; reg = & (ha->iobase)->isp; qla2x00_suspend_hba(vha); rval = 0; sec_number = 0U; writew(1, (void volatile *)(& reg->ctrl_status)); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & wd); qla2x00_flash_enable(ha); if ((ha->device_type & 536870912U) != 0U) { flash_id = 0U; man_id = flash_id; rest_addr = 65535U; sec_mask = 65536U; goto update_flash; } else { } qla2x00_get_flash_manufacturer(ha, & man_id, & flash_id); switch ((int )man_id) { case 32: ; if ((unsigned int )flash_id == 210U || (unsigned int )flash_id == 227U) { rest_addr = 65535U; sec_mask = 65536U; goto ldv_66567; } else { } rest_addr = 16383U; sec_mask = 114688U; goto ldv_66567; case 64: rest_addr = 511U; sec_mask = 130560U; goto ldv_66567; case 191: rest_addr = 4095U; sec_mask = 126976U; goto ldv_66567; case 218: rest_addr = 127U; sec_mask = 130944U; goto ldv_66567; case 194: ; if ((unsigned int )flash_id == 56U || (unsigned int )flash_id == 79U) { rest_addr = 65535U; sec_mask = 65536U; goto ldv_66567; } else { } case 31: ; if ((unsigned int )flash_id == 19U) { rest_addr = 2147483647U; sec_mask = 2147483648U; goto ldv_66567; } else { } case 1: ; if (((unsigned int )flash_id == 56U || (unsigned int )flash_id == 64U) || (unsigned int )flash_id == 79U) { rest_addr = 65535U; sec_mask = 65536U; goto ldv_66567; } else if ((unsigned int )flash_id == 62U) { rest_addr = 65535U; sec_mask = 65536U; goto ldv_66567; } else if ((unsigned int )flash_id == 32U || (unsigned int )flash_id == 110U) { rest_addr = 16383U; sec_mask = 114688U; goto ldv_66567; } else if ((unsigned int )flash_id == 109U) { rest_addr = 8191U; 
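/* The rest_addr/sec_mask pairs assigned throughout this switch encode the
   erase-sector geometry for each recognised flash manufacturer/device id:
   a write whose address satisfies (addr & rest_addr) == 0 begins a new
   sector, and addr & sec_mask is the sector base passed to the erase
   command.  The 8191/122880 pair here corresponds to an 8 KB sector for
   this part. */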
sec_mask = 122880U; goto ldv_66567; } else { } default: rest_addr = 16383U; sec_mask = 114688U; goto ldv_66567; } ldv_66567: ; update_flash: ; if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { tmp = qla2x00_erase_flash(ha, (int )man_id, (int )flash_id); if (tmp != 0) { rval = 258; goto ldv_66575; } else { } } else { } addr = offset; liter = 0U; goto ldv_66579; ldv_66578: data = *(buf + (unsigned long )liter); if ((addr & rest_addr) == 0U) { if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { if (addr > 65535U) { if (((addr >> 12) & 240U) != 0U && (((unsigned int )man_id == 1U && (unsigned int )flash_id == 62U) || ((unsigned int )man_id == 32U && (unsigned int )flash_id == 210U))) { sec_number = (uint8_t )((int )sec_number + 1); if ((unsigned int )sec_number == 1U) { rest_addr = 32767U; sec_mask = 98304U; } else if ((unsigned int )sec_number == 2U || (unsigned int )sec_number == 3U) { rest_addr = 8191U; sec_mask = 122880U; } else if ((unsigned int )sec_number == 4U) { rest_addr = 16383U; sec_mask = 114688U; } else { } } else { } } else { } } else if (ha->optrom_size / 2U == addr) { writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); } else { } if ((unsigned int )flash_id == 218U && (unsigned int )man_id == 193U) { qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 160); } else if ((ha->device_type & 16U) == 0U && (ha->device_type & 64U) == 0U) { tmp___0 = qla2x00_erase_flash_sector(ha, addr, sec_mask, (int )man_id, (int )flash_id); if (tmp___0 != 0) { rval = 258; goto ldv_66576; } else { } if ((unsigned int )man_id == 1U && (unsigned int )flash_id == 109U) { sec_number = (uint8_t )((int )sec_number + 1); } else { } } else { } } else { } if ((unsigned int )man_id == 1U && (unsigned int )flash_id == 109U) { if ((unsigned int )sec_number == 1U && rest_addr - 1U == addr) { rest_addr = 4095U; sec_mask = 126976U; } else if ((unsigned int )sec_number == 3U && (addr & 32766U) != 0U) { rest_addr = 16383U; sec_mask = 114688U; } else { } } else { } tmp___1 = qla2x00_program_flash_address(ha, addr, (int )data, (int )man_id, (int )flash_id); if (tmp___1 != 0) { rval = 258; goto ldv_66576; } else { } ___might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_sup.c", 2523, 0); _cond_resched(); liter = liter + 1U; addr = addr + 1U; ldv_66579: ; if (liter < length) { goto ldv_66578; } else { } ldv_66576: ; ldv_66575: qla2x00_flash_disable(ha); qla2x00_resume_hba(vha); return (rval); } } uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { struct qla_hw_data *ha ; { ha = vha->hw; scsi_block_requests(vha->host); set_bit(3L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); qla24xx_read_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2); clear_bit(3L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); scsi_unblock_requests(vha->host); return (buf); } } int qla24xx_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { int rval ; struct qla_hw_data *ha ; { ha = vha->hw; scsi_block_requests(vha->host); set_bit(3L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2); clear_bit(3L, (unsigned long volatile *)(& 
ha->mbx_cmd_flags)); scsi_unblock_requests(vha->host); return (rval); } } uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { int rval ; dma_addr_t optrom_dma ; void *optrom ; uint8_t *pbuf ; uint32_t faddr ; uint32_t left ; uint32_t burst ; struct qla_hw_data *ha ; uint32_t tmp ; uint32_t tmp___0 ; uint8_t *tmp___1 ; { ha = vha->hw; if ((((ha->device_type & 2048U) != 0U || (ha->device_type & 8192U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { goto try_fast; } else { } if ((offset & 4095U) != 0U) { goto slow_read; } else { } if (length <= 4095U) { goto slow_read; } else { } try_fast: optrom = dma_alloc_attrs(& (ha->pdev)->dev, 4096UL, & optrom_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )optrom == (unsigned long )((void *)0)) { ql_log(1U, vha, 204, "Unable to allocate memory for optrom burst read (%x KB).\n", 4); goto slow_read; } else { } pbuf = buf; faddr = offset >> 2; left = length >> 2; burst = 1024U; goto ldv_66612; ldv_66611: ; if (burst > left) { burst = left; } else { } tmp = flash_data_addr(ha, faddr); rval = qla2x00_dump_ram(vha, optrom_dma, tmp, burst); if (rval != 0) { tmp___0 = flash_data_addr(ha, faddr); ql_log(1U, vha, 245, "Unable to burst-read optrom segment (%x/%x/%llx).\n", rval, tmp___0, optrom_dma); ql_log(1U, vha, 246, "Reverting to slow-read.\n"); dma_free_attrs(& (ha->pdev)->dev, 4096UL, optrom, optrom_dma, (struct dma_attrs *)0); goto slow_read; } else { } memcpy((void *)pbuf, (void const *)optrom, (size_t )(burst * 4U)); left = left - burst; faddr = faddr + burst; pbuf = pbuf + (unsigned long )(burst * 4U); ldv_66612: ; if (left != 0U) { goto ldv_66611; } else { } dma_free_attrs(& (ha->pdev)->dev, 4096UL, optrom, optrom_dma, (struct dma_attrs *)0); return (buf); slow_read: tmp___1 = qla24xx_read_optrom_data(vha, buf, offset, length); return (tmp___1); } } static void qla2x00_get_fcode_version(struct qla_hw_data *ha , uint32_t pcids ) { int ret ; uint32_t istart ; uint32_t iend ; uint32_t iter ; uint32_t vend ; uint8_t do_next ; uint8_t rbyte ; uint8_t *vbyte ; uint8_t tmp ; uint8_t tmp___0 ; uint8_t tmp___1 ; uint8_t tmp___2 ; uint8_t tmp___3 ; uint8_t tmp___4 ; uint8_t *tmp___5 ; { ret = 258; memset((void *)(& ha->fcode_revision), 0, 16UL); tmp = qla2x00_read_flash_byte(ha, pcids + 11U); tmp___0 = qla2x00_read_flash_byte(ha, pcids + 10U); istart = (uint32_t )(((int )tmp << 8) | (int )tmp___0) + pcids; iend = istart + 256U; do_next = 0U; iter = istart; goto ldv_66627; ldv_66626: iter = iter + 1U; tmp___3 = qla2x00_read_flash_byte(ha, iter); if ((unsigned int )tmp___3 == 47U) { tmp___2 = qla2x00_read_flash_byte(ha, iter + 2U); if ((unsigned int )tmp___2 == 47U) { do_next = (uint8_t )((int )do_next + 1); } else { tmp___1 = qla2x00_read_flash_byte(ha, iter + 3U); if ((unsigned int )tmp___1 == 47U) { do_next = (uint8_t )((int )do_next + 1); } else { } } } else { } ldv_66627: ; if (iter < iend && (unsigned int )do_next == 0U) { goto ldv_66626; } else { } if ((unsigned int )do_next == 0U) { goto ldv_66629; } else { } do_next = 0U; goto ldv_66631; ldv_66630: iter = iter - 1U; tmp___4 = qla2x00_read_flash_byte(ha, iter); if ((unsigned int )tmp___4 == 32U) { do_next = (uint8_t )((int )do_next + 1); } else { } ldv_66631: ; if (iter > istart && (unsigned int )do_next == 0U) { goto ldv_66630; } else { } if ((unsigned int )do_next == 0U) { goto ldv_66629; } else { } vend = iter - 1U; 
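/* vend marks the end of the FCode version text; the loop below scans backwards for the preceding space/control byte to locate its start, after which at most 16 bytes are copied into ha->fcode_revision. */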
do_next = 0U; goto ldv_66634; ldv_66633: iter = iter - 1U; rbyte = qla2x00_read_flash_byte(ha, iter); if (((unsigned int )rbyte == 32U || (unsigned int )rbyte == 13U) || (unsigned int )rbyte == 16U) { do_next = (uint8_t )((int )do_next + 1); } else { } ldv_66634: ; if (iter > istart && (unsigned int )do_next == 0U) { goto ldv_66633; } else { } if ((unsigned int )do_next == 0U) { goto ldv_66629; } else { } iter = iter + 1U; if (vend != iter && vend - iter <= 15U) { vbyte = (uint8_t *)(& ha->fcode_revision); goto ldv_66637; ldv_66636: tmp___5 = vbyte; vbyte = vbyte + 1; *tmp___5 = qla2x00_read_flash_byte(ha, iter); iter = iter + 1U; ldv_66637: ; if (iter <= vend) { goto ldv_66636; } else { } ret = 0; } else { } ldv_66629: ; if (ret != 0) { memset((void *)(& ha->fcode_revision), 0, 16UL); } else { } return; } } int qla2x00_get_flash_version(scsi_qla_host_t *vha , void *mbuf ) { int ret ; uint8_t code_type ; uint8_t last_image ; uint32_t pcihdr ; uint32_t pcids ; uint8_t *dbyte ; uint16_t *dcode ; struct qla_hw_data *ha ; uint8_t tmp ; uint8_t tmp___0 ; uint8_t tmp___1 ; uint8_t tmp___2 ; uint8_t tmp___3 ; uint8_t tmp___4 ; uint8_t tmp___5 ; uint8_t tmp___6 ; uint8_t tmp___7 ; uint8_t tmp___8 ; uint8_t tmp___9 ; { ret = 0; ha = vha->hw; if (ha->pio_address == 0ULL || (unsigned long )mbuf == (unsigned long )((void *)0)) { return (258); } else { } memset((void *)(& ha->bios_revision), 0, 2UL); memset((void *)(& ha->efi_revision), 0, 2UL); memset((void *)(& ha->fcode_revision), 0, 16UL); memset((void *)(& ha->fw_revision), 0, 16UL); qla2x00_flash_enable(ha); pcihdr = 0U; last_image = 1U; ldv_66657: tmp = qla2x00_read_flash_byte(ha, pcihdr); if ((unsigned int )tmp != 85U) { ql_log(0U, vha, 80, "No matching ROM signature.\n"); ret = 258; goto ldv_66651; } else { tmp___0 = qla2x00_read_flash_byte(ha, pcihdr + 1U); if ((unsigned int )tmp___0 != 170U) { ql_log(0U, vha, 80, "No matching ROM signature.\n"); ret = 258; goto ldv_66651; } else { } } tmp___1 = qla2x00_read_flash_byte(ha, pcihdr + 25U); tmp___2 = qla2x00_read_flash_byte(ha, pcihdr + 24U); pcids = (uint32_t )(((int )tmp___1 << 8) | (int )tmp___2) + pcihdr; tmp___3 = qla2x00_read_flash_byte(ha, pcids); if ((unsigned int )tmp___3 != 80U) { ql_log(0U, vha, 81, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = 258; goto ldv_66651; } else { tmp___4 = qla2x00_read_flash_byte(ha, pcids + 1U); if ((unsigned int )tmp___4 != 67U) { ql_log(0U, vha, 81, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = 258; goto ldv_66651; } else { tmp___5 = qla2x00_read_flash_byte(ha, pcids + 2U); if ((unsigned int )tmp___5 != 73U) { ql_log(0U, vha, 81, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = 258; goto ldv_66651; } else { tmp___6 = qla2x00_read_flash_byte(ha, pcids + 3U); if ((unsigned int )tmp___6 != 82U) { ql_log(0U, vha, 81, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = 258; goto ldv_66651; } else { } } } } code_type = qla2x00_read_flash_byte(ha, pcids + 20U); switch ((int )code_type) { case 0: ha->bios_revision[0] = qla2x00_read_flash_byte(ha, pcids + 18U); ha->bios_revision[1] = qla2x00_read_flash_byte(ha, pcids + 19U); ql_dbg(1073741824U, vha, 82, "Read BIOS %d.%d.\n", (int )ha->bios_revision[1], (int )ha->bios_revision[0]); goto ldv_66653; case 1: qla2x00_get_fcode_version(ha, pcids); goto ldv_66653; case 3: ha->efi_revision[0] = qla2x00_read_flash_byte(ha, pcids + 18U); ha->efi_revision[1] = qla2x00_read_flash_byte(ha, pcids + 19U); ql_dbg(1073741824U, vha, 83, "Read EFI %d.%d.\n", (int )ha->efi_revision[1], (int 
)ha->efi_revision[0]); goto ldv_66653; default: ql_log(1U, vha, 84, "Unrecognized code type %x at pcids %x.\n", (int )code_type, pcids); goto ldv_66653; } ldv_66653: tmp___7 = qla2x00_read_flash_byte(ha, pcids + 21U); last_image = (unsigned int )tmp___7 & 128U; tmp___8 = qla2x00_read_flash_byte(ha, pcids + 17U); tmp___9 = qla2x00_read_flash_byte(ha, pcids + 16U); pcihdr = (uint32_t )((((int )tmp___8 << 8) | (int )tmp___9) * 512) + pcihdr; if ((unsigned int )last_image == 0U) { goto ldv_66657; } else { } ldv_66651: ; if ((ha->device_type & 16U) != 0U) { memset((void *)(& ha->fw_revision), 0, 16UL); dbyte = (uint8_t *)mbuf; memset((void *)dbyte, 0, 8UL); dcode = (uint16_t *)dbyte; qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4U + 10U, 8U); ql_dbg(1073872896U, vha, 266, "Dumping fw ver from flash:.\n"); ql_dump_buffer(1073872896U, vha, 267, dbyte, 8U); if (((((unsigned int )*dcode == 65535U && (unsigned int )*(dcode + 1UL) == 65535U) && (unsigned int )*(dcode + 2UL) == 65535U) && (unsigned int )*(dcode + 3UL) == 65535U) || ((((unsigned int )*dcode == 0U && (unsigned int )*(dcode + 1UL) == 0U) && (unsigned int )*(dcode + 2UL) == 0U) && (unsigned int )*(dcode + 3UL) == 0U)) { ql_log(1U, vha, 87, "Unrecognized fw revision at %x.\n", ha->flt_region_fw * 4U); } else { ha->fw_revision[0] = (uint32_t )(((int )*dbyte << 16) | (int )*(dbyte + 1UL)); ha->fw_revision[1] = (uint32_t )(((int )*(dbyte + 2UL) << 16) | (int )*(dbyte + 3UL)); ha->fw_revision[2] = (uint32_t )(((int )*(dbyte + 4UL) << 16) | (int )*(dbyte + 5UL)); ql_dbg(1073741824U, vha, 88, "FW Version: %d.%d.%d.\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2]); } } else { } qla2x00_flash_disable(ha); return (ret); } } int qla82xx_get_flash_version(scsi_qla_host_t *vha , void *mbuf ) { int ret ; uint32_t pcihdr ; uint32_t pcids ; uint32_t *dcode ; uint8_t *bcode ; uint8_t code_type ; uint8_t last_image ; struct qla_hw_data *ha ; { ret = 0; ha = vha->hw; if ((unsigned long )mbuf == (unsigned long )((void *)0)) { return (258); } else { } memset((void *)(& ha->bios_revision), 0, 2UL); memset((void *)(& ha->efi_revision), 0, 2UL); memset((void *)(& ha->fcode_revision), 0, 16UL); memset((void *)(& ha->fw_revision), 0, 16UL); dcode = (uint32_t *)mbuf; pcihdr = ha->flt_region_boot << 2; last_image = 1U; ldv_66676: (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)dcode, pcihdr, 128U); bcode = (uint8_t *)(mbuf + ((unsigned long )pcihdr & 3UL)); if ((unsigned int )*bcode != 85U || (unsigned int )*(bcode + 1UL) != 170U) { ql_log(0U, vha, 340, "No matching ROM signature.\n"); ret = 258; goto ldv_66670; } else { } pcids = (uint32_t )(((int )*(bcode + 25UL) << 8) | (int )*(bcode + 24UL)) + pcihdr; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)dcode, pcids, 128U); bcode = (uint8_t *)(mbuf + ((unsigned long )pcihdr & 3UL)); if ((((unsigned int )*bcode != 80U || (unsigned int )*(bcode + 1UL) != 67U) || (unsigned int )*(bcode + 2UL) != 73U) || (unsigned int )*(bcode + 3UL) != 82U) { ql_log(0U, vha, 341, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = 258; goto ldv_66670; } else { } code_type = *(bcode + 20UL); switch ((int )code_type) { case 0: ha->bios_revision[0] = *(bcode + 18UL); ha->bios_revision[1] = *(bcode + 19UL); ql_dbg(1073741824U, vha, 342, "Read BIOS %d.%d.\n", (int )ha->bios_revision[1], (int )ha->bios_revision[0]); goto ldv_66672; case 1: ha->fcode_revision[0] = *(bcode + 18UL); ha->fcode_revision[1] = *(bcode + 19UL); ql_dbg(1073741824U, vha, 343, "Read FCODE %d.%d.\n", (int )ha->fcode_revision[1], (int 
)ha->fcode_revision[0]); goto ldv_66672; case 3: ha->efi_revision[0] = *(bcode + 18UL); ha->efi_revision[1] = *(bcode + 19UL); ql_dbg(1073741824U, vha, 344, "Read EFI %d.%d.\n", (int )ha->efi_revision[1], (int )ha->efi_revision[0]); goto ldv_66672; default: ql_log(1U, vha, 345, "Unrecognized code type %x at pcids %x.\n", (int )code_type, pcids); goto ldv_66672; } ldv_66672: last_image = (unsigned int )*(bcode + 21UL) & 128U; pcihdr = (uint32_t )((((int )*(bcode + 17UL) << 8) | (int )*(bcode + 16UL)) * 512) + pcihdr; if ((unsigned int )last_image == 0U) { goto ldv_66676; } else { } ldv_66670: memset((void *)(& ha->fw_revision), 0, 16UL); dcode = (uint32_t *)mbuf; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)dcode, ha->flt_region_fw << 2, 32U); bcode = (uint8_t *)(mbuf + ((unsigned long )pcihdr & 3UL)); if ((((unsigned int )*bcode == 3U && (unsigned int )*(bcode + 1UL) == 0U) && (unsigned int )*(bcode + 2UL) == 64U) && (unsigned int )*(bcode + 3UL) == 64U) { ha->fw_revision[0] = (uint32_t )*(bcode + 4UL); ha->fw_revision[1] = (uint32_t )*(bcode + 5UL); ha->fw_revision[2] = (uint32_t )*(bcode + 6UL); ql_dbg(1073741824U, vha, 339, "Firmware revision %d.%d.%d\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2]); } else { } return (ret); } } int qla24xx_get_flash_version(scsi_qla_host_t *vha , void *mbuf ) { int ret ; uint32_t pcihdr ; uint32_t pcids ; uint32_t *dcode ; uint8_t *bcode ; uint8_t code_type ; uint8_t last_image ; int i ; struct qla_hw_data *ha ; __u32 tmp ; __u32 tmp___0 ; { ret = 0; ha = vha->hw; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return (ret); } else { } if ((unsigned long )mbuf == (unsigned long )((void *)0)) { return (258); } else { } memset((void *)(& ha->bios_revision), 0, 2UL); memset((void *)(& ha->efi_revision), 0, 2UL); memset((void *)(& ha->fcode_revision), 0, 16UL); memset((void *)(& ha->fw_revision), 0, 16UL); dcode = (uint32_t *)mbuf; pcihdr = ha->flt_region_boot << 2; last_image = 1U; ldv_66696: qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 32U); bcode = (uint8_t *)(mbuf + ((unsigned long )pcihdr & 3UL)); if ((unsigned int )*bcode != 85U || (unsigned int )*(bcode + 1UL) != 170U) { ql_log(0U, vha, 89, "No matching ROM signature.\n"); ret = 258; goto ldv_66690; } else { } pcids = (uint32_t )(((int )*(bcode + 25UL) << 8) | (int )*(bcode + 24UL)) + pcihdr; qla24xx_read_flash_data(vha, dcode, pcids >> 2, 32U); bcode = (uint8_t *)(mbuf + ((unsigned long )pcihdr & 3UL)); if ((((unsigned int )*bcode != 80U || (unsigned int )*(bcode + 1UL) != 67U) || (unsigned int )*(bcode + 2UL) != 73U) || (unsigned int )*(bcode + 3UL) != 82U) { ql_log(0U, vha, 90, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = 258; goto ldv_66690; } else { } code_type = *(bcode + 20UL); switch ((int )code_type) { case 0: ha->bios_revision[0] = *(bcode + 18UL); ha->bios_revision[1] = *(bcode + 19UL); ql_dbg(1073741824U, vha, 91, "Read BIOS %d.%d.\n", (int )ha->bios_revision[1], (int )ha->bios_revision[0]); goto ldv_66692; case 1: ha->fcode_revision[0] = *(bcode + 18UL); ha->fcode_revision[1] = *(bcode + 19UL); ql_dbg(1073741824U, vha, 92, "Read FCODE %d.%d.\n", (int )ha->fcode_revision[1], (int )ha->fcode_revision[0]); goto ldv_66692; case 3: ha->efi_revision[0] = *(bcode + 18UL); ha->efi_revision[1] = *(bcode + 19UL); ql_dbg(1073741824U, vha, 93, "Read EFI %d.%d.\n", (int )ha->efi_revision[1], (int )ha->efi_revision[0]); goto ldv_66692; default: ql_log(1U, vha, 94, "Unrecognized code type %x at pcids %x.\n", (int )code_type, pcids); goto 
ldv_66692; } ldv_66692: last_image = (unsigned int )*(bcode + 21UL) & 128U; pcihdr = (uint32_t )((((int )*(bcode + 17UL) << 8) | (int )*(bcode + 16UL)) * 512) + pcihdr; if ((unsigned int )last_image == 0U) { goto ldv_66696; } else { } ldv_66690: memset((void *)(& ha->fw_revision), 0, 16UL); dcode = (uint32_t *)mbuf; qla24xx_read_flash_data(vha, dcode, ha->flt_region_fw + 4U, 4U); i = 0; goto ldv_66698; ldv_66697: tmp = __fswab32(*(dcode + (unsigned long )i)); *(dcode + (unsigned long )i) = tmp; i = i + 1; ldv_66698: ; if (i <= 3) { goto ldv_66697; } else { } if ((((*dcode == 4294967295U && *(dcode + 1UL) == 4294967295U) && *(dcode + 2UL) == 4294967295U) && *(dcode + 3UL) == 4294967295U) || (((*dcode == 0U && *(dcode + 1UL) == 0U) && *(dcode + 2UL) == 0U) && *(dcode + 3UL) == 0U)) { ql_log(1U, vha, 95, "Unrecognized fw revision at %x.\n", ha->flt_region_fw * 4U); } else { ha->fw_revision[0] = *dcode; ha->fw_revision[1] = *(dcode + 1UL); ha->fw_revision[2] = *(dcode + 2UL); ha->fw_revision[3] = *(dcode + 3UL); ql_dbg(1073741824U, vha, 96, "Firmware revision %d.%d.%d (%x).\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], ha->fw_revision[3]); } if ((ha->device_type & 8192U) == 0U) { return (ret); } else { } memset((void *)(& ha->gold_fw_version), 0, 16UL); dcode = (uint32_t *)mbuf; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)dcode, ha->flt_region_gold_fw << 2, 32U); if (((*(dcode + 4UL) == 4294967295U && *(dcode + 5UL) == 4294967295U) && *(dcode + 6UL) == 4294967295U) && *(dcode + 7UL) == 4294967295U) { ql_log(1U, vha, 86, "Unrecognized golden fw at 0x%x.\n", ha->flt_region_gold_fw * 4U); return (ret); } else { } i = 4; goto ldv_66701; ldv_66700: tmp___0 = __fswab32(*(dcode + (unsigned long )i)); ha->gold_fw_version[i + -4] = tmp___0; i = i + 1; ldv_66701: ; if (i <= 7) { goto ldv_66700; } else { } return (ret); } } static int qla2xxx_is_vpd_valid(uint8_t *pos , uint8_t *end ) { { if ((unsigned long )pos >= (unsigned long )end || (unsigned int )*pos != 130U) { return (0); } else { } pos = pos + (unsigned long )((int )*(pos + 1UL) + 3); if ((unsigned long )pos >= (unsigned long )end || (unsigned int )*pos != 144U) { return (0); } else { } pos = pos + (unsigned long )((int )*(pos + 1UL) + 3); if ((unsigned long )pos >= (unsigned long )end || (unsigned int )*pos != 120U) { return (0); } else { } return (1); } } int qla2xxx_get_vpd_field(scsi_qla_host_t *vha , char *key , char *str , size_t size ) { struct qla_hw_data *ha ; uint8_t *pos ; uint8_t *end ; int len ; int tmp ; size_t tmp___0 ; int tmp___1 ; int tmp___2 ; { ha = vha->hw; pos = (uint8_t *)ha->vpd; end = pos + (unsigned long )ha->vpd_size; len = 0; if ((ha->device_type & 134217728U) == 0U) { return (0); } else { tmp = qla2xxx_is_vpd_valid(pos, end); if (tmp == 0) { return (0); } else { } } goto ldv_66719; ldv_66718: len = (unsigned int )*pos == 130U ? 
(int )*(pos + 1UL) : (int )*(pos + 2UL); tmp___0 = strlen((char const *)key); tmp___1 = strncmp((char const *)pos, (char const *)key, tmp___0); if (tmp___1 == 0) { goto ldv_66717; } else { } if ((unsigned int )*pos != 144U && (unsigned int )*pos != 145U) { pos = pos + (unsigned long )len; } else { } pos = pos + 3UL; ldv_66719: ; if ((unsigned long )pos < (unsigned long )end && (unsigned int )*pos != 120U) { goto ldv_66718; } else { } ldv_66717: ; if ((unsigned long )(end + - ((unsigned long )len)) > (unsigned long )pos && (unsigned int )*pos != 120U) { tmp___2 = scnprintf(str, size, "%.*s", len, pos + 3UL); return (tmp___2); } else { } return (0); } } int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha ) { int len ; int max_len ; uint32_t fcp_prio_addr ; struct qla_hw_data *ha ; void *tmp ; int tmp___0 ; int tmp___1 ; { ha = vha->hw; if ((unsigned long )ha->fcp_prio_cfg == (unsigned long )((struct qla_fcp_prio_cfg *)0)) { tmp = vmalloc(32768UL); ha->fcp_prio_cfg = (struct qla_fcp_prio_cfg *)tmp; if ((unsigned long )ha->fcp_prio_cfg == (unsigned long )((struct qla_fcp_prio_cfg *)0)) { ql_log(1U, vha, 213, "Unable to allocate memory for fcp priorty data (%x).\n", 32768); return (258); } else { } } else { } memset((void *)ha->fcp_prio_cfg, 0, 32768UL); fcp_prio_addr = ha->flt_region_fcp_prio; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->fcp_prio_cfg, fcp_prio_addr << 2, 16U); tmp___0 = qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0); if (tmp___0 == 0) { goto fail; } else { } fcp_prio_addr = fcp_prio_addr + 4U; len = (int )(ha->fcp_prio_cfg)->num_entries * 32; max_len = 32752; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)(& (ha->fcp_prio_cfg)->entry), fcp_prio_addr << 2, (uint32_t )(len < max_len ? len : max_len)); tmp___1 = qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1); if (tmp___1 == 0) { goto fail; } else { } ha->flags.fcp_prio_enabled = 1U; return (0); fail: vfree((void const *)ha->fcp_prio_cfg); ha->fcp_prio_cfg = (struct qla_fcp_prio_cfg *)0; return (258); } } int reg_timer_21(struct timer_list *timer ) { { ldv_timer_list_21 = timer; ldv_timer_state_21 = 1; return (0); } } void disable_suitable_timer_21(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_21) { ldv_timer_state_21 = 0; return; } else { } return; } } void activate_pending_timer_21(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_21 == (unsigned long )timer) { if (ldv_timer_state_21 == 2 || pending_flag != 0) { ldv_timer_list_21 = timer; ldv_timer_list_21->data = data; ldv_timer_state_21 = 1; } else { } return; } else { } reg_timer_21(timer); ldv_timer_list_21->data = data; return; } } void choose_timer_21(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_21 = 2; return; } } bool ldv_queue_work_on_161(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_162(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& 
ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_163(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_164(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_165(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_166(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern long simple_strtol(char const * , char ** , unsigned int ) ; extern int sscanf(char const * , char const * , ...) ; extern ssize_t memory_read_from_buffer(void * , size_t , loff_t * , void const * , size_t ) ; bool ldv_queue_work_on_177(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_179(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_178(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_181(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_180(struct workqueue_struct *ldv_func_arg1 ) ; extern int sysfs_create_bin_file(struct kobject * , struct bin_attribute const * ) ; extern void sysfs_remove_bin_file(struct kobject * , struct bin_attribute const * ) ; extern bool capable(int ) ; void choose_timer_22(struct timer_list *timer ) ; void disable_suitable_timer_22(struct timer_list *timer ) ; void activate_pending_timer_22(struct timer_list *timer , unsigned long data , int pending_flag ) ; int reg_timer_22(struct timer_list *timer ) ; extern int scsi_is_host_device(struct device const * ) ; __inline static struct Scsi_Host *dev_to_shost(struct device *dev ) { int tmp ; struct device const *__mptr ; { goto ldv_36704; ldv_36703: ; if ((unsigned long )dev->parent == (unsigned long )((struct device *)0)) { return ((struct Scsi_Host *)0); } else { } dev = dev->parent; ldv_36704: tmp = scsi_is_host_device((struct device const *)dev); if (tmp == 0) { goto ldv_36703; } else { } __mptr = (struct device const *)dev; return ((struct Scsi_Host *)__mptr + 0xfffffffffffffc48UL); } } int ldv_scsi_add_host_with_dma_182(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; int ldv_scsi_add_host_with_dma_183(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; void ldv_scsi_remove_host_184(struct Scsi_Host *shost ) ; static char const * const port_state_str___2[5U] = { "Unknown", 
"UNCONFIGURED", "DEAD", "LOST", "ONLINE"}; void qla24xx_deallocate_vp_id(scsi_qla_host_t *vha ) ; int qla24xx_disable_vp(scsi_qla_host_t *vha ) ; int qla24xx_enable_vp(scsi_qla_host_t *vha ) ; void qla2x00_vp_stop_timer(scsi_qla_host_t *vha ) ; int qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport ) ; scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *fc_vport ) ; int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha ) ; int qla25xx_delete_req_que(struct scsi_qla_host *vha , struct req_que *req ) ; uint32_t qlafx00_fw_state_show(struct device *dev , struct device_attribute *attr , char *buf ) ; void qlafx00_get_host_speed(struct Scsi_Host *shost ) ; int qla24xx_bsg_request(struct fc_bsg_job *bsg_job ) ; int qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job ) ; void qla82xx_md_prep(scsi_qla_host_t *vha ) ; void qla82xx_set_reset_owner(scsi_qla_host_t *vha ) ; uint32_t qla8044_rd_reg(struct qla_hw_data *ha , ulong addr ) ; void qla8044_wr_reg(struct qla_hw_data *ha , ulong addr , uint32_t val ) ; void qlt_vport_create(struct scsi_qla_host *vha , struct qla_hw_data *ha ) ; __inline static void qla2x00_set_fcport_state___1(fc_port_t *fcport , int state ) { int old_state ; { old_state = atomic_read((atomic_t const *)(& fcport->state)); atomic_set(& fcport->state, state); if (old_state != 0 && old_state != state) { ql_dbg(268435456U, fcport->vha, 8317, "FCPort state transitioned from %s to %s - portid=%02x%02x%02x.\n", port_state_str___2[old_state], port_state_str___2[state], (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { } return; } } static int qla24xx_vport_disable(struct fc_vport *fc_vport , bool disable ) ; static ssize_t qla2x00_sysfs_read_fw_dump(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; int rval ; ssize_t tmp___1 ; ssize_t tmp___2 ; ssize_t tmp___3 ; ssize_t tmp___4 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; rval = 0; if (ha->fw_dump_reading == 0 && ha->mctp_dump_reading == 0) { return (0L); } else { } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { if ((loff_t )ha->md_template_size > off) { tmp___1 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->md_tmplt_hdr, (size_t )ha->md_template_size); rval = (int )tmp___1; return ((ssize_t )rval); } else { } off = off - (loff_t )ha->md_template_size; tmp___2 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->md_dump, (size_t )ha->md_dump_size); rval = (int )tmp___2; return ((ssize_t )rval); } else if (ha->mctp_dumped != 0 && ha->mctp_dump_reading != 0) { tmp___3 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->mctp_dump, 548964UL); return (tmp___3); } else if (ha->fw_dump_reading != 0) { tmp___4 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->fw_dump, (size_t )ha->fw_dump_len); return (tmp___4); } else { return (0L); } } } static ssize_t qla2x00_sysfs_write_fw_dump(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; int reading ; long 
tmp___1 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; if (off != 0LL) { return (0L); } else { } tmp___1 = simple_strtol((char const *)buf, (char **)0, 10U); reading = (int )tmp___1; switch (reading) { case 0: ; if (ha->fw_dump_reading == 0) { goto ldv_66008; } else { } ql_log(2U, vha, 28765, "Firmware dump cleared on (%ld).\n", vha->host_no); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { qla82xx_md_free(vha); qla82xx_md_prep(vha); } else { } ha->fw_dump_reading = 0; ha->fw_dumped = 0; goto ldv_66008; case 1: ; if (ha->fw_dumped != 0 && ha->fw_dump_reading == 0) { ha->fw_dump_reading = 1; ql_log(2U, vha, 28766, "Raw firmware dump ready for read on (%ld).\n", vha->host_no); } else { } goto ldv_66008; case 2: qla2x00_alloc_fw_dump(vha); goto ldv_66008; case 3: ; if ((ha->device_type & 16384U) != 0U) { qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); qla82xx_set_reset_owner(vha); qla8044_idc_unlock(ha); } else { qla2x00_system_error(vha); } goto ldv_66008; case 4: ; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { if ((unsigned long )ha->md_tmplt_hdr != (unsigned long )((void *)0)) { ql_dbg(8388608U, vha, 28763, "MiniDump supported with this firmware.\n"); } else { ql_dbg(8388608U, vha, 28829, "MiniDump not supported with this firmware.\n"); } } else { } goto ldv_66008; case 5: ; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } goto ldv_66008; case 6: ; if (ha->mctp_dump_reading == 0) { goto ldv_66008; } else { } ql_log(2U, vha, 28865, "MCTP dump cleared on (%ld).\n", vha->host_no); ha->mctp_dump_reading = 0; ha->mctp_dumped = 0; goto ldv_66008; case 7: ; if (ha->mctp_dumped != 0 && ha->mctp_dump_reading == 0) { ha->mctp_dump_reading = 1; ql_log(2U, vha, 28866, "Raw mctp dump ready for read on (%ld).\n", vha->host_no); } else { } goto ldv_66008; } ldv_66008: ; return ((ssize_t )count); } } static struct bin_attribute sysfs_fw_dump_attr = {{"fw_dump", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, & qla2x00_sysfs_read_fw_dump, & qla2x00_sysfs_write_fw_dump, 0}; static ssize_t qla2x00_sysfs_read_fw_dump_template(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; ssize_t tmp___1 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; if ((unsigned long )ha->fw_dump_template == (unsigned long )((void *)0) || ha->fw_dump_template_len == 0U) { return (0L); } else { } ql_dbg(8388608U, vha, 28898, "chunk <- off=%llx count=%zx\n", off, count); tmp___1 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->fw_dump_template, (size_t )ha->fw_dump_template_len); return (tmp___1); } } static ssize_t qla2x00_sysfs_write_fw_dump_template(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; 
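/* sysfs "fw_dump_template" write handler: a write at offset 0 frees any earlier dump/template buffers and allocates a new template sized by qla27xx_fwdt_template_size(); once the final chunk completes the template, a firmware-dump buffer sized by qla27xx_fwdt_calculate_dump_size() is allocated. */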
struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; uint32_t size ; ulong tmp___1 ; ulong tmp___2 ; void *tmp___3 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; if (off == 0LL) { if ((unsigned long )ha->fw_dump != (unsigned long )((struct qla2xxx_fw_dump *)0)) { vfree((void const *)ha->fw_dump); } else { } if ((unsigned long )ha->fw_dump_template != (unsigned long )((void *)0)) { vfree((void const *)ha->fw_dump_template); } else { } ha->fw_dump = (struct qla2xxx_fw_dump *)0; ha->fw_dump_len = 0U; ha->fw_dump_template = (void *)0; ha->fw_dump_template_len = 0U; tmp___1 = qla27xx_fwdt_template_size((void *)buf); size = (uint32_t )tmp___1; ql_dbg(8388608U, vha, 28881, "-> allocating fwdt (%x bytes)...\n", size); ha->fw_dump_template = vmalloc((unsigned long )size); if ((unsigned long )ha->fw_dump_template == (unsigned long )((void *)0)) { ql_log(1U, vha, 28882, "Failed allocate fwdt (%x bytes).\n", size); return (-12L); } else { } ha->fw_dump_template_len = size; } else { } if ((unsigned long long )off + (unsigned long long )count > (unsigned long long )ha->fw_dump_template_len) { count = (size_t )((loff_t )ha->fw_dump_template_len - off); ql_dbg(8388608U, vha, 28883, "chunk -> truncating to %zx bytes.\n", count); } else { } ql_dbg(8388608U, vha, 28884, "chunk -> off=%llx count=%zx\n", off, count); memcpy(ha->fw_dump_template + (unsigned long )off, (void const *)buf, count); if ((unsigned long long )off + (unsigned long long )count == (unsigned long long )ha->fw_dump_template_len) { tmp___2 = qla27xx_fwdt_calculate_dump_size(vha); size = (uint32_t )tmp___2; ql_dbg(8388608U, vha, 28885, "-> allocating fwdump (%x bytes)...\n", size); tmp___3 = vmalloc((unsigned long )size); ha->fw_dump = (struct qla2xxx_fw_dump *)tmp___3; if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 28886, "Failed allocate fwdump (%x bytes).\n", size); return (-12L); } else { } ha->fw_dump_len = size; } else { } return ((ssize_t )count); } } static struct bin_attribute sysfs_fw_dump_template_attr = {{"fw_dump_template", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, & qla2x00_sysfs_read_fw_dump_template, & qla2x00_sysfs_write_fw_dump_template, 0}; static ssize_t qla2x00_sysfs_read_nvram(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; bool tmp___1 ; int tmp___2 ; ssize_t tmp___3 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = capable(21); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0L); } else { } if (((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->nvram, ha->flt_region_nvram << 2, (uint32_t )ha->nvram_size); } else { } tmp___3 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->nvram, (size_t )ha->nvram_size); return (tmp___3); } } static ssize_t 
qla2x00_sysfs_write_nvram(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; uint16_t cnt ; bool tmp___1 ; int tmp___2 ; uint32_t *iter ; uint32_t chksum ; uint32_t *tmp___3 ; uint8_t *iter___0 ; uint8_t chksum___0 ; uint8_t *tmp___4 ; int tmp___5 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = capable(21); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (((tmp___2 || off != 0LL) || (size_t )ha->nvram_size != count) || (unsigned long )(ha->isp_ops)->write_nvram == (unsigned long )((int (*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0)) { return (-22L); } else { } if ((ha->device_type & 134217728U) != 0U) { iter = (uint32_t *)buf; chksum = 0U; cnt = 0U; goto ldv_66071; ldv_66070: tmp___3 = iter; iter = iter + 1; chksum = *tmp___3 + chksum; cnt = (uint16_t )((int )cnt + 1); ldv_66071: ; if ((size_t )cnt < (count >> 2) - 1UL) { goto ldv_66070; } else { } chksum = - chksum; *iter = chksum; } else { iter___0 = (uint8_t *)buf; chksum___0 = 0U; cnt = 0U; goto ldv_66076; ldv_66075: tmp___4 = iter___0; iter___0 = iter___0 + 1; chksum___0 = (int )*tmp___4 + (int )chksum___0; cnt = (uint16_t )((int )cnt + 1); ldv_66076: ; if ((size_t )cnt < count - 1UL) { goto ldv_66075; } else { } chksum___0 = - ((int )chksum___0); *iter___0 = chksum___0; } tmp___5 = qla2x00_wait_for_hba_online(vha); if (tmp___5 != 0) { ql_log(1U, vha, 28767, "HBA not online, failing NVRAM update.\n"); return (-11L); } else { } (*((ha->isp_ops)->write_nvram))(vha, (uint8_t *)buf, (uint32_t )ha->nvram_base, (uint32_t )count); (*((ha->isp_ops)->read_nvram))(vha, (uint8_t *)ha->nvram, (uint32_t )ha->nvram_base, (uint32_t )count); ql_dbg(8388608U, vha, 28768, "Setting ISP_ABORT_NEEDED\n"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); return ((ssize_t )count); } } static struct bin_attribute sysfs_nvram_attr = {{"nvram", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 512UL, 0, & qla2x00_sysfs_read_nvram, & qla2x00_sysfs_write_nvram, 0}; static ssize_t qla2x00_sysfs_read_optrom(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; ssize_t rval ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; rval = 0L; if (ha->optrom_state != 1) { return (0L); } else { } mutex_lock_nested(& ha->optrom_mutex, 0U); rval = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->optrom_buffer, (size_t )ha->optrom_region_size); mutex_unlock(& ha->optrom_mutex); return (rval); } } static ssize_t qla2x00_sysfs_write_optrom(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct 
device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; if (ha->optrom_state != 2) { return (-22L); } else { } if ((loff_t )ha->optrom_region_size < off) { return (-34L); } else { } if ((unsigned long long )off + (unsigned long long )count > (unsigned long long )ha->optrom_region_size) { count = (size_t )((loff_t )ha->optrom_region_size - off); } else { } mutex_lock_nested(& ha->optrom_mutex, 0U); memcpy((void *)ha->optrom_buffer + (unsigned long )off, (void const *)buf, count); mutex_unlock(& ha->optrom_mutex); return ((ssize_t )count); } } static struct bin_attribute sysfs_optrom_attr = {{"optrom", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, & qla2x00_sysfs_read_optrom, & qla2x00_sysfs_write_optrom, 0}; static ssize_t qla2x00_sysfs_write_optrom_ctl(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; uint32_t start ; uint32_t size ; int val ; int valid ; ssize_t rval ; int tmp___1 ; long tmp___2 ; int tmp___3 ; void *tmp___4 ; int tmp___5 ; void *tmp___6 ; int tmp___7 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; start = 0U; size = ha->optrom_size; rval = (ssize_t )count; if (off != 0LL) { return (-22L); } else { } tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { return (-11L); } else { } tmp___3 = sscanf((char const *)buf, "%d:%x:%x", & val, & start, & size); if (tmp___3 <= 0) { return (-22L); } else { } if (ha->optrom_size < start) { return (-22L); } else { } mutex_lock_nested(& ha->optrom_mutex, 0U); switch (val) { case 0: ; if (ha->optrom_state != 1 && ha->optrom_state != 2) { rval = -22L; goto out; } else { } ha->optrom_state = 0; ql_dbg(8388608U, vha, 28769, "Freeing flash region allocation -- 0x%x bytes.\n", ha->optrom_region_size); vfree((void const *)ha->optrom_buffer); ha->optrom_buffer = (char *)0; goto ldv_66124; case 1: ; if (ha->optrom_state != 0) { rval = -22L; goto out; } else { } ha->optrom_region_start = start; ha->optrom_region_size = start + size > ha->optrom_size ? 
ha->optrom_size - start : size; ha->optrom_state = 1; tmp___4 = vmalloc((unsigned long )ha->optrom_region_size); ha->optrom_buffer = (char *)tmp___4; if ((unsigned long )ha->optrom_buffer == (unsigned long )((char *)0)) { ql_log(1U, vha, 28770, "Unable to allocate memory for optrom retrieval (%x).\n", ha->optrom_region_size); ha->optrom_state = 0; rval = -12L; goto out; } else { } tmp___5 = qla2x00_wait_for_hba_online(vha); if (tmp___5 != 0) { ql_log(1U, vha, 28771, "HBA not online, failing NVRAM update.\n"); rval = -11L; goto out; } else { } ql_dbg(8388608U, vha, 28772, "Reading flash region -- 0x%x/0x%x.\n", ha->optrom_region_start, ha->optrom_region_size); memset((void *)ha->optrom_buffer, 0, (size_t )ha->optrom_region_size); (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); goto ldv_66124; case 2: ; if (ha->optrom_state != 0) { rval = -22L; goto out; } else { } valid = 0; if (ha->optrom_size == 131072U && start == 0U) { valid = 1; } else if (ha->flt_region_boot * 4U == start || ha->flt_region_fw * 4U == start) { valid = 1; } else if ((((((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) || ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U)) || (ha->device_type & 32768U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { valid = 1; } else { } if (valid == 0) { ql_log(1U, vha, 28773, "Invalid start region 0x%x/0x%x.\n", start, size); rval = -22L; goto out; } else { } ha->optrom_region_start = start; ha->optrom_region_size = start + size > ha->optrom_size ? 
ha->optrom_size - start : size; ha->optrom_state = 2; tmp___6 = vmalloc((unsigned long )ha->optrom_region_size); ha->optrom_buffer = (char *)tmp___6; if ((unsigned long )ha->optrom_buffer == (unsigned long )((char *)0)) { ql_log(1U, vha, 28774, "Unable to allocate memory for optrom update (%x)\n", ha->optrom_region_size); ha->optrom_state = 0; rval = -12L; goto out; } else { } ql_dbg(8388608U, vha, 28775, "Staging flash region write -- 0x%x/0x%x.\n", ha->optrom_region_start, ha->optrom_region_size); memset((void *)ha->optrom_buffer, 0, (size_t )ha->optrom_region_size); goto ldv_66124; case 3: ; if (ha->optrom_state != 2) { rval = -22L; goto out; } else { } tmp___7 = qla2x00_wait_for_hba_online(vha); if (tmp___7 != 0) { ql_log(1U, vha, 28776, "HBA not online, failing flash update.\n"); rval = -11L; goto out; } else { } ql_dbg(8388608U, vha, 28777, "Writing flash region -- 0x%x/0x%x.\n", ha->optrom_region_start, ha->optrom_region_size); (*((ha->isp_ops)->write_optrom))(vha, (uint8_t *)ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); goto ldv_66124; default: rval = -22L; } ldv_66124: ; out: mutex_unlock(& ha->optrom_mutex); return (rval); } } static struct bin_attribute sysfs_optrom_ctl_attr = {{"optrom_ctl", 128U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, 0, & qla2x00_sysfs_write_optrom_ctl, 0}; static ssize_t qla2x00_sysfs_read_vpd(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; ssize_t tmp___5 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { return (-11L); } else { } tmp___3 = capable(21); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { return (-22L); } else { } if (((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->vpd, ha->flt_region_vpd << 2, (uint32_t )ha->vpd_size); } else { } tmp___5 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->vpd, (size_t )ha->vpd_size); return (tmp___5); } } static ssize_t qla2x00_sysfs_write_vpd(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; uint8_t *tmp_data ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; int tmp___5 ; void *tmp___6 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { return (0L); } else { } tmp___3 = capable(21); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (((tmp___4 || off != 0LL) || (size_t )ha->vpd_size != count) || (unsigned long )(ha->isp_ops)->write_nvram == 
(unsigned long )((int (*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0)) { return (0L); } else { } tmp___5 = qla2x00_wait_for_hba_online(vha); if (tmp___5 != 0) { ql_log(1U, vha, 28778, "HBA not online, failing VPD update.\n"); return (-11L); } else { } (*((ha->isp_ops)->write_nvram))(vha, (uint8_t *)buf, (uint32_t )ha->vpd_base, (uint32_t )count); (*((ha->isp_ops)->read_nvram))(vha, (uint8_t *)ha->vpd, (uint32_t )ha->vpd_base, (uint32_t )count); if ((ha->device_type & 134217728U) == 0U) { return (-22L); } else { } tmp___6 = vmalloc(256UL); tmp_data = (uint8_t *)tmp___6; if ((unsigned long )tmp_data == (unsigned long )((uint8_t *)0U)) { ql_log(1U, vha, 28779, "Unable to allocate memory for VPD information update.\n"); return (-12L); } else { } (*((ha->isp_ops)->get_flash_version))(vha, (void *)tmp_data); vfree((void const *)tmp_data); return ((ssize_t )count); } } static struct bin_attribute sysfs_vpd_attr = {{"vpd", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, & qla2x00_sysfs_read_vpd, & qla2x00_sysfs_write_vpd, 0}; static ssize_t qla2x00_sysfs_read_sfp(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; uint16_t iter ; uint16_t addr ; uint16_t offset ; int rval ; bool tmp___1 ; int tmp___2 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = capable(21); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if ((tmp___2 || off != 0LL) || count != 512UL) { return (0L); } else { } if ((unsigned long )ha->sfp_data != (unsigned long )((void *)0)) { goto do_read; } else { } ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, 208U, & ha->sfp_data_dma); if ((unsigned long )ha->sfp_data == (unsigned long )((void *)0)) { ql_log(1U, vha, 28780, "Unable to allocate memory for SFP read-data.\n"); return (0L); } else { } do_read: memset(ha->sfp_data, 0, 64UL); addr = 160U; iter = 0U; offset = 0U; goto ldv_66174; ldv_66173: ; if ((unsigned int )iter == 4U) { addr = 162U; offset = 0U; } else { } rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, (uint8_t *)ha->sfp_data, (int )addr, (int )offset, 64, 2); if (rval != 0) { ql_log(1U, vha, 28781, "Unable to read SFP data (%x/%x/%x).\n", rval, (int )addr, (int )offset); return (-5L); } else { } memcpy((void *)buf, (void const *)ha->sfp_data, 64UL); buf = buf + 64UL; iter = (uint16_t )((int )iter + 1); offset = (unsigned int )offset + 64U; ldv_66174: ; if ((unsigned int )iter <= 7U) { goto ldv_66173; } else { } return ((ssize_t )count); } } static struct bin_attribute sysfs_sfp_attr = {{"sfp", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 512UL, 0, & qla2x00_sysfs_read_sfp, 0, 0}; static ssize_t qla2x00_sysfs_write_reset(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp___1 ; int type ; uint32_t idc_control ; uint8_t *tmp_data ; long tmp___2 ; uint32_t idc_control___0 ; int tmp___3 ; void *tmp___4 ; { __mptr = (struct kobject const *)kobj; tmp = 
dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp___1; tmp_data = (uint8_t *)0U; if (off != 0LL) { return (-22L); } else { } tmp___2 = simple_strtol((char const *)buf, (char **)0, 10U); type = (int )tmp___2; switch (type) { case 131676: ql_log(2U, vha, 28782, "Issuing ISP reset.\n"); scsi_block_requests(vha->host); if ((ha->device_type & 16384U) != 0U) { ha->flags.isp82xx_no_md_cap = 1U; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); idc_control = qla8044_rd_reg(ha, 14224UL); qla8044_wr_reg(ha, 14224UL, idc_control | 2U); qla82xx_set_reset_owner(vha); qla8044_idc_unlock(ha); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); goto ldv_66194; case 131677: ; if ((ha->device_type & 8192U) == 0U && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) { return (-1L); } else { } ql_log(2U, vha, 28783, "Issuing MPI reset.\n"); if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { qla83xx_idc_lock(vha, 0); __qla83xx_get_idc_control(vha, & idc_control___0); idc_control___0 = idc_control___0 | 2U; __qla83xx_set_idc_control(vha, idc_control___0); qla83xx_wr_reg(vha, 571483012U, 4U); qla83xx_idc_audit(vha, 0); qla83xx_idc_unlock(vha, 0); goto ldv_66194; } else { qla2x00_wait_for_hba_online(vha); scsi_block_requests(vha->host); tmp___3 = qla81xx_restart_mpi_firmware(vha); if (tmp___3 != 0) { ql_log(1U, vha, 28784, "MPI reset failed.\n"); } else { } scsi_unblock_requests(vha->host); goto ldv_66194; } case 131678: ; if (((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) || (unsigned long )vha != (unsigned long )base_vha) { ql_log(2U, vha, 28785, "FCoE ctx reset no supported.\n"); return (-1L); } else { } ql_log(2U, vha, 28786, "Issuing FCoE ctx reset.\n"); set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); qla2x00_wait_for_fcoe_ctx_reset(vha); goto ldv_66194; case 131679: ; if ((ha->device_type & 65536U) == 0U) { return (-1L); } else { } ql_log(2U, vha, 28860, "Disabling Reset by IDC control\n"); qla83xx_idc_lock(vha, 0); __qla83xx_get_idc_control(vha, & idc_control); idc_control = idc_control | 1U; __qla83xx_set_idc_control(vha, idc_control); qla83xx_idc_unlock(vha, 0); goto ldv_66194; case 131680: ; if ((ha->device_type & 65536U) == 0U) { return (-1L); } else { } ql_log(2U, vha, 28861, "Enabling Reset by IDC control\n"); qla83xx_idc_lock(vha, 0); __qla83xx_get_idc_control(vha, & idc_control); idc_control = idc_control & 4294967294U; __qla83xx_set_idc_control(vha, idc_control); qla83xx_idc_unlock(vha, 0); goto ldv_66194; case 131681: ql_dbg(8388608U, vha, 28896, "Updating cache versions without reset "); tmp___4 = vmalloc(256UL); tmp_data = (uint8_t *)tmp___4; if ((unsigned long )tmp_data == (unsigned long )((uint8_t *)0U)) { ql_log(1U, vha, 28897, "Unable to allocate memory for VPD information update.\n"); return (-12L); } else { } (*((ha->isp_ops)->get_flash_version))(vha, (void *)tmp_data); vfree((void const *)tmp_data); goto ldv_66194; } ldv_66194: ; return ((ssize_t )count); } } static struct bin_attribute sysfs_reset_attr = {{"reset", 128U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, 
{(char)0}}}}, 0UL, 0, 0, & qla2x00_sysfs_write_reset, 0}; static ssize_t qla2x00_sysfs_read_xgmac_stats(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; int rval ; uint16_t actual_size ; bool tmp___1 ; int tmp___2 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = capable(21); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if ((tmp___2 || off != 0LL) || count > 4096UL) { return (0L); } else { } if ((unsigned long )ha->xgmac_data != (unsigned long )((void *)0)) { goto do_read; } else { } ha->xgmac_data = dma_alloc_attrs(& (ha->pdev)->dev, 4096UL, & ha->xgmac_data_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )ha->xgmac_data == (unsigned long )((void *)0)) { ql_log(1U, vha, 28790, "Unable to allocate memory for XGMAC read-data.\n"); return (0L); } else { } do_read: actual_size = 0U; memset(ha->xgmac_data, 0, 4096UL); rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, 4096, & actual_size); if (rval != 0) { ql_log(1U, vha, 28791, "Unable to read XGMAC data (%x).\n", rval); count = 0UL; } else { } count = count < (size_t )actual_size ? count : (size_t )actual_size; memcpy((void *)buf, (void const *)ha->xgmac_data, count); return ((ssize_t )count); } } static struct bin_attribute sysfs_xgmac_stats_attr = {{"xgmac_stats", 256U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, & qla2x00_sysfs_read_xgmac_stats, 0, 0}; static ssize_t qla2x00_sysfs_read_dcbx_tlv(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; int rval ; uint16_t actual_size ; bool tmp___1 ; int tmp___2 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = capable(21); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if ((tmp___2 || off != 0LL) || count > 4096UL) { return (0L); } else { } if ((unsigned long )ha->dcbx_tlv != (unsigned long )((void *)0)) { goto do_read; } else { } ha->dcbx_tlv = dma_alloc_attrs(& (ha->pdev)->dev, 4096UL, & ha->dcbx_tlv_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )ha->dcbx_tlv == (unsigned long )((void *)0)) { ql_log(1U, vha, 28792, "Unable to allocate memory for DCBX TLV read-data.\n"); return (-12L); } else { } do_read: actual_size = 0U; memset(ha->dcbx_tlv, 0, 4096UL); rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, 4096); if (rval != 0) { ql_log(1U, vha, 28793, "Unable to read DCBX TLV (%x).\n", rval); return (-5L); } else { } memcpy((void *)buf, (void const *)ha->dcbx_tlv, count); return ((ssize_t )count); } } static struct bin_attribute sysfs_dcbx_tlv_attr = {{"dcbx_tlv", 256U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, & qla2x00_sysfs_read_dcbx_tlv, 0, 0}; static struct sysfs_entry bin_file_entries[11U] = { {(char *)"fw_dump", & sysfs_fw_dump_attr, 0}, {(char *)"fw_dump_template", & sysfs_fw_dump_template_attr, 39}, {(char *)"nvram", & sysfs_nvram_attr, 0}, 
{(char *)"optrom", & sysfs_optrom_attr, 0}, {(char *)"optrom_ctl", & sysfs_optrom_ctl_attr, 0}, {(char *)"vpd", & sysfs_vpd_attr, 1}, {(char *)"sfp", & sysfs_sfp_attr, 1}, {(char *)"reset", & sysfs_reset_attr, 0}, {(char *)"xgmac_stats", & sysfs_xgmac_stats_attr, 3}, {(char *)"dcbx_tlv", & sysfs_dcbx_tlv_attr, 3}, {(char *)0, 0, 0}}; void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha ) { struct Scsi_Host *host ; struct sysfs_entry *iter ; int ret ; { host = vha->host; iter = (struct sysfs_entry *)(& bin_file_entries); goto ldv_66247; ldv_66246: ; if (iter->is4GBp_only != 0 && ((vha->hw)->device_type & 134217728U) == 0U) { goto ldv_66245; } else { } if (iter->is4GBp_only == 2 && ((vha->hw)->device_type & 2048U) == 0U) { goto ldv_66245; } else { } if (iter->is4GBp_only == 3 && (((((vha->hw)->device_type & 8192U) == 0U && ((vha->hw)->device_type & 16384U) == 0U) && ((vha->hw)->device_type & 65536U) == 0U) && ((vha->hw)->device_type & 262144U) == 0U)) { goto ldv_66245; } else { } if (iter->is4GBp_only == 39 && (((vha->hw)->device_type & 524288U) == 0U && ((vha->hw)->device_type & 1048576U) == 0U)) { goto ldv_66245; } else { } ret = sysfs_create_bin_file(& host->shost_gendev.kobj, (struct bin_attribute const *)iter->attr); if (ret != 0) { ql_log(1U, vha, 243, "Unable to create sysfs %s binary attribute (%d).\n", iter->name, ret); } else { ql_dbg(1073741824U, vha, 244, "Successfully created sysfs %s binary attribure.\n", iter->name); } ldv_66245: iter = iter + 1; ldv_66247: ; if ((unsigned long )iter->name != (unsigned long )((char *)0)) { goto ldv_66246; } else { } return; } } void qla2x00_free_sysfs_attr(scsi_qla_host_t *vha , bool stop_beacon ) { struct Scsi_Host *host ; struct sysfs_entry *iter ; struct qla_hw_data *ha ; { host = vha->host; ha = vha->hw; iter = (struct sysfs_entry *)(& bin_file_entries); goto ldv_66258; ldv_66257: ; if (iter->is4GBp_only != 0 && (ha->device_type & 134217728U) == 0U) { goto ldv_66256; } else { } if (iter->is4GBp_only == 2 && (ha->device_type & 2048U) == 0U) { goto ldv_66256; } else { } if (iter->is4GBp_only == 3 && (((((vha->hw)->device_type & 8192U) == 0U && ((vha->hw)->device_type & 16384U) == 0U) && ((vha->hw)->device_type & 65536U) == 0U) && ((vha->hw)->device_type & 262144U) == 0U)) { goto ldv_66256; } else { } if (iter->is4GBp_only == 39 && (((vha->hw)->device_type & 524288U) == 0U && ((vha->hw)->device_type & 1048576U) == 0U)) { goto ldv_66256; } else { } sysfs_remove_bin_file(& host->shost_gendev.kobj, (struct bin_attribute const *)iter->attr); ldv_66256: iter = iter + 1; ldv_66258: ; if ((unsigned long )iter->name != (unsigned long )((char *)0)) { goto ldv_66257; } else { } if ((int )stop_beacon && (unsigned int )ha->beacon_blink_led == 1U) { (*((ha->isp_ops)->beacon_off))(vha); } else { } return; } } static ssize_t qla2x00_drvr_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { int tmp ; { tmp = scnprintf(buf, 4096UL, "%s\n", (char *)(& qla2x00_version_str)); return ((ssize_t )tmp); } } static ssize_t qla2x00_fw_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; char fw_str[128U] ; char *tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = (*((ha->isp_ops)->fw_version_str))(vha, (char *)(& fw_str), 128UL); tmp___1 = scnprintf(buf, 4096UL, "%s\n", tmp___0); return ((ssize_t 
)tmp___1); } } static ssize_t qla2x00_serial_num_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; uint32_t sn ; int tmp___0 ; char *tmp___1 ; size_t tmp___2 ; int tmp___3 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; if (((vha->hw)->device_type & 131072U) != 0U) { tmp___0 = scnprintf(buf, 4096UL, "%s\n", (uint8_t *)(& (vha->hw)->mr.serial_num)); return ((ssize_t )tmp___0); } else if ((ha->device_type & 134217728U) != 0U) { qla2xxx_get_vpd_field(vha, (char *)"SN", buf, 4095UL); tmp___1 = strcat(buf, "\n"); tmp___2 = strlen((char const *)tmp___1); return ((ssize_t )tmp___2); } else { } sn = (uint32_t )(((((int )ha->serial0 & 31) << 16) | ((int )ha->serial2 << 8)) | (int )ha->serial1); tmp___3 = scnprintf(buf, 4096UL, "%c%05d\n", sn / 100000U + 65U, sn % 100000U); return ((ssize_t )tmp___3); } } static ssize_t qla2x00_isp_name_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; tmp___0 = scnprintf(buf, 4096UL, "ISP%04X\n", (int )((vha->hw)->pdev)->device); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_isp_id_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; if (((vha->hw)->device_type & 131072U) != 0U) { tmp___0 = scnprintf(buf, 4096UL, "%s\n", (uint8_t *)(& (vha->hw)->mr.hw_version)); return ((ssize_t )tmp___0); } else { } tmp___1 = scnprintf(buf, 4096UL, "%04x %04x %04x %04x\n", (int )ha->product_id[0], (int )ha->product_id[1], (int )ha->product_id[2], (int )ha->product_id[3]); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_model_name_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; tmp___0 = scnprintf(buf, 4096UL, "%s\n", (uint8_t *)(& (vha->hw)->model_number)); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_model_desc_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; tmp___0 = scnprintf(buf, 4096UL, "%s\n", (unsigned long )(& (vha->hw)->model_desc) != (unsigned long )((char (*)[80])0) ? 
(char *)(& (vha->hw)->model_desc) : (char *)""); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_pci_info_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; char pci_info[30U] ; char *tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; tmp___0 = (*(((vha->hw)->isp_ops)->pci_info_str))(vha, (char *)(& pci_info)); tmp___1 = scnprintf(buf, 4096UL, "%s\n", tmp___0); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_link_state_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int len ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; len = 0; tmp___7 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___7 == 2) { len = scnprintf(buf, 4096UL, "Link Down\n"); } else { tmp___8 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___8 == 6) { len = scnprintf(buf, 4096UL, "Link Down\n"); } else if ((vha->device_flags & 2U) != 0U) { len = scnprintf(buf, 4096UL, "Link Down\n"); } else { tmp___5 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___5 != 5) { len = scnprintf(buf, 4096UL, "Unknown Link State\n"); } else { tmp___6 = qla2x00_reset_active(vha); if (tmp___6 != 0) { len = scnprintf(buf, 4096UL, "Unknown Link State\n"); } else { len = scnprintf(buf, 4096UL, "Link Up - "); switch ((int )ha->current_topology) { case 1: tmp___0 = scnprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "Loop\n"); len = tmp___0 + len; goto ldv_66338; case 4: tmp___1 = scnprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "FL_Port\n"); len = tmp___1 + len; goto ldv_66338; case 2: tmp___2 = scnprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "N_Port to N_Port\n"); len = tmp___2 + len; goto ldv_66338; case 8: tmp___3 = scnprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "F_Port\n"); len = tmp___3 + len; goto ldv_66338; default: tmp___4 = scnprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "Loop\n"); len = tmp___4 + len; goto ldv_66338; } ldv_66338: ; } } } } return ((ssize_t )len); } } static ssize_t qla2x00_zio_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int len ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; len = 0; switch ((int )(vha->hw)->zio_mode) { case 6: tmp___0 = scnprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "Mode 6\n"); len = tmp___0 + len; goto ldv_66353; case 0: tmp___1 = scnprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "Disabled\n"); len = tmp___1 + len; goto ldv_66353; } ldv_66353: ; return ((ssize_t )len); } } static ssize_t qla2x00_zio_store(struct device *dev , struct device_attribute *attr , char const *buf , size_t count ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int val ; uint16_t zio_mode ; int tmp___0 ; size_t tmp___1 ; { __mptr = (struct device const *)dev; tmp = 
shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; val = 0; if ((ha->device_type & 268435456U) == 0U) { return (-524L); } else { } tmp___0 = sscanf(buf, "%d", & val); if (tmp___0 != 1) { return (-22L); } else { } if (val != 0) { zio_mode = 6U; } else { zio_mode = 0U; } if ((unsigned int )zio_mode != 0U || (unsigned int )ha->zio_mode != 0U) { ha->zio_mode = zio_mode; set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } tmp___1 = strlen(buf); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_zio_timer_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; tmp___0 = scnprintf(buf, 4096UL, "%d us\n", (int )(vha->hw)->zio_timer * 100); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_zio_timer_store(struct device *dev , struct device_attribute *attr , char const *buf , size_t count ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int val ; uint16_t zio_timer ; int tmp___0 ; size_t tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; val = 0; tmp___0 = sscanf(buf, "%d", & val); if (tmp___0 != 1) { return (-22L); } else { } if (val > 25500 || val <= 99) { return (-34L); } else { } zio_timer = (unsigned short )(val / 100); (vha->hw)->zio_timer = zio_timer; tmp___1 = strlen(buf); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_beacon_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int len ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; len = 0; if ((unsigned int )(vha->hw)->beacon_blink_led != 0U) { tmp___0 = scnprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "Enabled\n"); len = tmp___0 + len; } else { tmp___1 = scnprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "Disabled\n"); len = tmp___1 + len; } return ((ssize_t )len); } } static ssize_t qla2x00_beacon_store(struct device *dev , struct device_attribute *attr , char const *buf , size_t count ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int val ; int rval ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; val = 0; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { return (-1L); } else { } tmp___0 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { ql_log(1U, vha, 28794, "Abort ISP active -- ignoring beacon request.\n"); return (-16L); } else { } tmp___1 = sscanf(buf, "%d", & val); if (tmp___1 != 1) { return (-22L); } else { } if (val != 0) { rval = (*((ha->isp_ops)->beacon_on))(vha); } else { rval = (*((ha->isp_ops)->beacon_off))(vha); } if (rval != 0) { count = 0UL; } else { } return ((ssize_t )count); } } static ssize_t qla2x00_optrom_bios_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { __mptr = (struct 
device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = scnprintf(buf, 4096UL, "%d.%02d\n", (int )ha->bios_revision[1], (int )ha->bios_revision[0]); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_optrom_efi_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = scnprintf(buf, 4096UL, "%d.%02d\n", (int )ha->efi_revision[1], (int )ha->efi_revision[0]); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_optrom_fcode_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = scnprintf(buf, 4096UL, "%d.%02d\n", (int )ha->fcode_revision[1], (int )ha->fcode_revision[0]); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_optrom_fw_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = scnprintf(buf, 4096UL, "%d.%02d.%02d %d\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], ha->fw_revision[3]); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_optrom_gold_fw_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; if (((ha->device_type & 8192U) == 0U && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) && ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U)) { tmp___0 = scnprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } tmp___1 = scnprintf(buf, 4096UL, "%d.%02d.%02d (%d)\n", ha->gold_fw_version[0], ha->gold_fw_version[1], ha->gold_fw_version[2], ha->gold_fw_version[3]); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_total_isp_aborts_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; tmp___0 = scnprintf(buf, 4096UL, "%d\n", vha->qla_stats.total_isp_aborts); return ((ssize_t )tmp___0); } } static ssize_t qla24xx_84xx_fw_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { int rval ; uint16_t status[2U] ; scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { rval = 0; status[0] = 0U; status[1] = 0U; __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha 
= vha->hw; if ((ha->device_type & 4096U) == 0U) { tmp___0 = scnprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } if ((ha->cs84xx)->op_fw_version == 0U) { rval = qla84xx_verify_chip(vha, (uint16_t *)(& status)); } else { } if (rval == 0 && (unsigned int )status[0] == 0U) { tmp___1 = scnprintf(buf, 4096UL, "%u\n", (ha->cs84xx)->op_fw_version); return ((ssize_t )tmp___1); } else { } tmp___2 = scnprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___2); } } static ssize_t qla2x00_mpi_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; if (((ha->device_type & 8192U) == 0U && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U) { tmp___0 = scnprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } tmp___1 = scnprintf(buf, 4096UL, "%d.%02d.%02d (%x)\n", (int )ha->mpi_version[0], (int )ha->mpi_version[1], (int )ha->mpi_version[2], ha->mpi_capabilities); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_phy_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; if ((ha->device_type & 8192U) == 0U && (ha->device_type & 65536U) == 0U) { tmp___0 = scnprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } tmp___1 = scnprintf(buf, 4096UL, "%d.%02d.%02d\n", (int )ha->phy_version[0], (int )ha->phy_version[1], (int )ha->phy_version[2]); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_flash_block_size_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = scnprintf(buf, 4096UL, "0x%x\n", ha->fdt_block_size); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_vlan_id_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; if (((((vha->hw)->device_type & 8192U) == 0U && ((vha->hw)->device_type & 16384U) == 0U) && ((vha->hw)->device_type & 65536U) == 0U) && ((vha->hw)->device_type & 262144U) == 0U) { tmp___0 = scnprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } tmp___1 = scnprintf(buf, 4096UL, "%d\n", (int )vha->fcoe_vlan_id); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_vn_port_mac_address_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; if (((((vha->hw)->device_type & 8192U) == 0U && ((vha->hw)->device_type & 16384U) == 0U) && ((vha->hw)->device_type & 65536U) == 
0U) && ((vha->hw)->device_type & 262144U) == 0U) { tmp___0 = scnprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } tmp___1 = scnprintf(buf, 4096UL, "%pMR\n", (uint8_t *)(& vha->fcoe_vn_port_mac)); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_fabric_param_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; tmp___0 = scnprintf(buf, 4096UL, "%d\n", (int )(vha->hw)->switch_cap); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_thermal_temp_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; uint16_t temp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; temp = 0U; tmp___0 = qla2x00_reset_active(vha); if (tmp___0 != 0) { ql_log(1U, vha, 28892, "ISP reset active.\n"); goto done; } else { } if (*((unsigned long *)vha->hw + 2UL) != 0UL) { ql_log(1U, vha, 28893, "PCI EEH busy.\n"); goto done; } else { } tmp___2 = qla2x00_get_thermal_temp(vha, & temp); if (tmp___2 == 0) { tmp___1 = scnprintf(buf, 4096UL, "%d\n", (int )temp); return ((ssize_t )tmp___1); } else { } done: tmp___3 = scnprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___3); } } static ssize_t qla2x00_fw_state_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int rval ; uint16_t state[6U] ; uint32_t pstate ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; rval = 258; if (((vha->hw)->device_type & 131072U) != 0U) { pstate = qlafx00_fw_state_show(dev, attr, buf); tmp___0 = scnprintf(buf, 4096UL, "0x%x\n", pstate); return ((ssize_t )tmp___0); } else { } tmp___1 = qla2x00_reset_active(vha); if (tmp___1 != 0) { ql_log(1U, vha, 28796, "ISP reset active.\n"); } else if (*((unsigned long *)vha->hw + 2UL) == 0UL) { rval = qla2x00_get_firmware_state(vha, (uint16_t *)(& state)); } else { } if (rval != 0) { memset((void *)(& state), -1, 12UL); } else { } tmp___2 = scnprintf(buf, 4096UL, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", (int )state[0], (int )state[1], (int )state[2], (int )state[3], (int )state[4], (int )state[5]); return ((ssize_t )tmp___2); } } static ssize_t qla2x00_diag_requests_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; if (((vha->hw)->device_type & 2048U) == 0U && ((vha->hw)->device_type & 32768U) == 0U) { tmp___0 = scnprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } tmp___1 = scnprintf(buf, 4096UL, "%llu\n", vha->bidi_stats.io_count); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_diag_megabytes_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 
0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; if (((vha->hw)->device_type & 2048U) == 0U && ((vha->hw)->device_type & 32768U) == 0U) { tmp___0 = scnprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } tmp___1 = scnprintf(buf, 4096UL, "%llu\n", vha->bidi_stats.transfer_bytes >> 20); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_fw_dump_size_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; uint32_t size ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; if (ha->fw_dumped == 0) { size = 0U; } else if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { size = ha->md_template_size + ha->md_dump_size; } else { size = ha->fw_dump_len; } tmp___0 = scnprintf(buf, 4096UL, "%d\n", size); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_allow_cna_fw_dump_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; if (((vha->hw)->device_type & 16384U) == 0U && ((vha->hw)->device_type & 262144U) == 0U) { tmp___0 = scnprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { tmp___1 = scnprintf(buf, 4096UL, "%s\n", (vha->hw)->allow_cna_fw_dump != 0 ? (char *)"true" : (char *)"false"); return ((ssize_t )tmp___1); } } } static ssize_t qla2x00_allow_cna_fw_dump_store(struct device *dev , struct device_attribute *attr , char const *buf , size_t count ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int val ; int tmp___0 ; size_t tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; val = 0; if (((vha->hw)->device_type & 16384U) == 0U && ((vha->hw)->device_type & 262144U) == 0U) { return (-22L); } else { } tmp___0 = sscanf(buf, "%d", & val); if (tmp___0 != 1) { return (-22L); } else { } (vha->hw)->allow_cna_fw_dump = val != 0; tmp___1 = strlen(buf); return ((ssize_t )tmp___1); } } static struct device_attribute dev_attr_driver_version = {{"driver_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_drvr_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fw_version = {{"fw_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_fw_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_serial_num = {{"serial_num", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_serial_num_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_isp_name = {{"isp_name", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_isp_name_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute 
dev_attr_isp_id = {{"isp_id", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_isp_id_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_model_name = {{"model_name", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_model_name_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_model_desc = {{"model_desc", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_model_desc_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_pci_info = {{"pci_info", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_pci_info_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_link_state = {{"link_state", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_link_state_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_zio = {{"zio", 420U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_zio_show, & qla2x00_zio_store}; static struct device_attribute dev_attr_zio_timer = {{"zio_timer", 420U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_zio_timer_show, & qla2x00_zio_timer_store}; static struct device_attribute dev_attr_beacon = {{"beacon", 420U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_beacon_show, & qla2x00_beacon_store}; static struct device_attribute dev_attr_optrom_bios_version = {{"optrom_bios_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_optrom_bios_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_optrom_efi_version = {{"optrom_efi_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_optrom_efi_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_optrom_fcode_version = {{"optrom_fcode_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_optrom_fcode_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_optrom_fw_version = {{"optrom_fw_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_optrom_fw_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_optrom_gold_fw_version = {{"optrom_gold_fw_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, 
{(char)0}, {(char)0}}}}, & qla2x00_optrom_gold_fw_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_84xx_fw_version = {{"84xx_fw_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla24xx_84xx_fw_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_total_isp_aborts = {{"total_isp_aborts", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_total_isp_aborts_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_mpi_version = {{"mpi_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_mpi_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_phy_version = {{"phy_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_phy_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_flash_block_size = {{"flash_block_size", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_flash_block_size_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_vlan_id = {{"vlan_id", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_vlan_id_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_vn_port_mac_address = {{"vn_port_mac_address", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_vn_port_mac_address_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fabric_param = {{"fabric_param", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_fabric_param_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fw_state = {{"fw_state", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_fw_state_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_thermal_temp = {{"thermal_temp", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_thermal_temp_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_diag_requests = {{"diag_requests", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_diag_requests_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_diag_megabytes = 
{{"diag_megabytes", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_diag_megabytes_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fw_dump_size = {{"fw_dump_size", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_fw_dump_size_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_allow_cna_fw_dump = {{"allow_cna_fw_dump", 420U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_allow_cna_fw_dump_show, & qla2x00_allow_cna_fw_dump_store}; struct device_attribute *qla2x00_host_attrs[32U] = { & dev_attr_driver_version, & dev_attr_fw_version, & dev_attr_serial_num, & dev_attr_isp_name, & dev_attr_isp_id, & dev_attr_model_name, & dev_attr_model_desc, & dev_attr_pci_info, & dev_attr_link_state, & dev_attr_zio, & dev_attr_zio_timer, & dev_attr_beacon, & dev_attr_optrom_bios_version, & dev_attr_optrom_efi_version, & dev_attr_optrom_fcode_version, & dev_attr_optrom_fw_version, & dev_attr_84xx_fw_version, & dev_attr_total_isp_aborts, & dev_attr_mpi_version, & dev_attr_phy_version, & dev_attr_flash_block_size, & dev_attr_vlan_id, & dev_attr_vn_port_mac_address, & dev_attr_fabric_param, & dev_attr_fw_state, & dev_attr_optrom_gold_fw_version, & dev_attr_thermal_temp, & dev_attr_diag_requests, & dev_attr_diag_megabytes, & dev_attr_fw_dump_size, & dev_attr_allow_cna_fw_dump, (struct device_attribute *)0}; static void qla2x00_get_host_port_id(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; ((struct fc_host_attrs *)shost->shost_data)->port_id = (u32 )((((int )vha->d_id.b.domain << 16) | ((int )vha->d_id.b.area << 8)) | (int )vha->d_id.b.al_pa); return; } } static void qla2x00_get_host_speed(struct Scsi_Host *shost ) { struct qla_hw_data *ha ; void *tmp ; u32 speed ; { tmp = shost_priv(shost); ha = ((struct scsi_qla_host *)tmp)->hw; speed = 0U; if ((ha->device_type & 131072U) != 0U) { qlafx00_get_host_speed(shost); return; } else { } switch ((int )ha->link_data_rate) { case 0: speed = 1U; goto ldv_67001; case 1: speed = 2U; goto ldv_67001; case 3: speed = 8U; goto ldv_67001; case 4: speed = 16U; goto ldv_67001; case 19: speed = 4U; goto ldv_67001; case 5: speed = 32U; goto ldv_67001; case 6: speed = 64U; goto ldv_67001; } ldv_67001: ((struct fc_host_attrs *)shost->shost_data)->speed = speed; return; } } static void qla2x00_get_host_port_type(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; uint32_t port_type ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; port_type = 0U; if ((unsigned int )vha->vp_idx != 0U) { ((struct fc_host_attrs *)shost->shost_data)->port_type = 7; return; } else { } switch ((int )(vha->hw)->current_topology) { case 1: port_type = 5U; goto ldv_67014; case 4: port_type = 4U; goto ldv_67014; case 2: port_type = 6U; goto ldv_67014; case 8: port_type = 3U; goto ldv_67014; } ldv_67014: ((struct fc_host_attrs *)shost->shost_data)->port_type = (enum fc_port_type )port_type; return; } } static void qla2x00_get_starget_node_name(struct scsi_target *starget ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; scsi_qla_host_t *vha ; void *tmp___0 ; fc_port_t *fcport ; u64 node_name ; struct list_head const *__mptr ; struct list_head 
const *__mptr___0 ; { tmp = dev_to_shost(starget->dev.parent); host = tmp; tmp___0 = shost_priv(host); vha = (scsi_qla_host_t *)tmp___0; node_name = 0ULL; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_67031; ldv_67030: ; if ((unsigned long )fcport->rport != (unsigned long )((struct fc_rport *)0) && starget->id == (fcport->rport)->scsi_target_id) { node_name = wwn_to_u64((u8 *)(& fcport->node_name)); goto ldv_67029; } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_67031: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_67030; } else { } ldv_67029: ((struct fc_starget_attrs *)(& starget->starget_data))->node_name = node_name; return; } } static void qla2x00_get_starget_port_name(struct scsi_target *starget ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; scsi_qla_host_t *vha ; void *tmp___0 ; fc_port_t *fcport ; u64 port_name ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { tmp = dev_to_shost(starget->dev.parent); host = tmp; tmp___0 = shost_priv(host); vha = (scsi_qla_host_t *)tmp___0; port_name = 0ULL; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_67045; ldv_67044: ; if ((unsigned long )fcport->rport != (unsigned long )((struct fc_rport *)0) && starget->id == (fcport->rport)->scsi_target_id) { port_name = wwn_to_u64((u8 *)(& fcport->port_name)); goto ldv_67043; } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_67045: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_67044; } else { } ldv_67043: ((struct fc_starget_attrs *)(& starget->starget_data))->port_name = port_name; return; } } static void qla2x00_get_starget_port_id(struct scsi_target *starget ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; scsi_qla_host_t *vha ; void *tmp___0 ; fc_port_t *fcport ; uint32_t port_id ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { tmp = dev_to_shost(starget->dev.parent); host = tmp; tmp___0 = shost_priv(host); vha = (scsi_qla_host_t *)tmp___0; port_id = 4294967295U; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_67059; ldv_67058: ; if ((unsigned long )fcport->rport != (unsigned long )((struct fc_rport *)0) && starget->id == (fcport->rport)->scsi_target_id) { port_id = (uint32_t )((((int )fcport->d_id.b.domain << 16) | ((int )fcport->d_id.b.area << 8)) | (int )fcport->d_id.b.al_pa); goto ldv_67057; } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_67059: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_67058; } else { } ldv_67057: ((struct fc_starget_attrs *)(& starget->starget_data))->port_id = port_id; return; } } static void qla2x00_set_rport_loss_tmo(struct fc_rport *rport , uint32_t timeout ) { { if (timeout != 0U) { rport->dev_loss_tmo = timeout; } else { rport->dev_loss_tmo = 1U; } return; } } static void qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; fc_port_t *fcport ; unsigned long flags ; raw_spinlock_t *tmp___0 ; struct fc_rport *tmp___1 ; int tmp___2 ; int tmp___3 ; long tmp___4 ; { tmp = dev_to_shost(rport->dev.parent); host = tmp; fcport = *((fc_port_t **)rport->dd_data); if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { 
return; } else { } qla2x00_set_fcport_state___1(fcport, 2); tmp___0 = spinlock_check(host->host_lock); flags = _raw_spin_lock_irqsave(tmp___0); tmp___1 = (struct fc_rport *)0; fcport->drport = tmp___1; fcport->rport = tmp___1; *((fc_port_t **)rport->dd_data) = (fc_port_t *)0; spin_unlock_irqrestore(host->host_lock, flags); tmp___2 = constant_test_bit(3L, (unsigned long const volatile *)(& (fcport->vha)->dpc_flags)); if (tmp___2 != 0) { return; } else { } tmp___3 = pci_channel_offline(((fcport->vha)->hw)->pdev); tmp___4 = ldv__builtin_expect(tmp___3 != 0, 0L); if (tmp___4 != 0L) { qla2x00_abort_all_cmds(fcport->vha, 65536); return; } else { } return; } } static void qla2x00_terminate_rport_io(struct fc_rport *rport ) { fc_port_t *fcport ; int tmp ; int tmp___0 ; long tmp___1 ; { fcport = *((fc_port_t **)rport->dd_data); if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { return; } else { } tmp = constant_test_bit(3L, (unsigned long const volatile *)(& (fcport->vha)->dpc_flags)); if (tmp != 0) { return; } else { } tmp___0 = pci_channel_offline(((fcport->vha)->hw)->pdev); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 0L); if (tmp___1 != 0L) { qla2x00_abort_all_cmds(fcport->vha, 65536); return; } else { } if ((unsigned int )fcport->loop_id != 4096U) { if ((((fcport->vha)->hw)->device_type & 134217728U) != 0U) { (*((((fcport->vha)->hw)->isp_ops)->fabric_logout))(fcport->vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { qla2x00_port_logout(fcport->vha, fcport); } } else { } return; } } static int qla2x00_issue_lip(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; if (((vha->hw)->device_type & 131072U) != 0U) { return (0); } else { } qla2x00_loop_reset(vha); return (0); } } static struct fc_host_statistics *qla2x00_get_fc_host_stats(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp___0 ; int rval ; struct link_statistics *stats ; dma_addr_t stats_dma ; struct fc_host_statistics *pfc_host_stat ; int tmp___1 ; int tmp___2 ; long tmp___3 ; int tmp___4 ; void *tmp___5 ; int tmp___6 ; u64 tmp___7 ; uint32_t __base ; uint32_t __rem ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp___0; pfc_host_stat = & vha->fc_host_stat; memset((void *)pfc_host_stat, -1, 232UL); if (((vha->hw)->device_type & 131072U) != 0U) { goto done; } else { } tmp___1 = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { goto done; } else { } tmp___2 = pci_channel_offline(ha->pdev); tmp___3 = ldv__builtin_expect(tmp___2 != 0, 0L); if (tmp___3 != 0L) { goto done; } else { } tmp___4 = qla2x00_reset_active(vha); if (tmp___4 != 0) { goto done; } else { } tmp___5 = dma_pool_alloc(ha->s_dma_pool, 208U, & stats_dma); stats = (struct link_statistics *)tmp___5; if ((unsigned long )stats == (unsigned long )((struct link_statistics *)0)) { ql_log(1U, vha, 28797, "Failed to allocate memory for stats.\n"); goto done; } else { } memset((void *)stats, 0, 256UL); rval = 258; if ((ha->device_type & 134217728U) != 0U) { rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma); } else { tmp___6 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___6 == 5 && (unsigned int )ha->dpc_active == 0U) { rval = qla2x00_get_link_status(base_vha, (int )base_vha->loop_id, 
stats, stats_dma); } else { } } if (rval != 0) { goto done_free; } else { } pfc_host_stat->link_failure_count = (u64 )stats->link_fail_cnt; pfc_host_stat->loss_of_sync_count = (u64 )stats->loss_sync_cnt; pfc_host_stat->loss_of_signal_count = (u64 )stats->loss_sig_cnt; pfc_host_stat->prim_seq_protocol_err_count = (u64 )stats->prim_seq_err_cnt; pfc_host_stat->invalid_tx_word_count = (u64 )stats->inval_xmit_word_cnt; pfc_host_stat->invalid_crc_count = (u64 )stats->inval_crc_cnt; if ((ha->device_type & 134217728U) != 0U) { pfc_host_stat->lip_count = (u64 )stats->lip_cnt; pfc_host_stat->tx_frames = (u64 )stats->tx_frames; pfc_host_stat->rx_frames = (u64 )stats->rx_frames; pfc_host_stat->dumped_frames = (u64 )stats->discarded_frames; pfc_host_stat->nos_count = (u64 )stats->nos_rcvd; pfc_host_stat->error_frames = (u64 )(stats->dropped_frames + stats->discarded_frames); pfc_host_stat->rx_words = vha->qla_stats.input_bytes; pfc_host_stat->tx_words = vha->qla_stats.output_bytes; } else { } pfc_host_stat->fcp_control_requests = (u64 )vha->qla_stats.control_requests; pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests; pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests; pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20; pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20; tmp___7 = get_jiffies_64(); pfc_host_stat->seconds_since_last_reset = tmp___7 - vha->qla_stats.jiffies_at_last_reset; __base = 250U; __rem = (uint32_t )(pfc_host_stat->seconds_since_last_reset % (u64 )__base); pfc_host_stat->seconds_since_last_reset = pfc_host_stat->seconds_since_last_reset / (u64 )__base; done_free: dma_pool_free(ha->s_dma_pool, (void *)stats, stats_dma); done: ; return (pfc_host_stat); } } static void qla2x00_reset_host_stats(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; memset((void *)(& vha->fc_host_stat), 0, 232UL); vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); return; } } static void qla2x00_get_host_symbolic_name(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; qla2x00_get_sym_node_name(vha, (uint8_t *)(& ((struct fc_host_attrs *)shost->shost_data)->symbolic_name), 256UL); return; } } static void qla2x00_set_host_system_hostname(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; set_bit(12L, (unsigned long volatile *)(& vha->dpc_flags)); return; } } static void qla2x00_get_host_fabric_name(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; uint8_t node_name[8U] ; u64 fabric_name ; u64 tmp___0 ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; node_name[0] = 255U; node_name[1] = 255U; node_name[2] = 255U; node_name[3] = 255U; node_name[4] = 255U; node_name[5] = 255U; node_name[6] = 255U; node_name[7] = 255U; tmp___0 = wwn_to_u64((u8 *)(& node_name)); fabric_name = tmp___0; if ((int )vha->device_flags & 1) { fabric_name = wwn_to_u64((u8 *)(& vha->fabric_node_name)); } else { } ((struct fc_host_attrs *)shost->shost_data)->fabric_name = fabric_name; return; } } static void qla2x00_get_host_port_state(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; struct scsi_qla_host *base_vha ; void *tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; tmp___0 = pci_get_drvdata((vha->hw)->pdev); base_vha = (struct scsi_qla_host *)tmp___0; if (*((unsigned long 
*)base_vha + 19UL) == 0UL) { ((struct fc_host_attrs *)shost->shost_data)->port_state = 3; return; } else { } tmp___1 = atomic_read((atomic_t const *)(& base_vha->loop_state)); switch (tmp___1) { case 4: ((struct fc_host_attrs *)shost->shost_data)->port_state = 6; goto ldv_67120; case 2: tmp___2 = constant_test_bit(4L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___2 != 0) { ((struct fc_host_attrs *)shost->shost_data)->port_state = 6; } else { ((struct fc_host_attrs *)shost->shost_data)->port_state = 7; } goto ldv_67120; case 6: ((struct fc_host_attrs *)shost->shost_data)->port_state = 7; goto ldv_67120; case 5: ((struct fc_host_attrs *)shost->shost_data)->port_state = 2; goto ldv_67120; default: ((struct fc_host_attrs *)shost->shost_data)->port_state = 0; goto ldv_67120; } ldv_67120: ; return; } } static int qla24xx_vport_create(struct fc_vport *fc_vport , bool disable ) { int ret ; uint8_t qos ; scsi_qla_host_t *base_vha ; void *tmp ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; uint16_t options ; int cnt ; struct req_que *req ; int tmp___0 ; int tmp___1 ; int prot ; int guard ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { ret = 0; qos = 0U; tmp = shost_priv(fc_vport->shost); base_vha = (scsi_qla_host_t *)tmp; vha = (scsi_qla_host_t *)0; ha = base_vha->hw; options = 0U; req = *(ha->req_q_map); ret = qla24xx_vport_create_req_sanity_check(fc_vport); if (ret != 0) { ql_log(1U, vha, 28798, "Vport sanity check failed, status %x\n", ret); return (ret); } else { } vha = qla24xx_create_vhost(fc_vport); if ((unsigned long )vha == (unsigned long )((scsi_qla_host_t *)0)) { ql_log(1U, vha, 28799, "Vport create host failed.\n"); return (9); } else { } if ((int )disable) { atomic_set(& vha->vp_state, 0); fc_vport_set_state(fc_vport, 2); } else { atomic_set(& vha->vp_state, 2); } ql_log(2U, vha, 28800, "VP entry id %d assigned.\n", (int )vha->vp_idx); atomic_set(& vha->loop_state, 2); vha->vp_err_state = 1U; vha->vp_prev_err_state = 0U; tmp___0 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___0 == 2) { goto _L; } else { tmp___1 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___1 == 6) { _L: /* CIL Label */ ql_dbg(8388608U, vha, 28801, "Vport loop state is not UP.\n"); atomic_set(& vha->loop_state, 6); if (! 
disable) { fc_vport_set_state(fc_vport, 3); } else { } } else { } } if ((ha->device_type & 33554432U) != 0U && ql2xenabledif != 0) { if (((int )ha->fw_attributes & 16) != 0) { prot = 0; vha->flags.difdix_supported = 1U; ql_dbg(8388608U, vha, 28802, "Registered for DIF/DIX type 1 and 3 protection.\n"); if (ql2xenabledif == 1) { prot = 8; } else { } scsi_host_set_prot(vha->host, (unsigned int )(prot | 119)); guard = 1; if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) && ql2xenabledif > 1) { guard = guard | 2; } else { } scsi_host_set_guard(vha->host, (int )((unsigned char )guard)); } else { vha->flags.difdix_supported = 0U; } } else { } tmp___2 = ldv_scsi_add_host_with_dma_183(vha->host, & fc_vport->dev, & (ha->pdev)->dev); if (tmp___2 != 0) { ql_dbg(8388608U, vha, 28803, "scsi_add_host failure for VP[%d].\n", (int )vha->vp_idx); goto vport_create_failed_2; } else { } ((struct fc_host_attrs *)(vha->host)->shost_data)->dev_loss_tmo = (u32 )ha->port_down_retry_count; ((struct fc_host_attrs *)(vha->host)->shost_data)->node_name = wwn_to_u64((u8 *)(& vha->node_name)); ((struct fc_host_attrs *)(vha->host)->shost_data)->port_name = wwn_to_u64((u8 *)(& vha->port_name)); ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_classes = ((struct fc_host_attrs *)(base_vha->host)->shost_data)->supported_classes; ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_speeds = ((struct fc_host_attrs *)(base_vha->host)->shost_data)->supported_speeds; qlt_vport_create(vha, ha); qla24xx_vport_disable(fc_vport, (int )disable); if (*((unsigned long *)ha + 2UL) != 0UL) { req = *(ha->req_q_map + 1UL); ql_dbg(1048576U, vha, 49152, "Request queue %p attached with VP[%d], cpu affinity =%d\n", req, (int )vha->vp_idx, (int )ha->flags.cpu_affinity_enabled); goto vport_queue; } else if (ql2xmaxqueues == 1 || (unsigned long )ha->npiv_info == (unsigned long )((struct qla_npiv_entry *)0)) { goto vport_queue; } else { } cnt = 0; goto ldv_67143; ldv_67142: tmp___3 = memcmp((void const *)(& (ha->npiv_info + (unsigned long )cnt)->port_name), (void const *)(& vha->port_name), 8UL); if (tmp___3 == 0) { tmp___4 = memcmp((void const *)(& (ha->npiv_info + (unsigned long )cnt)->node_name), (void const *)(& vha->node_name), 8UL); if (tmp___4 == 0) { qos = (ha->npiv_info + (unsigned long )cnt)->q_qos; goto ldv_67141; } else { } } else { } cnt = cnt + 1; ldv_67143: ; if ((int )ha->nvram_npiv_size > cnt) { goto ldv_67142; } else { } ldv_67141: ; if ((unsigned int )qos != 0U) { ret = qla25xx_create_req_que(ha, (int )options, (int )((uint8_t )vha->vp_idx), 0, 0, (int )qos); if (ret == 0) { ql_log(1U, vha, 28804, "Can\'t create request queue for VP[%d]\n", (int )vha->vp_idx); } else { ql_dbg(1048576U, vha, 49153, "Request Que:%d Q0s: %d) created for VP[%d]\n", ret, (int )qos, (int )vha->vp_idx); ql_dbg(8388608U, vha, 28805, "Request Que:%d Q0s: %d) created for VP[%d]\n", ret, (int )qos, (int )vha->vp_idx); req = *(ha->req_q_map + (unsigned long )ret); } } else { } vport_queue: vha->req = req; return (0); vport_create_failed_2: qla24xx_disable_vp(vha); qla24xx_deallocate_vp_id(vha); scsi_host_put(vha->host); return (9); } } static int qla24xx_vport_delete(struct fc_vport *fc_vport ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; uint16_t id ; int tmp ; int tmp___0 ; int tmp___1 ; long tmp___2 ; int tmp___3 ; { vha = (scsi_qla_host_t *)fc_vport->dd_data; ha = vha->hw; id = vha->vp_idx; goto ldv_67151; ldv_67150: msleep(1000U); ldv_67151: tmp = constant_test_bit(5L, (unsigned long const volatile *)(& 
vha->dpc_flags)); if (tmp != 0) { goto ldv_67150; } else { tmp___0 = constant_test_bit(13L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { goto ldv_67150; } else { goto ldv_67152; } } ldv_67152: qla24xx_disable_vp(vha); vha->flags.delete_progress = 1U; qlt_remove_target(ha, vha); fc_remove_host(vha->host); ldv_scsi_remove_host_184(vha->host); qla24xx_deallocate_vp_id(vha); if (vha->timer_active != 0U) { qla2x00_vp_stop_timer(vha); ql_dbg(8388608U, vha, 28806, "Timer for the VP[%d] has stopped\n", (int )vha->vp_idx); } else { } tmp___1 = atomic_read((atomic_t const *)(& vha->vref_count)); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_attr.c"), "i" (2162), "i" (12UL)); ldv_67153: ; goto ldv_67153; } else { } qla2x00_free_fcports(vha); mutex_lock_nested(& ha->vport_lock, 0U); ha->cur_vport_count = ha->cur_vport_count - 1; clear_bit((long )vha->vp_idx, (unsigned long volatile *)(& ha->vp_idx_map)); mutex_unlock(& ha->vport_lock); if ((unsigned int )(vha->req)->id != 0U && *((unsigned long *)ha + 2UL) == 0UL) { tmp___3 = qla25xx_delete_req_que(vha, vha->req); if (tmp___3 != 0) { ql_log(1U, vha, 28807, "Queue delete failed.\n"); } else { } } else { } ql_log(2U, vha, 28808, "VP[%d] deleted.\n", (int )id); scsi_host_put(vha->host); return (0); } } static int qla24xx_vport_disable(struct fc_vport *fc_vport , bool disable ) { scsi_qla_host_t *vha ; { vha = (scsi_qla_host_t *)fc_vport->dd_data; if ((int )disable) { qla24xx_disable_vp(vha); } else { qla24xx_enable_vp(vha); } return (0); } } struct fc_function_template qla2xxx_transport_functions = {0, & qla2x00_set_rport_loss_tmo, & qla2x00_get_starget_node_name, & qla2x00_get_starget_port_name, & qla2x00_get_starget_port_id, & qla2x00_get_host_port_id, & qla2x00_get_host_port_type, & qla2x00_get_host_port_state, 0, & qla2x00_get_host_speed, & qla2x00_get_host_fabric_name, & qla2x00_get_host_symbolic_name, & qla2x00_set_host_system_hostname, & qla2x00_get_fc_host_stats, & qla2x00_reset_host_stats, & qla2x00_issue_lip, & qla2x00_dev_loss_tmo_callbk, & qla2x00_terminate_rport_io, 0, & qla24xx_vport_create, & qla24xx_vport_disable, & qla24xx_vport_delete, 0, 0, & qla24xx_bsg_request, & qla24xx_bsg_timeout, 8U, 0U, 0U, (unsigned char)0, 1U, 1U, 1U, 1U, 1U, 1U, 1U, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, (unsigned char)0, 1U, 1U, 1U, 1U, (unsigned char)0}; struct fc_function_template qla2xxx_transport_vport_functions = {0, & qla2x00_set_rport_loss_tmo, & qla2x00_get_starget_node_name, & qla2x00_get_starget_port_name, & qla2x00_get_starget_port_id, & qla2x00_get_host_port_id, & qla2x00_get_host_port_type, & qla2x00_get_host_port_state, 0, & qla2x00_get_host_speed, & qla2x00_get_host_fabric_name, & qla2x00_get_host_symbolic_name, & qla2x00_set_host_system_hostname, & qla2x00_get_fc_host_stats, & qla2x00_reset_host_stats, & qla2x00_issue_lip, & qla2x00_dev_loss_tmo_callbk, & qla2x00_terminate_rport_io, 0, 0, 0, 0, 0, 0, & qla24xx_bsg_request, & qla24xx_bsg_timeout, 8U, 0U, 0U, (unsigned 
char)0, 1U, 1U, 1U, 1U, 1U, 1U, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, (unsigned char)0, 1U, 1U, 1U, 1U, (unsigned char)0}; void qla2x00_init_host_attr(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; u32 speed ; { ha = vha->hw; speed = 0U; ((struct fc_host_attrs *)(vha->host)->shost_data)->dev_loss_tmo = (u32 )ha->port_down_retry_count; ((struct fc_host_attrs *)(vha->host)->shost_data)->node_name = wwn_to_u64((u8 *)(& vha->node_name)); ((struct fc_host_attrs *)(vha->host)->shost_data)->port_name = wwn_to_u64((u8 *)(& vha->port_name)); ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_classes = (unsigned int )*((unsigned char *)ha + 3808UL) != 0U ? 12U : 8U; ((struct fc_host_attrs *)(vha->host)->shost_data)->max_npiv_vports = ha->max_npiv_vports; ((struct fc_host_attrs *)(vha->host)->shost_data)->npiv_vports_inuse = (u16 )ha->cur_vport_count; if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { speed = 4U; } else if ((ha->device_type & 32768U) != 0U) { speed = 56U; } else if ((ha->device_type & 2048U) != 0U) { speed = 27U; } else if ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) { speed = 11U; } else if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { speed = 3U; } else if ((ha->device_type & 131072U) != 0U) { speed = 27U; } else if ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) { speed = 112U; } else { speed = 1U; } ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_speeds = speed; return; } } extern int ldv_probe_72(void) ; extern int ldv_probe_71(void) ; extern int ldv_release_72(void) ; extern int ldv_probe_68(void) ; extern int ldv_probe_66(void) ; extern int ldv_probe_69(void) ; extern int ldv_probe_74(void) ; extern int ldv_release_69(void) ; extern int ldv_release_68(void) ; extern int ldv_release_65(void) ; extern int ldv_probe_67(void) ; extern int ldv_release_67(void) ; extern int ldv_release_66(void) ; extern int ldv_release_74(void) ; extern int ldv_probe_65(void) ; extern int ldv_release_70(void) ; extern int ldv_release_71(void) ; extern int ldv_release_73(void) ; extern int ldv_probe_73(void) ; extern int ldv_probe_70(void) ; void ldv_initialize_device_attribute_53(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); dev_attr_beacon_group0 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); dev_attr_beacon_group1 = (struct device *)tmp___0; return; } } void choose_timer_22(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_22 = 2; return; } } void ldv_initialize_bin_attribute_69(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(296UL); sysfs_vpd_attr_group0 = (struct kobject *)tmp; tmp___0 = __VERIFIER_nondet_pointer(); sysfs_vpd_attr_group1 = (struct file *)tmp___0; tmp___1 = ldv_init_zalloc(72UL); sysfs_vpd_attr_group2 = (struct bin_attribute *)tmp___1; return; } } void ldv_initialize_fc_function_template_32(void) { void *tmp ; void *tmp___0 ; void *tmp___1 
; void *tmp___2 ; { tmp = ldv_init_zalloc(3816UL); qla2xxx_transport_vport_functions_group0 = (struct Scsi_Host *)tmp; tmp___0 = ldv_init_zalloc(1504UL); qla2xxx_transport_vport_functions_group1 = (struct scsi_target *)tmp___0; tmp___1 = ldv_init_zalloc(2208UL); qla2xxx_transport_vport_functions_group2 = (struct fc_rport *)tmp___1; tmp___2 = ldv_init_zalloc(184UL); qla2xxx_transport_vport_functions_group3 = (struct fc_bsg_job *)tmp___2; return; } } void disable_suitable_timer_22(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_22) { ldv_timer_state_22 = 0; return; } else { } return; } } void ldv_initialize_device_attribute_54(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); dev_attr_zio_timer_group0 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); dev_attr_zio_timer_group1 = (struct device *)tmp___0; return; } } void ldv_initialize_bin_attribute_71(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(296UL); sysfs_optrom_attr_group0 = (struct kobject *)tmp; tmp___0 = __VERIFIER_nondet_pointer(); sysfs_optrom_attr_group1 = (struct file *)tmp___0; tmp___1 = ldv_init_zalloc(72UL); sysfs_optrom_attr_group2 = (struct bin_attribute *)tmp___1; return; } } void ldv_initialize_fc_function_template_33(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; void *tmp___3 ; { tmp = ldv_init_zalloc(3816UL); qla2xxx_transport_functions_group0 = (struct Scsi_Host *)tmp; tmp___0 = ldv_init_zalloc(1648UL); qla2xxx_transport_functions_group2 = (struct fc_vport *)tmp___0; tmp___1 = ldv_init_zalloc(1504UL); qla2xxx_transport_functions_group1 = (struct scsi_target *)tmp___1; tmp___2 = ldv_init_zalloc(2208UL); qla2xxx_transport_functions_group3 = (struct fc_rport *)tmp___2; tmp___3 = ldv_init_zalloc(184UL); qla2xxx_transport_functions_group4 = (struct fc_bsg_job *)tmp___3; return; } } void ldv_initialize_bin_attribute_73(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(296UL); sysfs_fw_dump_template_attr_group0 = (struct kobject *)tmp; tmp___0 = __VERIFIER_nondet_pointer(); sysfs_fw_dump_template_attr_group1 = (struct file *)tmp___0; tmp___1 = ldv_init_zalloc(72UL); sysfs_fw_dump_template_attr_group2 = (struct bin_attribute *)tmp___1; return; } } void ldv_initialize_bin_attribute_72(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(296UL); sysfs_nvram_attr_group0 = (struct kobject *)tmp; tmp___0 = __VERIFIER_nondet_pointer(); sysfs_nvram_attr_group1 = (struct file *)tmp___0; tmp___1 = ldv_init_zalloc(72UL); sysfs_nvram_attr_group2 = (struct bin_attribute *)tmp___1; return; } } void activate_pending_timer_22(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_22 == (unsigned long )timer) { if (ldv_timer_state_22 == 2 || pending_flag != 0) { ldv_timer_list_22 = timer; ldv_timer_list_22->data = data; ldv_timer_state_22 = 1; } else { } return; } else { } reg_timer_22(timer); ldv_timer_list_22->data = data; return; } } void ldv_initialize_device_attribute_55(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); dev_attr_zio_group0 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); dev_attr_zio_group1 = (struct device *)tmp___0; return; } } int reg_timer_22(struct timer_list *timer ) { { ldv_timer_list_22 = timer; ldv_timer_state_22 = 1; return (0); } } void ldv_initialize_bin_attribute_74(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = 
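/*
 * The timer helpers above (reg_timer_22, activate_pending_timer_22,
 * choose_timer_22, disable_suitable_timer_22) are the LDV model of the
 * kernel timer API for timer class 22: at most one timer_list is
 * tracked (ldv_timer_list_22) and ldv_timer_state_22 encodes its state.
 * Summarised:
 *
 *   reg_timer_22(t)                       : remember t, state := 1 (armed)
 *   activate_pending_timer_22(t, d, pend) : if t is the tracked timer,
 *                                           re-arm only when already fired
 *                                           (state 2) or pend != 0;
 *                                           otherwise register t
 *   choose_timer_22(t)                    : run (*t->function)(t->data)
 *                                           with LDV_IN_INTERRUPT raised
 *                                           to 2, then state := 2 (fired)
 *   disable_suitable_timer_22(t)          : state := 0 for the tracked timer
 */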
ldv_init_zalloc(296UL); sysfs_fw_dump_attr_group0 = (struct kobject *)tmp; tmp___0 = __VERIFIER_nondet_pointer(); sysfs_fw_dump_attr_group1 = (struct file *)tmp___0; tmp___1 = ldv_init_zalloc(72UL); sysfs_fw_dump_attr_group2 = (struct bin_attribute *)tmp___1; return; } } void ldv_initialize_device_attribute_34(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); dev_attr_allow_cna_fw_dump_group0 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); dev_attr_allow_cna_fw_dump_group1 = (struct device *)tmp___0; return; } } void ldv_main_exported_67(void) { struct kobject *ldvarg335 ; void *tmp ; loff_t ldvarg336 ; char *ldvarg332 ; void *tmp___0 ; struct file *ldvarg333 ; void *tmp___1 ; struct bin_attribute *ldvarg334 ; void *tmp___2 ; size_t ldvarg331 ; int tmp___3 ; { tmp = ldv_init_zalloc(296UL); ldvarg335 = (struct kobject *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg332 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_pointer(); ldvarg333 = (struct file *)tmp___1; tmp___2 = ldv_init_zalloc(72UL); ldvarg334 = (struct bin_attribute *)tmp___2; ldv_memset((void *)(& ldvarg336), 0, 8UL); ldv_memset((void *)(& ldvarg331), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_67 == 2) { qla2x00_sysfs_write_reset(ldvarg333, ldvarg335, ldvarg334, ldvarg332, ldvarg336, ldvarg331); ldv_state_variable_67 = 2; } else { } goto ldv_67263; case 1: ; if (ldv_state_variable_67 == 2) { ldv_release_67(); ldv_state_variable_67 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_67263; case 2: ; if (ldv_state_variable_67 == 1) { ldv_probe_67(); ldv_state_variable_67 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_67263; default: ldv_stop(); } ldv_67263: ; return; } } void ldv_main_exported_33(void) { u32 ldvarg2 ; bool ldvarg0 ; bool ldvarg1 ; int tmp ; { ldv_memset((void *)(& ldvarg2), 0, 4UL); ldv_memset((void *)(& ldvarg0), 0, 1UL); ldv_memset((void *)(& ldvarg1), 0, 1UL); tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_33 == 1) { qla2x00_issue_lip(qla2xxx_transport_functions_group0); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 1: ; if (ldv_state_variable_33 == 1) { qla2x00_set_rport_loss_tmo(qla2xxx_transport_functions_group3, ldvarg2); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 2: ; if (ldv_state_variable_33 == 1) { qla2x00_get_fc_host_stats(qla2xxx_transport_functions_group0); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 3: ; if (ldv_state_variable_33 == 1) { qla2x00_get_host_port_type(qla2xxx_transport_functions_group0); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 4: ; if (ldv_state_variable_33 == 1) { qla24xx_bsg_timeout(qla2xxx_transport_functions_group4); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 5: ; if (ldv_state_variable_33 == 1) { qla24xx_vport_delete(qla2xxx_transport_functions_group2); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 6: ; if (ldv_state_variable_33 == 1) { qla24xx_bsg_request(qla2xxx_transport_functions_group4); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 7: ; if (ldv_state_variable_33 == 1) { qla24xx_vport_disable(qla2xxx_transport_functions_group2, (int )ldvarg1); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 8: ; if (ldv_state_variable_33 == 1) { qla2x00_terminate_rport_io(qla2xxx_transport_functions_group3); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 9: ; if (ldv_state_variable_33 == 1) { 
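/*
 * The ldv_main_exported_NN() entry points here and below are generated
 * dispatchers, one per callback table: an int from __VERIFIER_nondet_int()
 * selects which handler to invoke, every call is guarded by the table's
 * state variable (ldv_state_variable_NN), and an out-of-range choice falls
 * through to ldv_stop().  For binary sysfs attributes (e.g.
 * ldv_main_exported_67 above) state 1 means "not opened" and state 2 means
 * "opened": the read/write handlers are reachable only in state 2, while
 * the probe/release cases move between the two states and keep ref_cnt
 * balanced.  Reduced to a sketch (placeholder names, not from this file),
 * each dispatcher is essentially:
 *
 *   switch (__VERIFIER_nondet_int()) {
 *   case K:
 *           if (ldv_state_variable_NN == required_state)
 *                   call_callback_K(pre-allocated args);
 *           break;
 *   default:
 *           ldv_stop();   // cut off impossible choices
 *   }
 */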
qla2x00_get_host_port_state(qla2xxx_transport_functions_group0); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 10: ; if (ldv_state_variable_33 == 1) { qla2x00_get_starget_node_name(qla2xxx_transport_functions_group1); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 11: ; if (ldv_state_variable_33 == 1) { qla2x00_get_host_speed(qla2xxx_transport_functions_group0); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 12: ; if (ldv_state_variable_33 == 1) { qla2x00_get_starget_port_id(qla2xxx_transport_functions_group1); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 13: ; if (ldv_state_variable_33 == 1) { qla2x00_get_starget_port_name(qla2xxx_transport_functions_group1); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 14: ; if (ldv_state_variable_33 == 1) { qla2x00_dev_loss_tmo_callbk(qla2xxx_transport_functions_group3); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 15: ; if (ldv_state_variable_33 == 1) { qla2x00_reset_host_stats(qla2xxx_transport_functions_group0); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 16: ; if (ldv_state_variable_33 == 1) { qla2x00_get_host_symbolic_name(qla2xxx_transport_functions_group0); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 17: ; if (ldv_state_variable_33 == 1) { qla24xx_vport_create(qla2xxx_transport_functions_group2, (int )ldvarg0); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 18: ; if (ldv_state_variable_33 == 1) { qla2x00_set_host_system_hostname(qla2xxx_transport_functions_group0); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 19: ; if (ldv_state_variable_33 == 1) { qla2x00_get_host_fabric_name(qla2xxx_transport_functions_group0); ldv_state_variable_33 = 1; } else { } goto ldv_67274; case 20: ; if (ldv_state_variable_33 == 1) { qla2x00_get_host_port_id(qla2xxx_transport_functions_group0); ldv_state_variable_33 = 1; } else { } goto ldv_67274; default: ldv_stop(); } ldv_67274: ; return; } } void ldv_main_exported_32(void) { u32 ldvarg3 ; int tmp ; { ldv_memset((void *)(& ldvarg3), 0, 4UL); tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_32 == 1) { qla2x00_issue_lip(qla2xxx_transport_vport_functions_group0); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 1: ; if (ldv_state_variable_32 == 1) { qla2x00_set_rport_loss_tmo(qla2xxx_transport_vport_functions_group2, ldvarg3); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 2: ; if (ldv_state_variable_32 == 1) { qla2x00_get_fc_host_stats(qla2xxx_transport_vport_functions_group0); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 3: ; if (ldv_state_variable_32 == 1) { qla2x00_get_host_port_type(qla2xxx_transport_vport_functions_group0); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 4: ; if (ldv_state_variable_32 == 1) { qla24xx_bsg_timeout(qla2xxx_transport_vport_functions_group3); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 5: ; if (ldv_state_variable_32 == 1) { qla24xx_bsg_request(qla2xxx_transport_vport_functions_group3); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 6: ; if (ldv_state_variable_32 == 1) { qla2x00_terminate_rport_io(qla2xxx_transport_vport_functions_group2); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 7: ; if (ldv_state_variable_32 == 1) { qla2x00_get_host_port_state(qla2xxx_transport_vport_functions_group0); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 8: ; if (ldv_state_variable_32 == 1) { 
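/*
 * ldv_main_exported_33 and ldv_main_exported_32 drive the driver's two
 * fc_function_template instances: the physical-port table
 * (qla2xxx_transport_functions_*, which includes qla24xx_vport_create,
 * qla24xx_vport_delete and qla24xx_vport_disable) and the NPIV vport
 * table (qla2xxx_transport_vport_functions_*, which has no vport
 * management entries).  Both stay in state 1 permanently, i.e. every
 * transport callback may be invoked in any order once the template is
 * registered.
 */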
qla2x00_get_starget_node_name(qla2xxx_transport_vport_functions_group1); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 9: ; if (ldv_state_variable_32 == 1) { qla2x00_get_host_speed(qla2xxx_transport_vport_functions_group0); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 10: ; if (ldv_state_variable_32 == 1) { qla2x00_get_starget_port_id(qla2xxx_transport_vport_functions_group1); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 11: ; if (ldv_state_variable_32 == 1) { qla2x00_get_starget_port_name(qla2xxx_transport_vport_functions_group1); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 12: ; if (ldv_state_variable_32 == 1) { qla2x00_dev_loss_tmo_callbk(qla2xxx_transport_vport_functions_group2); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 13: ; if (ldv_state_variable_32 == 1) { qla2x00_reset_host_stats(qla2xxx_transport_vport_functions_group0); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 14: ; if (ldv_state_variable_32 == 1) { qla2x00_get_host_symbolic_name(qla2xxx_transport_vport_functions_group0); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 15: ; if (ldv_state_variable_32 == 1) { qla2x00_set_host_system_hostname(qla2xxx_transport_vport_functions_group0); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 16: ; if (ldv_state_variable_32 == 1) { qla2x00_get_host_fabric_name(qla2xxx_transport_vport_functions_group0); ldv_state_variable_32 = 1; } else { } goto ldv_67301; case 17: ; if (ldv_state_variable_32 == 1) { qla2x00_get_host_port_id(qla2xxx_transport_vport_functions_group0); ldv_state_variable_32 = 1; } else { } goto ldv_67301; default: ldv_stop(); } ldv_67301: ; return; } } void ldv_main_exported_63(void) { struct device_attribute *ldvarg6 ; void *tmp ; char *ldvarg5 ; void *tmp___0 ; struct device *ldvarg4 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg6 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg5 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg4 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_63 == 1) { qla2x00_fw_version_show(ldvarg4, ldvarg6, ldvarg5); ldv_state_variable_63 = 1; } else { } goto ldv_67327; default: ldv_stop(); } ldv_67327: ; return; } } void ldv_main_exported_71(void) { loff_t ldvarg9 ; loff_t ldvarg12 ; size_t ldvarg10 ; size_t ldvarg7 ; char *ldvarg11 ; void *tmp ; char *ldvarg8 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg11 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg8 = (char *)tmp___0; ldv_memset((void *)(& ldvarg9), 0, 8UL); ldv_memset((void *)(& ldvarg12), 0, 8UL); ldv_memset((void *)(& ldvarg10), 0, 8UL); ldv_memset((void *)(& ldvarg7), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_71 == 2) { qla2x00_sysfs_write_optrom(sysfs_optrom_attr_group1, sysfs_optrom_attr_group0, sysfs_optrom_attr_group2, ldvarg11, ldvarg12, ldvarg10); ldv_state_variable_71 = 2; } else { } goto ldv_67339; case 1: ; if (ldv_state_variable_71 == 2) { qla2x00_sysfs_read_optrom(sysfs_optrom_attr_group1, sysfs_optrom_attr_group0, sysfs_optrom_attr_group2, ldvarg8, ldvarg9, ldvarg7); ldv_state_variable_71 = 2; } else { } goto ldv_67339; case 2: ; if (ldv_state_variable_71 == 2) { ldv_release_71(); ldv_state_variable_71 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_67339; case 3: ; if (ldv_state_variable_71 == 1) { ldv_probe_71(); ldv_state_variable_71 = 2; ref_cnt = ref_cnt + 1; } 
else { } goto ldv_67339; default: ldv_stop(); } ldv_67339: ; return; } } void ldv_main_exported_70(void) { loff_t ldvarg342 ; struct file *ldvarg339 ; void *tmp ; struct bin_attribute *ldvarg340 ; void *tmp___0 ; size_t ldvarg337 ; char *ldvarg338 ; void *tmp___1 ; struct kobject *ldvarg341 ; void *tmp___2 ; int tmp___3 ; { tmp = __VERIFIER_nondet_pointer(); ldvarg339 = (struct file *)tmp; tmp___0 = ldv_init_zalloc(72UL); ldvarg340 = (struct bin_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg338 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(296UL); ldvarg341 = (struct kobject *)tmp___2; ldv_memset((void *)(& ldvarg342), 0, 8UL); ldv_memset((void *)(& ldvarg337), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_70 == 2) { qla2x00_sysfs_write_optrom_ctl(ldvarg339, ldvarg341, ldvarg340, ldvarg338, ldvarg342, ldvarg337); ldv_state_variable_70 = 2; } else { } goto ldv_67354; case 1: ; if (ldv_state_variable_70 == 2) { ldv_release_70(); ldv_state_variable_70 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_67354; case 2: ; if (ldv_state_variable_70 == 1) { ldv_probe_70(); ldv_state_variable_70 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_67354; default: ldv_stop(); } ldv_67354: ; return; } } void ldv_main_exported_68(void) { struct kobject *ldvarg347 ; void *tmp ; struct file *ldvarg345 ; void *tmp___0 ; char *ldvarg344 ; void *tmp___1 ; loff_t ldvarg348 ; size_t ldvarg343 ; struct bin_attribute *ldvarg346 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(296UL); ldvarg347 = (struct kobject *)tmp; tmp___0 = __VERIFIER_nondet_pointer(); ldvarg345 = (struct file *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg344 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(72UL); ldvarg346 = (struct bin_attribute *)tmp___2; ldv_memset((void *)(& ldvarg348), 0, 8UL); ldv_memset((void *)(& ldvarg343), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_68 == 2) { qla2x00_sysfs_read_sfp(ldvarg345, ldvarg347, ldvarg346, ldvarg344, ldvarg348, ldvarg343); ldv_state_variable_68 = 2; } else { } goto ldv_67368; case 1: ; if (ldv_state_variable_68 == 2) { ldv_release_68(); ldv_state_variable_68 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_67368; case 2: ; if (ldv_state_variable_68 == 1) { ldv_probe_68(); ldv_state_variable_68 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_67368; default: ldv_stop(); } ldv_67368: ; return; } } void ldv_main_exported_72(void) { size_t ldvarg47 ; size_t ldvarg50 ; loff_t ldvarg49 ; loff_t ldvarg52 ; char *ldvarg51 ; void *tmp ; char *ldvarg48 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg51 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg48 = (char *)tmp___0; ldv_memset((void *)(& ldvarg47), 0, 8UL); ldv_memset((void *)(& ldvarg50), 0, 8UL); ldv_memset((void *)(& ldvarg49), 0, 8UL); ldv_memset((void *)(& ldvarg52), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_72 == 2) { qla2x00_sysfs_write_nvram(sysfs_nvram_attr_group1, sysfs_nvram_attr_group0, sysfs_nvram_attr_group2, ldvarg51, ldvarg52, ldvarg50); ldv_state_variable_72 = 2; } else { } goto ldv_67382; case 1: ; if (ldv_state_variable_72 == 2) { qla2x00_sysfs_read_nvram(sysfs_nvram_attr_group1, sysfs_nvram_attr_group0, sysfs_nvram_attr_group2, ldvarg48, ldvarg49, ldvarg47); ldv_state_variable_72 = 2; } else { } goto ldv_67382; case 2: ; if (ldv_state_variable_72 == 2) { ldv_release_72(); ldv_state_variable_72 = 1; ref_cnt = ref_cnt - 1; } else { } goto 
ldv_67382; case 3: ; if (ldv_state_variable_72 == 1) { ldv_probe_72(); ldv_state_variable_72 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_67382; default: ldv_stop(); } ldv_67382: ; return; } } void ldv_main_exported_44(void) { struct device_attribute *ldvarg55 ; void *tmp ; struct device *ldvarg53 ; void *tmp___0 ; char *ldvarg54 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg55 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg53 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg54 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_44 == 1) { qla2x00_phy_version_show(ldvarg53, ldvarg55, ldvarg54); ldv_state_variable_44 = 1; } else { } goto ldv_67394; default: ldv_stop(); } ldv_67394: ; return; } } void ldv_main_exported_55(void) { char *ldvarg56 ; void *tmp ; char *ldvarg58 ; void *tmp___0 ; size_t ldvarg57 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg56 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg58 = (char *)tmp___0; ldv_memset((void *)(& ldvarg57), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_55 == 1) { qla2x00_zio_store(dev_attr_zio_group1, dev_attr_zio_group0, (char const *)ldvarg58, ldvarg57); ldv_state_variable_55 = 1; } else { } goto ldv_67403; case 1: ; if (ldv_state_variable_55 == 1) { qla2x00_zio_show(dev_attr_zio_group1, dev_attr_zio_group0, ldvarg56); ldv_state_variable_55 = 1; } else { } goto ldv_67403; default: ldv_stop(); } ldv_67403: ; return; } } void ldv_main_exported_74(void) { size_t ldvarg99 ; char *ldvarg103 ; void *tmp ; char *ldvarg100 ; void *tmp___0 ; loff_t ldvarg101 ; size_t ldvarg102 ; loff_t ldvarg104 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg103 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg100 = (char *)tmp___0; ldv_memset((void *)(& ldvarg99), 0, 8UL); ldv_memset((void *)(& ldvarg101), 0, 8UL); ldv_memset((void *)(& ldvarg102), 0, 8UL); ldv_memset((void *)(& ldvarg104), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_74 == 2) { qla2x00_sysfs_write_fw_dump(sysfs_fw_dump_attr_group1, sysfs_fw_dump_attr_group0, sysfs_fw_dump_attr_group2, ldvarg103, ldvarg104, ldvarg102); ldv_state_variable_74 = 2; } else { } goto ldv_67416; case 1: ; if (ldv_state_variable_74 == 2) { qla2x00_sysfs_read_fw_dump(sysfs_fw_dump_attr_group1, sysfs_fw_dump_attr_group0, sysfs_fw_dump_attr_group2, ldvarg100, ldvarg101, ldvarg99); ldv_state_variable_74 = 2; } else { } goto ldv_67416; case 2: ; if (ldv_state_variable_74 == 2) { ldv_release_74(); ldv_state_variable_74 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_67416; case 3: ; if (ldv_state_variable_74 == 1) { ldv_probe_74(); ldv_state_variable_74 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_67416; default: ldv_stop(); } ldv_67416: ; return; } } void ldv_main_exported_57(void) { struct device_attribute *ldvarg107 ; void *tmp ; struct device *ldvarg105 ; void *tmp___0 ; char *ldvarg106 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg107 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg105 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg106 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_57 == 1) { qla2x00_pci_info_show(ldvarg105, ldvarg107, ldvarg106); ldv_state_variable_57 = 1; } else { } goto ldv_67428; default: ldv_stop(); } ldv_67428: ; return; } } void 
ldv_main_exported_40(void) { struct device_attribute *ldvarg429 ; void *tmp ; struct device *ldvarg427 ; void *tmp___0 ; char *ldvarg428 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg429 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg427 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg428 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_40 == 1) { qla2x00_fabric_param_show(ldvarg427, ldvarg429, ldvarg428); ldv_state_variable_40 = 1; } else { } goto ldv_67437; default: ldv_stop(); } ldv_67437: ; return; } } void ldv_main_exported_61(void) { char *ldvarg109 ; void *tmp ; struct device *ldvarg108 ; void *tmp___0 ; struct device_attribute *ldvarg110 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg109 = (char *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg108 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg110 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_61 == 1) { qla2x00_isp_name_show(ldvarg108, ldvarg110, ldvarg109); ldv_state_variable_61 = 1; } else { } goto ldv_67446; default: ldv_stop(); } ldv_67446: ; return; } } void ldv_main_exported_69(void) { size_t ldvarg430 ; char *ldvarg434 ; void *tmp ; loff_t ldvarg432 ; loff_t ldvarg435 ; char *ldvarg431 ; void *tmp___0 ; size_t ldvarg433 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg434 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg431 = (char *)tmp___0; ldv_memset((void *)(& ldvarg430), 0, 8UL); ldv_memset((void *)(& ldvarg432), 0, 8UL); ldv_memset((void *)(& ldvarg435), 0, 8UL); ldv_memset((void *)(& ldvarg433), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_69 == 2) { qla2x00_sysfs_write_vpd(sysfs_vpd_attr_group1, sysfs_vpd_attr_group0, sysfs_vpd_attr_group2, ldvarg434, ldvarg435, ldvarg433); ldv_state_variable_69 = 2; } else { } goto ldv_67458; case 1: ; if (ldv_state_variable_69 == 2) { qla2x00_sysfs_read_vpd(sysfs_vpd_attr_group1, sysfs_vpd_attr_group0, sysfs_vpd_attr_group2, ldvarg431, ldvarg432, ldvarg430); ldv_state_variable_69 = 2; } else { } goto ldv_67458; case 2: ; if (ldv_state_variable_69 == 2) { ldv_release_69(); ldv_state_variable_69 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_67458; case 3: ; if (ldv_state_variable_69 == 1) { ldv_probe_69(); ldv_state_variable_69 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_67458; default: ldv_stop(); } ldv_67458: ; return; } } void ldv_main_exported_59(void) { char *ldvarg437 ; void *tmp ; struct device *ldvarg436 ; void *tmp___0 ; struct device_attribute *ldvarg438 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg437 = (char *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg436 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg438 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_59 == 1) { qla2x00_model_name_show(ldvarg436, ldvarg438, ldvarg437); ldv_state_variable_59 = 1; } else { } goto ldv_67470; default: ldv_stop(); } ldv_67470: ; return; } } void ldv_main_exported_49(void) { char *ldvarg440 ; void *tmp ; struct device *ldvarg439 ; void *tmp___0 ; struct device_attribute *ldvarg441 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg440 = (char *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg439 = (struct device *)tmp___0; tmp___1 = 
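/*
 * The remaining ldv_main_exported_3x..6x dispatchers in this block all
 * follow one template: each wraps a single read-only sysfs device
 * attribute (a qla2x00_*_show routine), allocating a struct device
 * (1416 bytes), a struct device_attribute (48 bytes) and a one-byte
 * output buffer, then calling the show routine once per nondeterministic
 * activation.  The one-byte buffer is an environment-model
 * simplification; the real sysfs core hands these handlers a full
 * PAGE_SIZE buffer.
 */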
ldv_init_zalloc(48UL); ldvarg441 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_49 == 1) { qla2x00_optrom_fw_version_show(ldvarg439, ldvarg441, ldvarg440); ldv_state_variable_49 = 1; } else { } goto ldv_67479; default: ldv_stop(); } ldv_67479: ; return; } } void ldv_main_exported_35(void) { struct device_attribute *ldvarg118 ; void *tmp ; char *ldvarg117 ; void *tmp___0 ; struct device *ldvarg116 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg118 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg117 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg116 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_35 == 1) { qla2x00_fw_dump_size_show(ldvarg116, ldvarg118, ldvarg117); ldv_state_variable_35 = 1; } else { } goto ldv_67488; default: ldv_stop(); } ldv_67488: ; return; } } void ldv_main_exported_53(void) { char *ldvarg442 ; void *tmp ; size_t ldvarg443 ; char *ldvarg444 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg442 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg444 = (char *)tmp___0; ldv_memset((void *)(& ldvarg443), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_53 == 1) { qla2x00_beacon_store(dev_attr_beacon_group1, dev_attr_beacon_group0, (char const *)ldvarg444, ldvarg443); ldv_state_variable_53 = 1; } else { } goto ldv_67497; case 1: ; if (ldv_state_variable_53 == 1) { qla2x00_beacon_show(dev_attr_beacon_group1, dev_attr_beacon_group0, ldvarg442); ldv_state_variable_53 = 1; } else { } goto ldv_67497; default: ldv_stop(); } ldv_67497: ; return; } } void ldv_main_exported_48(void) { struct device_attribute *ldvarg155 ; void *tmp ; struct device *ldvarg153 ; void *tmp___0 ; char *ldvarg154 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg155 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg153 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg154 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_48 == 1) { qla2x00_optrom_gold_fw_version_show(ldvarg153, ldvarg155, ldvarg154); ldv_state_variable_48 = 1; } else { } goto ldv_67507; default: ldv_stop(); } ldv_67507: ; return; } } void ldv_main_exported_42(void) { struct device *ldvarg473 ; void *tmp ; struct device_attribute *ldvarg475 ; void *tmp___0 ; char *ldvarg474 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg473 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg475 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg474 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_42 == 1) { qla2x00_vlan_id_show(ldvarg473, ldvarg475, ldvarg474); ldv_state_variable_42 = 1; } else { } goto ldv_67516; default: ldv_stop(); } ldv_67516: ; return; } } void ldv_main_exported_46(void) { struct device *ldvarg476 ; void *tmp ; char *ldvarg477 ; void *tmp___0 ; struct device_attribute *ldvarg478 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg476 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg477 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg478 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_46 == 1) { 
qla2x00_total_isp_aborts_show(ldvarg476, ldvarg478, ldvarg477); ldv_state_variable_46 = 1; } else { } goto ldv_67525; default: ldv_stop(); } ldv_67525: ; return; } } void ldv_main_exported_65(void) { char *ldvarg202 ; void *tmp ; size_t ldvarg201 ; loff_t ldvarg206 ; struct kobject *ldvarg205 ; void *tmp___0 ; struct file *ldvarg203 ; void *tmp___1 ; struct bin_attribute *ldvarg204 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(1UL); ldvarg202 = (char *)tmp; tmp___0 = ldv_init_zalloc(296UL); ldvarg205 = (struct kobject *)tmp___0; tmp___1 = __VERIFIER_nondet_pointer(); ldvarg203 = (struct file *)tmp___1; tmp___2 = ldv_init_zalloc(72UL); ldvarg204 = (struct bin_attribute *)tmp___2; ldv_memset((void *)(& ldvarg201), 0, 8UL); ldv_memset((void *)(& ldvarg206), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_65 == 2) { qla2x00_sysfs_read_dcbx_tlv(ldvarg203, ldvarg205, ldvarg204, ldvarg202, ldvarg206, ldvarg201); ldv_state_variable_65 = 2; } else { } goto ldv_67537; case 1: ; if (ldv_state_variable_65 == 2) { ldv_release_65(); ldv_state_variable_65 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_67537; case 2: ; if (ldv_state_variable_65 == 1) { ldv_probe_65(); ldv_state_variable_65 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_67537; default: ldv_stop(); } ldv_67537: ; return; } } void ldv_main_exported_50(void) { struct device *ldvarg207 ; void *tmp ; char *ldvarg208 ; void *tmp___0 ; struct device_attribute *ldvarg209 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg207 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg208 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg209 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_50 == 1) { qla2x00_optrom_fcode_version_show(ldvarg207, ldvarg209, ldvarg208); ldv_state_variable_50 = 1; } else { } goto ldv_67548; default: ldv_stop(); } ldv_67548: ; return; } } void ldv_main_exported_39(void) { struct device *ldvarg210 ; void *tmp ; struct device_attribute *ldvarg212 ; void *tmp___0 ; char *ldvarg211 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg210 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg212 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg211 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_39 == 1) { qla2x00_fw_state_show(ldvarg210, ldvarg212, ldvarg211); ldv_state_variable_39 = 1; } else { } goto ldv_67557; default: ldv_stop(); } ldv_67557: ; return; } } void ldv_main_exported_64(void) { char *ldvarg214 ; void *tmp ; struct device_attribute *ldvarg215 ; void *tmp___0 ; struct device *ldvarg213 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg214 = (char *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg215 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg213 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_64 == 1) { qla2x00_drvr_version_show(ldvarg213, ldvarg215, ldvarg214); ldv_state_variable_64 = 1; } else { } goto ldv_67566; default: ldv_stop(); } ldv_67566: ; return; } } void ldv_main_exported_36(void) { struct device *ldvarg519 ; void *tmp ; char *ldvarg520 ; void *tmp___0 ; struct device_attribute *ldvarg521 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg519 = (struct device *)tmp; tmp___0 = 
ldv_init_zalloc(1UL); ldvarg520 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg521 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_36 == 1) { qla2x00_diag_megabytes_show(ldvarg519, ldvarg521, ldvarg520); ldv_state_variable_36 = 1; } else { } goto ldv_67575; default: ldv_stop(); } ldv_67575: ; return; } } void ldv_main_exported_51(void) { struct device_attribute *ldvarg524 ; void *tmp ; struct device *ldvarg522 ; void *tmp___0 ; char *ldvarg523 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg524 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg522 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg523 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_51 == 1) { qla2x00_optrom_efi_version_show(ldvarg522, ldvarg524, ldvarg523); ldv_state_variable_51 = 1; } else { } goto ldv_67584; default: ldv_stop(); } ldv_67584: ; return; } } void ldv_main_exported_41(void) { struct device *ldvarg219 ; void *tmp ; struct device_attribute *ldvarg221 ; void *tmp___0 ; char *ldvarg220 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg219 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg221 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg220 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_41 == 1) { qla2x00_vn_port_mac_address_show(ldvarg219, ldvarg221, ldvarg220); ldv_state_variable_41 = 1; } else { } goto ldv_67593; default: ldv_stop(); } ldv_67593: ; return; } } void ldv_main_exported_58(void) { struct device *ldvarg216 ; void *tmp ; char *ldvarg217 ; void *tmp___0 ; struct device_attribute *ldvarg218 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg216 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg217 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg218 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_58 == 1) { qla2x00_model_desc_show(ldvarg216, ldvarg218, ldvarg217); ldv_state_variable_58 = 1; } else { } goto ldv_67602; default: ldv_stop(); } ldv_67602: ; return; } } void ldv_main_exported_47(void) { struct device *ldvarg525 ; void *tmp ; struct device_attribute *ldvarg527 ; void *tmp___0 ; char *ldvarg526 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg525 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg527 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg526 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_47 == 1) { qla24xx_84xx_fw_version_show(ldvarg525, ldvarg527, ldvarg526); ldv_state_variable_47 = 1; } else { } goto ldv_67611; default: ldv_stop(); } ldv_67611: ; return; } } void ldv_main_exported_38(void) { struct device_attribute *ldvarg530 ; void *tmp ; struct device *ldvarg528 ; void *tmp___0 ; char *ldvarg529 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg530 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg528 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg529 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_38 == 1) { qla2x00_thermal_temp_show(ldvarg528, ldvarg530, ldvarg529); ldv_state_variable_38 = 1; } else 
{ } goto ldv_67620; default: ldv_stop(); } ldv_67620: ; return; } } void ldv_main_exported_52(void) { char *ldvarg257 ; void *tmp ; struct device_attribute *ldvarg258 ; void *tmp___0 ; struct device *ldvarg256 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg257 = (char *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg258 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg256 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_52 == 1) { qla2x00_optrom_bios_version_show(ldvarg256, ldvarg258, ldvarg257); ldv_state_variable_52 = 1; } else { } goto ldv_67629; default: ldv_stop(); } ldv_67629: ; return; } } void ldv_main_exported_60(void) { char *ldvarg260 ; void *tmp ; struct device_attribute *ldvarg261 ; void *tmp___0 ; struct device *ldvarg259 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg260 = (char *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg261 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg259 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_60 == 1) { qla2x00_isp_id_show(ldvarg259, ldvarg261, ldvarg260); ldv_state_variable_60 = 1; } else { } goto ldv_67638; default: ldv_stop(); } ldv_67638: ; return; } } void ldv_main_exported_34(void) { size_t ldvarg532 ; char *ldvarg533 ; void *tmp ; char *ldvarg531 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg533 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg531 = (char *)tmp___0; ldv_memset((void *)(& ldvarg532), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_34 == 1) { qla2x00_allow_cna_fw_dump_store(dev_attr_allow_cna_fw_dump_group1, dev_attr_allow_cna_fw_dump_group0, (char const *)ldvarg533, ldvarg532); ldv_state_variable_34 = 1; } else { } goto ldv_67647; case 1: ; if (ldv_state_variable_34 == 1) { qla2x00_allow_cna_fw_dump_show(dev_attr_allow_cna_fw_dump_group1, dev_attr_allow_cna_fw_dump_group0, ldvarg531); ldv_state_variable_34 = 1; } else { } goto ldv_67647; default: ldv_stop(); } ldv_67647: ; return; } } void ldv_main_exported_56(void) { struct device *ldvarg262 ; void *tmp ; char *ldvarg263 ; void *tmp___0 ; struct device_attribute *ldvarg264 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg262 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg263 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg264 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_56 == 1) { qla2x00_link_state_show(ldvarg262, ldvarg264, ldvarg263); ldv_state_variable_56 = 1; } else { } goto ldv_67657; default: ldv_stop(); } ldv_67657: ; return; } } void ldv_main_exported_73(void) { size_t ldvarg265 ; loff_t ldvarg270 ; size_t ldvarg268 ; loff_t ldvarg267 ; char *ldvarg269 ; void *tmp ; char *ldvarg266 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg269 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg266 = (char *)tmp___0; ldv_memset((void *)(& ldvarg265), 0, 8UL); ldv_memset((void *)(& ldvarg270), 0, 8UL); ldv_memset((void *)(& ldvarg268), 0, 8UL); ldv_memset((void *)(& ldvarg267), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_73 == 2) { qla2x00_sysfs_write_fw_dump_template(sysfs_fw_dump_template_attr_group1, sysfs_fw_dump_template_attr_group0, sysfs_fw_dump_template_attr_group2, 
ldvarg269, ldvarg270, ldvarg268); ldv_state_variable_73 = 2; } else { } goto ldv_67669; case 1: ; if (ldv_state_variable_73 == 2) { qla2x00_sysfs_read_fw_dump_template(sysfs_fw_dump_template_attr_group1, sysfs_fw_dump_template_attr_group0, sysfs_fw_dump_template_attr_group2, ldvarg266, ldvarg267, ldvarg265); ldv_state_variable_73 = 2; } else { } goto ldv_67669; case 2: ; if (ldv_state_variable_73 == 2) { ldv_release_73(); ldv_state_variable_73 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_67669; case 3: ; if (ldv_state_variable_73 == 1) { ldv_probe_73(); ldv_state_variable_73 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_67669; default: ldv_stop(); } ldv_67669: ; return; } } void ldv_main_exported_66(void) { struct bin_attribute *ldvarg274 ; void *tmp ; loff_t ldvarg276 ; struct file *ldvarg273 ; void *tmp___0 ; struct kobject *ldvarg275 ; void *tmp___1 ; size_t ldvarg271 ; char *ldvarg272 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(72UL); ldvarg274 = (struct bin_attribute *)tmp; tmp___0 = __VERIFIER_nondet_pointer(); ldvarg273 = (struct file *)tmp___0; tmp___1 = ldv_init_zalloc(296UL); ldvarg275 = (struct kobject *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg272 = (char *)tmp___2; ldv_memset((void *)(& ldvarg276), 0, 8UL); ldv_memset((void *)(& ldvarg271), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_66 == 2) { qla2x00_sysfs_read_xgmac_stats(ldvarg273, ldvarg275, ldvarg274, ldvarg272, ldvarg276, ldvarg271); ldv_state_variable_66 = 2; } else { } goto ldv_67684; case 1: ; if (ldv_state_variable_66 == 2) { ldv_release_66(); ldv_state_variable_66 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_67684; case 2: ; if (ldv_state_variable_66 == 1) { ldv_probe_66(); ldv_state_variable_66 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_67684; default: ldv_stop(); } ldv_67684: ; return; } } void ldv_main_exported_45(void) { struct device_attribute *ldvarg279 ; void *tmp ; struct device *ldvarg277 ; void *tmp___0 ; char *ldvarg278 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg279 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg277 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg278 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_45 == 1) { qla2x00_mpi_version_show(ldvarg277, ldvarg279, ldvarg278); ldv_state_variable_45 = 1; } else { } goto ldv_67695; default: ldv_stop(); } ldv_67695: ; return; } } void ldv_main_exported_37(void) { char *ldvarg535 ; void *tmp ; struct device_attribute *ldvarg536 ; void *tmp___0 ; struct device *ldvarg534 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg535 = (char *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg536 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg534 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_37 == 1) { qla2x00_diag_requests_show(ldvarg534, ldvarg536, ldvarg535); ldv_state_variable_37 = 1; } else { } goto ldv_67704; default: ldv_stop(); } ldv_67704: ; return; } } void ldv_main_exported_43(void) { struct device *ldvarg537 ; void *tmp ; struct device_attribute *ldvarg539 ; void *tmp___0 ; char *ldvarg538 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg537 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg539 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg538 = (char *)tmp___1; 
tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_43 == 1) { qla2x00_flash_block_size_show(ldvarg537, ldvarg539, ldvarg538); ldv_state_variable_43 = 1; } else { } goto ldv_67713; default: ldv_stop(); } ldv_67713: ; return; } } void ldv_main_exported_62(void) { char *ldvarg326 ; void *tmp ; struct device_attribute *ldvarg327 ; void *tmp___0 ; struct device *ldvarg325 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg326 = (char *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg327 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg325 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_62 == 1) { qla2x00_serial_num_show(ldvarg325, ldvarg327, ldvarg326); ldv_state_variable_62 = 1; } else { } goto ldv_67722; default: ldv_stop(); } ldv_67722: ; return; } } void ldv_main_exported_54(void) { char *ldvarg328 ; void *tmp ; char *ldvarg330 ; void *tmp___0 ; size_t ldvarg329 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg328 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg330 = (char *)tmp___0; ldv_memset((void *)(& ldvarg329), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_54 == 1) { qla2x00_zio_timer_store(dev_attr_zio_timer_group1, dev_attr_zio_timer_group0, (char const *)ldvarg330, ldvarg329); ldv_state_variable_54 = 1; } else { } goto ldv_67731; case 1: ; if (ldv_state_variable_54 == 1) { qla2x00_zio_timer_show(dev_attr_zio_timer_group1, dev_attr_zio_timer_group0, ldvarg328); ldv_state_variable_54 = 1; } else { } goto ldv_67731; default: ldv_stop(); } ldv_67731: ; return; } } bool ldv_queue_work_on_177(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_178(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_179(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_180(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_181(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_182(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; 
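/*
 * The ldv_queue_work_on_*, ldv_queue_delayed_work_on_* and
 * ldv_flush_workqueue_* wrappers call the real workqueue functions and
 * additionally feed the LDV work model (activate_work_7,
 * call_and_disable_all_7), so queued work items become callable from the
 * harness.  Likewise, the ldv_scsi_add_host_with_dma_18x wrappers arm the
 * scsi_host_template state machine (ldv_state_variable_88) only when
 * scsi_add_host_with_dma() reports success, and ldv_scsi_remove_host_184
 * disarms it again, which makes the host-template callbacks reachable
 * only while a host is registered.
 */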
ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_183(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___4 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } void ldv_scsi_remove_host_184(struct Scsi_Host *shost ) { { scsi_remove_host(shost); ldv_state_variable_88 = 0; return; } } int ldv_del_timer_sync_203(struct timer_list *ldv_func_arg1 ) ; bool ldv_queue_work_on_197(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_199(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_198(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_201(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_200(struct workqueue_struct *ldv_func_arg1 ) ; void activate_work_6(struct work_struct *work , int state ) ; void activate_pending_timer_23(struct timer_list *timer , unsigned long data , int pending_flag ) ; void choose_timer_23(struct timer_list *timer ) ; void invoke_work_6(void) ; int reg_timer_23(struct timer_list *timer ) ; void call_and_disable_all_6(int state ) ; void disable_suitable_timer_23(struct timer_list *timer ) ; void disable_work_6(struct work_struct *work ) ; void call_and_disable_work_6(struct work_struct *work ) ; int ldv_scsi_add_host_with_dma_202(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; __inline static void u64_to_wwn(u64 inm , u8 *wwn ) { { *wwn = (u8 )(inm >> 56); *(wwn + 1UL) = (u8 )(inm >> 48); *(wwn + 2UL) = (u8 )(inm >> 40); *(wwn + 3UL) = (u8 )(inm >> 32); *(wwn + 4UL) = (u8 )(inm >> 24); *(wwn + 5UL) = (u8 )(inm >> 16); *(wwn + 6UL) = (u8 )(inm >> 8); *(wwn + 7UL) = (u8 )inm; return; } } static char const * const port_state_str___3[5U] = { "Unknown", "UNCONFIGURED", "DEAD", "LOST", "ONLINE"}; __inline static void qla2x00_set_fcport_state___2(fc_port_t *fcport , int state ) { int old_state ; { old_state = atomic_read((atomic_t const *)(& fcport->state)); atomic_set(& fcport->state, state); if (old_state != 0 && old_state != state) { ql_dbg(268435456U, fcport->vha, 8317, "FCPort state transitioned from %s to %s - portid=%02x%02x%02x.\n", port_state_str___3[old_state], port_state_str___3[state], (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { } return; } } void qla2x00_vp_stop_timer(scsi_qla_host_t *vha ) { { if ((unsigned int )vha->vp_idx != 0U && vha->timer_active != 0U) { ldv_del_timer_sync_203(& vha->timer); vha->timer_active = 0U; } else { } return; } } static uint32_t qla24xx_allocate_vp_id(scsi_qla_host_t *vha ) { uint32_t vp_id ; struct qla_hw_data *ha ; unsigned long flags ; unsigned long tmp ; raw_spinlock_t *tmp___0 ; { ha = vha->hw; mutex_lock_nested(& ha->vport_lock, 0U); tmp = find_first_zero_bit((unsigned long const *)(& ha->vp_idx_map), (unsigned long )((int )ha->max_npiv_vports + 1)); vp_id = (uint32_t )tmp; if ((uint32_t )ha->max_npiv_vports < vp_id) { ql_dbg(262144U, vha, 40960, "vp_id %d is bigger than max-supported %d.\n", vp_id, (int 
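/*
 * From u64_to_wwn() onward this section returns to genuine qla2xxx driver
 * code (apparently qla_mid.c, the multi-port/NPIV support).  u64_to_wwn()
 * unpacks a 64-bit WWN into 8 bytes, most significant byte first; a
 * hypothetical name 0x2100001b32a9d5e6 would become the byte sequence
 * 21 00 00 1b 32 a9 d5 e6.  qla2x00_set_fcport_state___2() logs a
 * transition through port_state_str___3[] only when the state changes
 * from a known previous state, and qla24xx_allocate_vp_id() hands out
 * vport indices by taking the first clear bit of ha->vp_idx_map under
 * ha->vport_lock, then links the new vha onto ha->vp_list under the
 * vport_slock spinlock.
 */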
)ha->max_npiv_vports); mutex_unlock(& ha->vport_lock); return (vp_id); } else { } set_bit((long )vp_id, (unsigned long volatile *)(& ha->vp_idx_map)); ha->num_vhosts = (uint16_t )((int )ha->num_vhosts + 1); vha->vp_idx = (uint16_t )vp_id; tmp___0 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___0); list_add_tail(& vha->list, & ha->vp_list); qlt_update_vp_map(vha, 1); spin_unlock_irqrestore(& ha->vport_slock, flags); mutex_unlock(& ha->vport_lock); return (vp_id); } } void qla24xx_deallocate_vp_id(scsi_qla_host_t *vha ) { uint16_t vp_id ; struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; int tmp___1 ; { ha = vha->hw; flags = 0UL; mutex_lock_nested(& ha->vport_lock, 0U); tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); goto ldv_65965; ldv_65964: spin_unlock_irqrestore(& ha->vport_slock, flags); msleep(500U); tmp___0 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___0); ldv_65965: tmp___1 = atomic_read((atomic_t const *)(& vha->vref_count)); if (tmp___1 != 0) { goto ldv_65964; } else { } list_del(& vha->list); qlt_update_vp_map(vha, 3); spin_unlock_irqrestore(& ha->vport_slock, flags); vp_id = vha->vp_idx; ha->num_vhosts = (uint16_t )((int )ha->num_vhosts - 1); clear_bit((long )vp_id, (unsigned long volatile *)(& ha->vp_idx_map)); mutex_unlock(& ha->vport_lock); return; } } static scsi_qla_host_t *qla24xx_find_vhost_by_name(struct qla_hw_data *ha , uint8_t *port_name ) { scsi_qla_host_t *vha ; struct scsi_qla_host *tvha ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp___0 ; struct list_head const *__mptr___1 ; { tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)ha->vp_list.next; vha = (scsi_qla_host_t *)__mptr; __mptr___0 = (struct list_head const *)vha->list.next; tvha = (scsi_qla_host_t *)__mptr___0; goto ldv_65984; ldv_65983: tmp___0 = memcmp((void const *)port_name, (void const *)(& vha->port_name), 8UL); if (tmp___0 == 0) { spin_unlock_irqrestore(& ha->vport_slock, flags); return (vha); } else { } vha = tvha; __mptr___1 = (struct list_head const *)tvha->list.next; tvha = (struct scsi_qla_host *)__mptr___1; ldv_65984: ; if ((unsigned long )(& vha->list) != (unsigned long )(& ha->vp_list)) { goto ldv_65983; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); return ((scsi_qla_host_t *)0); } } static void qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha ) { fc_port_t *fcport ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_65995; ldv_65994: ql_dbg(262144U, vha, 40961, "Marking port dead, loop_id=0x%04x : %x.\n", (int )fcport->loop_id, (int )(fcport->vha)->vp_idx); qla2x00_mark_device_lost(vha, fcport, 0, 0); qla2x00_set_fcport_state___2(fcport, 1); __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_65995: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_65994; } else { } return; } } int qla24xx_disable_vp(scsi_qla_host_t *vha ) { unsigned long flags ; int ret ; raw_spinlock_t *tmp ; { ret = qla24xx_control_vp(vha, 11); atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); tmp = spinlock_check(& (vha->hw)->vport_slock); flags = _raw_spin_lock_irqsave(tmp); qlt_update_vp_map(vha, 4); 
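/*
 * qla24xx_deallocate_vp_id() above waits for vha->vref_count to drain
 * before unlinking the vport: it repeatedly drops vport_slock, sleeps
 * 500 ms and retries, which is the CIL-expanded form of a
 * "while (atomic_read(&vha->vref_count)) msleep(500);" loop.
 * qla24xx_find_vhost_by_name() is an unrolled list_for_each_entry_safe-
 * style walk of ha->vp_list comparing the 8-byte WWPN with memcmp(), and
 * qla2x00_mark_vp_devices_dead() walks vha->vp_fcports marking every
 * remote port lost.  qla24xx_disable_vp() then combines these steps:
 * stop the vport via qla24xx_control_vp(vha, 11) (presumably the
 * disable-with-logout command), force the loop state down, unmap the
 * vport through qlt_update_vp_map(), mark its devices dead and report
 * the result with fc_vport_set_state().
 */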
spin_unlock_irqrestore(& (vha->hw)->vport_slock, flags); qla2x00_mark_vp_devices_dead(vha); atomic_set(& vha->vp_state, 2); vha->flags.management_server_logged_in = 0U; if (ret == 0) { fc_vport_set_state(vha->fc_vport, 2); } else { fc_vport_set_state(vha->fc_vport, 9); return (-1); } return (0); } } int qla24xx_enable_vp(scsi_qla_host_t *vha ) { int ret ; struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; { ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; tmp___0 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___0 == 2) { vha->vp_err_state = 1U; fc_vport_set_state(vha->fc_vport, 3); goto enable_failed; } else { tmp___1 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___1 == 6) { vha->vp_err_state = 1U; fc_vport_set_state(vha->fc_vport, 3); goto enable_failed; } else if (((int )ha->current_topology & 8) == 0) { vha->vp_err_state = 1U; fc_vport_set_state(vha->fc_vport, 3); goto enable_failed; } else { } } mutex_lock_nested(& ha->vport_lock, 0U); ret = qla24xx_modify_vp_config(vha); mutex_unlock(& ha->vport_lock); if (ret != 0) { fc_vport_set_state(vha->fc_vport, 9); goto enable_failed; } else { } ql_dbg(4194304U, vha, 32794, "Virtual port with id: %d - Enabled.\n", (int )vha->vp_idx); return (0); enable_failed: ql_dbg(4194304U, vha, 32795, "Virtual port with id: %d - Disabled.\n", (int )vha->vp_idx); return (1); } } static void qla24xx_configure_vp(scsi_qla_host_t *vha ) { struct fc_vport *fc_vport ; int ret ; int tmp ; { fc_vport = vha->fc_vport; ql_dbg(262144U, vha, 40962, "%s: change request #3.\n", "qla24xx_configure_vp"); ret = qla2x00_send_change_request(vha, 3, (int )vha->vp_idx); if (ret != 0) { ql_dbg(262144U, vha, 40963, "Failed to enable receiving of RSCN requests: 0x%x.\n", ret); return; } else { clear_bit(4L, (unsigned long volatile *)(& vha->vp_flags)); } vha->flags.online = 1U; tmp = qla24xx_configure_vhba(vha); if (tmp != 0) { return; } else { } atomic_set(& vha->vp_state, 1); fc_vport_set_state(fc_vport, 1); return; } } void qla2x00_alert_all_vps(struct rsp_que *rsp , uint16_t *mb ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; int i ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; raw_spinlock_t *tmp___0 ; struct list_head const *__mptr___0 ; { ha = rsp->hw; i = 0; tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)ha->vp_list.next; vha = (scsi_qla_host_t *)__mptr; goto ldv_66046; ldv_66045: ; if ((unsigned int )vha->vp_idx != 0U) { atomic_inc(& vha->vref_count); spin_unlock_irqrestore(& ha->vport_slock, flags); switch ((int )*mb) { case 32784: ; case 32785: ; case 32786: ; case 32787: ; case 32816: ; case 32822: ; case 32788: ; case 32789: ql_dbg(33554432U, vha, 20516, "Async_event for VP[%d], mb=0x%x vha=%p.\n", i, (int )*mb, vha); qla2x00_async_event(vha, rsp, mb); goto ldv_66041; } ldv_66041: tmp___0 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___0); atomic_dec(& vha->vref_count); } else { } i = i + 1; __mptr___0 = (struct list_head const *)vha->list.next; vha = (scsi_qla_host_t *)__mptr___0; ldv_66046: ; if ((unsigned long )(& vha->list) != (unsigned long )(& ha->vp_list)) { goto ldv_66045; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); return; } } int qla2x00_vp_abort_isp(scsi_qla_host_t *vha ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp___0 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___0 
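/*
 * qla24xx_enable_vp() refuses to bring a vport up unless the base port
 * is usable: it bails out when base_vha->loop_state is down or dead
 * (values 2 and 6 here) or when the current topology lacks the fabric
 * bit (ha->current_topology & 8, apparently ISP_CFG_F); otherwise it
 * pushes the vport configuration via qla24xx_modify_vp_config() under
 * vport_lock.  qla24xx_configure_vp() follows up by sending change
 * request #3 so the vport receives RSCNs, then runs
 * qla24xx_configure_vhba().  qla2x00_alert_all_vps() fans selected async
 * events (0x8010-0x8015, 0x8030 and 0x8036, per the switch on *mb) out
 * to every non-base vport, bumping vref_count around the window where
 * vport_slock is dropped for the callback.
 */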
!= 2) { atomic_set(& vha->loop_state, 2); qla2x00_mark_all_devices_lost(vha, 0); } else { tmp = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp == 0) { atomic_set(& vha->loop_down_timer, 255); } else { } } tmp___1 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 == 0) { qla24xx_control_vp(vha, 11); } else { } ql_dbg(4194304U, vha, 32797, "Scheduling enable of Vport %d.\n", (int )vha->vp_idx); tmp___2 = qla24xx_enable_vp(vha); return (tmp___2); } } static int qla2x00_do_dpc_vp(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; { ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; ql_dbg(67141632U, vha, 16402, "Entering %s vp_flags: 0x%lx.\n", "qla2x00_do_dpc_vp", vha->vp_flags); qla2x00_do_work(vha); tmp___1 = constant_test_bit(5L, (unsigned long const volatile *)(& base_vha->vp_flags)); if (tmp___1 != 0) { tmp___0 = test_and_clear_bit(0L, (unsigned long volatile *)(& vha->vp_flags)); if (tmp___0 != 0) { ql_dbg(67108864U, vha, 16404, "Configure VP scheduled.\n"); qla24xx_configure_vp(vha); ql_dbg(67108864U, vha, 16405, "Configure VP end.\n"); return (0); } else { } } else { } tmp___2 = constant_test_bit(13L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___2 != 0) { ql_dbg(67108864U, vha, 16406, "FCPort update scheduled.\n"); qla2x00_update_fcports(vha); clear_bit(13L, (unsigned long volatile *)(& vha->dpc_flags)); ql_dbg(67108864U, vha, 16407, "FCPort update end.\n"); } else { } tmp___3 = test_and_clear_bit(8L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___3 != 0) { tmp___4 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___4 == 0) { tmp___5 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___5 != 2) { ql_dbg(67108864U, vha, 16408, "Relogin needed scheduled.\n"); qla2x00_relogin(vha); ql_dbg(67108864U, vha, 16409, "Relogin needed end.\n"); } else { } } else { } } else { } tmp___6 = test_and_clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___6 != 0) { tmp___7 = test_and_set_bit(1L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___7 == 0) { clear_bit(1L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } } else { } tmp___9 = test_and_clear_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___9 != 0) { tmp___8 = test_and_set_bit(5L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___8 == 0) { ql_dbg(67108864U, vha, 16410, "Loop resync scheduled.\n"); qla2x00_loop_resync(vha); clear_bit(5L, (unsigned long volatile *)(& vha->dpc_flags)); ql_dbg(67108864U, vha, 16411, "Loop resync end.\n"); } else { } } else { } ql_dbg(67141632U, vha, 16412, "Exiting %s.\n", "qla2x00_do_dpc_vp"); return (0); } } void qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha ) { int ret ; struct qla_hw_data *ha ; scsi_qla_host_t *vp ; unsigned long flags ; int tmp ; raw_spinlock_t *tmp___0 ; struct list_head const *__mptr ; raw_spinlock_t *tmp___1 ; struct list_head const *__mptr___0 ; { ha = vha->hw; flags = 0UL; if ((unsigned int )vha->vp_idx != 0U) { return; } else { } tmp = list_empty((struct list_head const *)(& ha->vp_list)); if (tmp != 0) { return; } else { } clear_bit(14L, (unsigned long volatile *)(& vha->dpc_flags)); if (((int )ha->current_topology & 8) == 0) { return; } else { } tmp___0 = spinlock_check(& ha->vport_slock); flags = 
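/*
 * qla2x00_do_dpc_vp() above is the per-vport slice of the DPC thread: it
 * drains queued work, (re)configures the vport when the base port has
 * flagged it, and handles FCPort-update, relogin and loop-resync
 * requests, each encoded as a bit in vha->dpc_flags and consumed with
 * test_and_clear_bit().  qla2x00_do_dpc_all_vps(), whose loop follows,
 * runs only on the base port (vp_idx 0) and iterates ha->vp_list, taking
 * a vref_count reference on each vport so the entry cannot disappear
 * while vport_slock is released around the qla2x00_do_dpc_vp() call.
 */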
_raw_spin_lock_irqsave(tmp___0); __mptr = (struct list_head const *)ha->vp_list.next; vp = (scsi_qla_host_t *)__mptr; goto ldv_66075; ldv_66074: ; if ((unsigned int )vp->vp_idx != 0U) { atomic_inc(& vp->vref_count); spin_unlock_irqrestore(& ha->vport_slock, flags); ret = qla2x00_do_dpc_vp(vp); tmp___1 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___1); atomic_dec(& vp->vref_count); } else { } __mptr___0 = (struct list_head const *)vp->list.next; vp = (scsi_qla_host_t *)__mptr___0; ldv_66075: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_66074; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); return; } } int qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport ) { scsi_qla_host_t *base_vha ; void *tmp ; struct qla_hw_data *ha ; scsi_qla_host_t *vha ; uint8_t port_name[8U] ; int tmp___0 ; { tmp = shost_priv(fc_vport->shost); base_vha = (scsi_qla_host_t *)tmp; ha = base_vha->hw; if (fc_vport->roles != 2U) { return (-38); } else { } if (*((unsigned long *)ha + 2UL) == 0UL) { return (-38); } else { } if (((int )ha->switch_cap & 1024) == 0) { return (-95); } else { } u64_to_wwn(fc_vport->port_name, (u8 *)(& port_name)); tmp___0 = memcmp((void const *)(& port_name), (void const *)(& base_vha->port_name), 8UL); if (tmp___0 == 0) { return (-76); } else { } vha = qla24xx_find_vhost_by_name(ha, (uint8_t *)(& port_name)); if ((unsigned long )vha != (unsigned long )((scsi_qla_host_t *)0)) { return (-76); } else { } if ((int )ha->num_vhosts > (int )ha->max_npiv_vports) { ql_dbg(262144U, vha, 40964, "num_vhosts %ud is bigger than max_npiv_vports %ud.\n", (int )ha->num_vhosts, (int )ha->max_npiv_vports); return (-38); } else { } return (0); } } scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *fc_vport ) { scsi_qla_host_t *base_vha ; void *tmp ; struct qla_hw_data *ha ; scsi_qla_host_t *vha ; struct scsi_host_template *sht ; struct Scsi_Host *host ; struct scsi_qla_host *tmp___0 ; uint32_t tmp___1 ; { tmp = shost_priv(fc_vport->shost); base_vha = (scsi_qla_host_t *)tmp; ha = base_vha->hw; sht = & qla2xxx_driver_template; tmp___0 = qla2x00_create_host(sht, ha); vha = tmp___0; if ((unsigned long )vha == (unsigned long )((scsi_qla_host_t *)0)) { ql_log(1U, vha, 40965, "scsi_host_alloc() failed for vport.\n"); return ((scsi_qla_host_t *)0); } else { } host = vha->host; fc_vport->dd_data = (void *)vha; u64_to_wwn(fc_vport->node_name, (u8 *)(& vha->node_name)); u64_to_wwn(fc_vport->port_name, (u8 *)(& vha->port_name)); vha->fc_vport = fc_vport; vha->device_flags = 0U; tmp___1 = qla24xx_allocate_vp_id(vha); vha->vp_idx = (uint16_t )tmp___1; if ((int )vha->vp_idx > (int )ha->max_npiv_vports) { ql_dbg(262144U, vha, 40966, "Couldn\'t allocate vp_id.\n"); goto create_vhost_failed; } else { } vha->mgmt_svr_loop_id = (unsigned int )vha->vp_idx + 10U; vha->dpc_flags = 0UL; set_bit(4L, (unsigned long volatile *)(& vha->vp_flags)); atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); qla2x00_start_timer(vha, (void *)(& qla2x00_timer), 1UL); vha->req = base_vha->req; host->can_queue = (int )(base_vha->req)->length + 128; host->cmd_per_lun = 3; if ((ha->device_type & 33554432U) != 0U && ql2xenabledif != 0) { host->max_cmd_len = 32U; } else { host->max_cmd_len = 16U; } host->max_channel = 0U; host->max_lun = ql2xmaxlun; host->unique_id = host->host_no; host->max_id = (unsigned int )ha->max_fibre_devices; host->transportt = qla2xxx_transport_vport_template; ql_dbg(262144U, vha, 40967, "Detect vport hba %ld at 
address = %p.\n", vha->host_no, vha); vha->flags.init_done = 1U; mutex_lock_nested(& ha->vport_lock, 0U); set_bit((long )vha->vp_idx, (unsigned long volatile *)(& ha->vp_idx_map)); ha->cur_vport_count = ha->cur_vport_count + 1; mutex_unlock(& ha->vport_lock); return (vha); create_vhost_failed: ; return ((scsi_qla_host_t *)0); } } static void qla25xx_free_req_que(struct scsi_qla_host *vha , struct req_que *req ) { struct qla_hw_data *ha ; uint16_t que_id ; { ha = vha->hw; que_id = req->id; dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )req->length + 1) * 64UL, (void *)req->ring, req->dma, (struct dma_attrs *)0); req->ring = (request_t *)0; req->dma = 0ULL; if ((unsigned int )que_id != 0U) { *(ha->req_q_map + (unsigned long )que_id) = (struct req_que *)0; mutex_lock_nested(& ha->vport_lock, 0U); clear_bit((long )que_id, (unsigned long volatile *)(& ha->req_qid_map)); mutex_unlock(& ha->vport_lock); } else { } kfree((void const *)req->outstanding_cmds); kfree((void const *)req); req = (struct req_que *)0; return; } } static void qla25xx_free_rsp_que(struct scsi_qla_host *vha , struct rsp_que *rsp ) { struct qla_hw_data *ha ; uint16_t que_id ; { ha = vha->hw; que_id = rsp->id; if ((unsigned long )rsp->msix != (unsigned long )((struct qla_msix_entry *)0) && (rsp->msix)->have_irq != 0) { free_irq((rsp->msix)->vector, (void *)rsp); (rsp->msix)->have_irq = 0; (rsp->msix)->rsp = (struct rsp_que *)0; } else { } dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )rsp->length + 1) * 64UL, (void *)rsp->ring, rsp->dma, (struct dma_attrs *)0); rsp->ring = (response_t *)0; rsp->dma = 0ULL; if ((unsigned int )que_id != 0U) { *(ha->rsp_q_map + (unsigned long )que_id) = (struct rsp_que *)0; mutex_lock_nested(& ha->vport_lock, 0U); clear_bit((long )que_id, (unsigned long volatile *)(& ha->rsp_qid_map)); mutex_unlock(& ha->vport_lock); } else { } kfree((void const *)rsp); rsp = (struct rsp_que *)0; return; } } int qla25xx_delete_req_que(struct scsi_qla_host *vha , struct req_que *req ) { int ret ; { ret = -1; if ((unsigned long )req != (unsigned long )((struct req_que *)0)) { req->options = (uint16_t )((unsigned int )req->options | 1U); ret = qla25xx_init_req_que(vha, req); } else { } if (ret == 0) { qla25xx_free_req_que(vha, req); } else { } return (ret); } } static int qla25xx_delete_rsp_que(struct scsi_qla_host *vha , struct rsp_que *rsp ) { int ret ; { ret = -1; if ((unsigned long )rsp != (unsigned long )((struct rsp_que *)0)) { rsp->options = (uint16_t )((unsigned int )rsp->options | 1U); ret = qla25xx_init_rsp_que(vha, rsp); } else { } if (ret == 0) { qla25xx_free_rsp_que(vha, rsp); } else { } return (ret); } } int qla25xx_delete_queues(struct scsi_qla_host *vha ) { int cnt ; int ret ; struct req_que *req ; struct rsp_que *rsp ; struct qla_hw_data *ha ; { ret = 0; req = (struct req_que *)0; rsp = (struct rsp_que *)0; ha = vha->hw; cnt = 1; goto ldv_66124; ldv_66123: req = *(ha->req_q_map + (unsigned long )cnt); if ((unsigned long )req != (unsigned long )((struct req_que *)0)) { ret = qla25xx_delete_req_que(vha, req); if (ret != 0) { ql_log(1U, vha, 234, "Couldn\'t delete req que %d.\n", (int )req->id); return (ret); } else { } } else { } cnt = cnt + 1; ldv_66124: ; if ((int )ha->max_req_queues > cnt) { goto ldv_66123; } else { } cnt = 1; goto ldv_66127; ldv_66126: rsp = *(ha->rsp_q_map + (unsigned long )cnt); if ((unsigned long )rsp != (unsigned long )((struct rsp_que *)0)) { ret = qla25xx_delete_rsp_que(vha, rsp); if (ret != 0) { ql_log(1U, vha, 235, "Couldn\'t delete rsp que %d.\n", (int 
)rsp->id); return (ret); } else { } } else { } cnt = cnt + 1; ldv_66127: ; if ((int )ha->max_rsp_queues > cnt) { goto ldv_66126; } else { } return (ret); } } int qla25xx_create_req_que(struct qla_hw_data *ha , uint16_t options , uint8_t vp_idx , uint16_t rid , int rsp_que , uint8_t qos ) { int ret ; struct req_que *req ; struct scsi_qla_host *base_vha ; void *tmp ; uint16_t que_id ; device_reg_t *reg ; uint32_t cnt ; void *tmp___0 ; void *tmp___1 ; unsigned long tmp___2 ; { ret = 0; req = (struct req_que *)0; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; que_id = 0U; tmp___0 = kzalloc(192UL, 208U); req = (struct req_que *)tmp___0; if ((unsigned long )req == (unsigned long )((struct req_que *)0)) { ql_log(0U, base_vha, 217, "Failed to allocate memory for request queue.\n"); goto failed; } else { } req->length = 2048U; tmp___1 = dma_alloc_attrs(& (ha->pdev)->dev, (unsigned long )((int )req->length + 1) * 64UL, & req->dma, 208U, (struct dma_attrs *)0); req->ring = (request_t *)tmp___1; if ((unsigned long )req->ring == (unsigned long )((request_t *)0)) { ql_log(0U, base_vha, 218, "Failed to allocate memory for request_ring.\n"); goto que_failed; } else { } ret = qla2x00_alloc_outstanding_cmds(ha, req); if (ret != 0) { goto que_failed; } else { } mutex_lock_nested(& ha->vport_lock, 0U); tmp___2 = find_first_zero_bit((unsigned long const *)(& ha->req_qid_map), (unsigned long )ha->max_req_queues); que_id = (uint16_t )tmp___2; if ((int )((unsigned short )ha->max_req_queues) <= (int )que_id) { mutex_unlock(& ha->vport_lock); ql_log(1U, base_vha, 219, "No resources to create additional request queue.\n"); goto que_failed; } else { } set_bit((long )que_id, (unsigned long volatile *)(& ha->req_qid_map)); *(ha->req_q_map + (unsigned long )que_id) = req; req->rid = rid; req->vp_idx = (uint16_t )vp_idx; req->qos = (uint16_t )qos; ql_dbg(1048576U, base_vha, 49154, "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", (int )que_id, (int )req->rid, (int )req->vp_idx, (int )req->qos); ql_dbg(1073741824U, base_vha, 220, "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", (int )que_id, (int )req->rid, (int )req->vp_idx, (int )req->qos); if (rsp_que < 0) { req->rsp = (struct rsp_que *)0; } else { req->rsp = *(ha->rsp_q_map + (unsigned long )rsp_que); } if ((unsigned int )((unsigned char )((int )req->rid >> 8)) != 0U) { options = (uint16_t )((unsigned int )options | 16U); } else { } if ((unsigned int )((unsigned char )req->rid) != 0U) { options = (uint16_t )((unsigned int )options | 32U); } else { } req->options = options; ql_dbg(1048576U, base_vha, 49155, "options=0x%x.\n", (int )req->options); ql_dbg(1073741824U, base_vha, 221, "options=0x%x.\n", (int )req->options); cnt = 1U; goto ldv_66146; ldv_66145: *(req->outstanding_cmds + (unsigned long )cnt) = (srb_t *)0; cnt = cnt + 1U; ldv_66146: ; if ((uint32_t )req->num_outstanding_cmds > cnt) { goto ldv_66145; } else { } req->current_outstanding_cmd = 1U; req->ring_ptr = req->ring; req->ring_index = 0U; req->cnt = req->length; req->id = que_id; reg = ((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) ? 
ha->mqiobase + (unsigned long )((int )que_id * 4096) : ha->iobase; req->req_q_in = & reg->isp25mq.req_q_in; req->req_q_out = & reg->isp25mq.req_q_out; req->max_q_depth = (*(ha->req_q_map))->max_q_depth; req->out_ptr = (uint16_t *)req->ring + (unsigned long )req->length; mutex_unlock(& ha->vport_lock); ql_dbg(1048576U, base_vha, 49156, "ring_ptr=%p ring_index=%d, cnt=%d id=%d max_q_depth=%d.\n", req->ring_ptr, (int )req->ring_index, (int )req->cnt, (int )req->id, req->max_q_depth); ql_dbg(1073741824U, base_vha, 222, "ring_ptr=%p ring_index=%d, cnt=%d id=%d max_q_depth=%d.\n", req->ring_ptr, (int )req->ring_index, (int )req->cnt, (int )req->id, req->max_q_depth); ret = qla25xx_init_req_que(base_vha, req); if (ret != 0) { ql_log(0U, base_vha, 223, "%s failed.\n", "qla25xx_create_req_que"); mutex_lock_nested(& ha->vport_lock, 0U); clear_bit((long )que_id, (unsigned long volatile *)(& ha->req_qid_map)); mutex_unlock(& ha->vport_lock); goto que_failed; } else { } return ((int )req->id); que_failed: qla25xx_free_req_que(base_vha, req); failed: ; return (0); } } static void qla_do_work(struct work_struct *work ) { unsigned long flags ; struct rsp_que *rsp ; struct work_struct const *__mptr ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; raw_spinlock_t *tmp ; void *tmp___0 ; { __mptr = (struct work_struct const *)work; rsp = (struct rsp_que *)__mptr + 0xffffffffffffff98UL; ha = rsp->hw; tmp = spinlock_check(& (rsp->hw)->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (struct scsi_qla_host *)tmp___0; qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(& (rsp->hw)->hardware_lock, flags); return; } } int qla25xx_create_rsp_que(struct qla_hw_data *ha , uint16_t options , uint8_t vp_idx , uint16_t rid , int req ) { int ret ; struct rsp_que *rsp ; struct scsi_qla_host *base_vha ; void *tmp ; uint16_t que_id ; device_reg_t *reg ; void *tmp___0 ; void *tmp___1 ; unsigned long tmp___2 ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; { ret = 0; rsp = (struct rsp_que *)0; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; que_id = 0U; tmp___0 = kzalloc(272UL, 208U); rsp = (struct rsp_que *)tmp___0; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(1U, base_vha, 102, "Failed to allocate memory for response queue.\n"); goto failed; } else { } rsp->length = 128U; tmp___1 = dma_alloc_attrs(& (ha->pdev)->dev, (unsigned long )((int )rsp->length + 1) * 64UL, & rsp->dma, 208U, (struct dma_attrs *)0); rsp->ring = (response_t *)tmp___1; if ((unsigned long )rsp->ring == (unsigned long )((response_t *)0)) { ql_log(1U, base_vha, 225, "Failed to allocate memory for response ring.\n"); goto que_failed; } else { } mutex_lock_nested(& ha->vport_lock, 0U); tmp___2 = find_first_zero_bit((unsigned long const *)(& ha->rsp_qid_map), (unsigned long )ha->max_rsp_queues); que_id = (uint16_t )tmp___2; if ((int )((unsigned short )ha->max_rsp_queues) <= (int )que_id) { mutex_unlock(& ha->vport_lock); ql_log(1U, base_vha, 226, "No resources to create additional request queue.\n"); goto que_failed; } else { } set_bit((long )que_id, (unsigned long volatile *)(& ha->rsp_qid_map)); if (*((unsigned long *)ha + 2UL) != 0UL) { rsp->msix = ha->msix_entries + ((unsigned long )que_id + 1UL); } else { ql_log(1U, base_vha, 227, "MSIX not enabled.\n"); } *(ha->rsp_q_map + (unsigned long )que_id) = rsp; rsp->rid = rid; rsp->vp_idx = (uint16_t )vp_idx; rsp->hw = ha; ql_dbg(1073741824U, base_vha, 228, "queue_id=%d rid=%d 
vp_idx=%d hw=%p.\n", (int )que_id, (int )rsp->rid, (int )rsp->vp_idx, rsp->hw); if ((unsigned int )((unsigned char )((int )rsp->rid >> 8)) != 0U) { options = (uint16_t )((unsigned int )options | 16U); } else { } if ((unsigned int )((unsigned char )rsp->rid) != 0U) { options = (uint16_t )((unsigned int )options | 32U); } else { } if (((ha->device_type & 8192U) == 0U && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) && ((ha->device_type & 524288U) == 0U && (ha->device_type & 1048576U) == 0U)) { options = (uint16_t )((unsigned int )options | 64U); } else { } rsp->options = options; rsp->id = que_id; reg = ((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U) ? ha->mqiobase + (unsigned long )((int )que_id * 4096) : ha->iobase; rsp->rsp_q_in = & reg->isp25mq.rsp_q_in; rsp->rsp_q_out = & reg->isp25mq.rsp_q_out; rsp->in_ptr = (uint16_t *)rsp->ring + (unsigned long )rsp->length; mutex_unlock(& ha->vport_lock); ql_dbg(1048576U, base_vha, 49163, "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", (int )rsp->options, (int )rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); ql_dbg(1073741824U, base_vha, 229, "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", (int )rsp->options, (int )rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); ret = qla25xx_request_irq(rsp); if (ret != 0) { goto que_failed; } else { } ret = qla25xx_init_rsp_que(base_vha, rsp); if (ret != 0) { ql_log(0U, base_vha, 231, "%s failed.\n", "qla25xx_create_rsp_que"); mutex_lock_nested(& ha->vport_lock, 0U); clear_bit((long )que_id, (unsigned long volatile *)(& ha->rsp_qid_map)); mutex_unlock(& ha->vport_lock); goto que_failed; } else { } if (req >= 0) { rsp->req = *(ha->req_q_map + (unsigned long )req); } else { rsp->req = (struct req_que *)0; } qla2x00_init_response_q_entries(rsp); if ((unsigned long )(rsp->hw)->wq != (unsigned long )((struct workqueue_struct *)0)) { __init_work(& rsp->q_work, 0); __constr_expr_0.counter = 137438953408L; rsp->q_work.data = __constr_expr_0; lockdep_init_map(& rsp->q_work.lockdep_map, "(&rsp->q_work)", & __key, 0); INIT_LIST_HEAD(& rsp->q_work.entry); rsp->q_work.func = & qla_do_work; } else { } return ((int )rsp->id); que_failed: qla25xx_free_rsp_que(base_vha, rsp); failed: ; return (0); } } void activate_work_6(struct work_struct *work , int state ) { { if (ldv_work_6_0 == 0) { ldv_work_struct_6_0 = work; ldv_work_6_0 = state; return; } else { } if (ldv_work_6_1 == 0) { ldv_work_struct_6_1 = work; ldv_work_6_1 = state; return; } else { } if (ldv_work_6_2 == 0) { ldv_work_struct_6_2 = work; ldv_work_6_2 = state; return; } else { } if (ldv_work_6_3 == 0) { ldv_work_struct_6_3 = work; ldv_work_6_3 = state; return; } else { } return; } } void activate_pending_timer_23(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_23 == (unsigned long )timer) { if (ldv_timer_state_23 == 2 || pending_flag != 0) { ldv_timer_list_23 = timer; ldv_timer_list_23->data = data; ldv_timer_state_23 = 1; } else { } return; } else { } reg_timer_23(timer); ldv_timer_list_23->data = data; return; } } void choose_timer_23(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_23 = 2; return; } } void invoke_work_6(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_6_0 == 2 || ldv_work_6_0 == 3) { ldv_work_6_0 = 4; qla_do_work(ldv_work_struct_6_0); 
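/* Descriptive note (assumption about the harness encoding): the environment model has just run the handler for work slot 0; resetting the flag to 1 presumably marks that slot as registered but no longer pending. */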
ldv_work_6_0 = 1; } else { } goto ldv_66196; case 1: ; if (ldv_work_6_1 == 2 || ldv_work_6_1 == 3) { ldv_work_6_1 = 4; qla_do_work(ldv_work_struct_6_0); ldv_work_6_1 = 1; } else { } goto ldv_66196; case 2: ; if (ldv_work_6_2 == 2 || ldv_work_6_2 == 3) { ldv_work_6_2 = 4; qla_do_work(ldv_work_struct_6_0); ldv_work_6_2 = 1; } else { } goto ldv_66196; case 3: ; if (ldv_work_6_3 == 2 || ldv_work_6_3 == 3) { ldv_work_6_3 = 4; qla_do_work(ldv_work_struct_6_0); ldv_work_6_3 = 1; } else { } goto ldv_66196; default: ldv_stop(); } ldv_66196: ; return; } } int reg_timer_23(struct timer_list *timer ) { { ldv_timer_list_23 = timer; ldv_timer_state_23 = 1; return (0); } } void call_and_disable_all_6(int state ) { { if (ldv_work_6_0 == state) { call_and_disable_work_6(ldv_work_struct_6_0); } else { } if (ldv_work_6_1 == state) { call_and_disable_work_6(ldv_work_struct_6_1); } else { } if (ldv_work_6_2 == state) { call_and_disable_work_6(ldv_work_struct_6_2); } else { } if (ldv_work_6_3 == state) { call_and_disable_work_6(ldv_work_struct_6_3); } else { } return; } } void disable_suitable_timer_23(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_23) { ldv_timer_state_23 = 0; return; } else { } return; } } void disable_work_6(struct work_struct *work ) { { if ((ldv_work_6_0 == 3 || ldv_work_6_0 == 2) && (unsigned long )ldv_work_struct_6_0 == (unsigned long )work) { ldv_work_6_0 = 1; } else { } if ((ldv_work_6_1 == 3 || ldv_work_6_1 == 2) && (unsigned long )ldv_work_struct_6_1 == (unsigned long )work) { ldv_work_6_1 = 1; } else { } if ((ldv_work_6_2 == 3 || ldv_work_6_2 == 2) && (unsigned long )ldv_work_struct_6_2 == (unsigned long )work) { ldv_work_6_2 = 1; } else { } if ((ldv_work_6_3 == 3 || ldv_work_6_3 == 2) && (unsigned long )ldv_work_struct_6_3 == (unsigned long )work) { ldv_work_6_3 = 1; } else { } return; } } void work_init_6(void) { { ldv_work_6_0 = 0; ldv_work_6_1 = 0; ldv_work_6_2 = 0; ldv_work_6_3 = 0; return; } } void call_and_disable_work_6(struct work_struct *work ) { { if ((ldv_work_6_0 == 2 || ldv_work_6_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_6_0) { qla_do_work(work); ldv_work_6_0 = 1; return; } else { } if ((ldv_work_6_1 == 2 || ldv_work_6_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_6_1) { qla_do_work(work); ldv_work_6_1 = 1; return; } else { } if ((ldv_work_6_2 == 2 || ldv_work_6_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_6_2) { qla_do_work(work); ldv_work_6_2 = 1; return; } else { } if ((ldv_work_6_3 == 2 || ldv_work_6_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_6_3) { qla_do_work(work); ldv_work_6_3 = 1; return; } else { } return; } } bool ldv_queue_work_on_197(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_198(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_199(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct 
*ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_200(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_201(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_202(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } int ldv_del_timer_sync_203(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type___4 ldv_func_res ; int tmp ; { tmp = del_timer_sync(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_21(ldv_func_arg1); return (ldv_func_res); } } bool ldv_queue_work_on_215(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_217(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_216(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_219(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_218(struct workqueue_struct *ldv_func_arg1 ) ; void disable_suitable_timer_24(struct timer_list *timer ) ; void choose_timer_24(struct timer_list *timer ) ; int reg_timer_24(struct timer_list *timer ) ; void activate_pending_timer_24(struct timer_list *timer , unsigned long data , int pending_flag ) ; extern ssize_t seq_read(struct file * , char * , size_t , loff_t * ) ; extern loff_t seq_lseek(struct file * , loff_t , int ) ; extern int seq_putc(struct seq_file * , char ) ; extern int seq_puts(struct seq_file * , char const * ) ; extern int seq_printf(struct seq_file * , char const * , ...) 
; extern int single_open(struct file * , int (*)(struct seq_file * , void * ) , void * ) ; extern int single_release(struct inode * , struct file * ) ; int ldv_scsi_add_host_with_dma_220(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; extern struct dentry *debugfs_create_file(char const * , umode_t , struct dentry * , void * , struct file_operations const * ) ; extern struct dentry *debugfs_create_dir(char const * , struct dentry * ) ; extern void debugfs_remove(struct dentry * ) ; static struct dentry *qla2x00_dfs_root ; static atomic_t qla2x00_dfs_root_count ; static int qla2x00_dfs_fce_show(struct seq_file *s , void *unused ) { scsi_qla_host_t *vha ; uint32_t cnt ; uint32_t *fce ; uint64_t fce_start ; struct qla_hw_data *ha ; uint32_t *tmp ; { vha = (scsi_qla_host_t *)s->private; ha = vha->hw; mutex_lock_nested(& ha->fce_mutex, 0U); seq_puts(s, "FCE Trace Buffer\n"); seq_printf(s, "In Pointer = %llx\n\n", ha->fce_wr); seq_printf(s, "Base = %llx\n\n", ha->fce_dma); seq_puts(s, "FCE Enable Registers\n"); seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", (int )ha->fce_mb[0], (int )ha->fce_mb[2], (int )ha->fce_mb[3], (int )ha->fce_mb[4], (int )ha->fce_mb[5], (int )ha->fce_mb[6]); fce = (uint32_t *)ha->fce; fce_start = ha->fce_dma; cnt = 0U; goto ldv_65896; ldv_65895: ; if ((cnt & 7U) == 0U) { seq_printf(s, "\n%llx: ", (uint64_t )(cnt * 4U) + fce_start); } else { seq_putc(s, 32); } tmp = fce; fce = fce + 1; seq_printf(s, "%08x", *tmp); cnt = cnt + 1U; ldv_65896: ; if ((ha->fce_bufs * 1024U) / 4U > cnt) { goto ldv_65895; } else { } seq_puts(s, "\nEnd\n"); mutex_unlock(& ha->fce_mutex); return (0); } } static int qla2x00_dfs_fce_open(struct inode *inode , struct file *file ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; int rval ; int tmp ; { vha = (scsi_qla_host_t *)inode->i_private; ha = vha->hw; if (*((unsigned long *)ha + 2UL) == 0UL) { goto out; } else { } mutex_lock_nested(& ha->fce_mutex, 0U); rval = qla2x00_disable_fce_trace(vha, & ha->fce_wr, & ha->fce_rd); if (rval != 0) { ql_dbg(8388608U, vha, 28764, "DebugFS: Unable to disable FCE (%d).\n", rval); } else { } ha->flags.fce_enabled = 0U; mutex_unlock(& ha->fce_mutex); out: tmp = single_open(file, & qla2x00_dfs_fce_show, (void *)vha); return (tmp); } } static int qla2x00_dfs_fce_release(struct inode *inode , struct file *file ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; int rval ; int tmp ; { vha = (scsi_qla_host_t *)inode->i_private; ha = vha->hw; if (*((unsigned long *)ha + 2UL) != 0UL) { goto out; } else { } mutex_lock_nested(& ha->fce_mutex, 0U); ha->flags.fce_enabled = 1U; memset(ha->fce, 0, (size_t )(ha->fce_bufs * 1024U)); rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, (int )((uint16_t )ha->fce_bufs), (uint16_t *)(& ha->fce_mb), & ha->fce_bufs); if (rval != 0) { ql_dbg(8388608U, vha, 28685, "DebugFS: Unable to reinitialize FCE (%d).\n", rval); ha->flags.fce_enabled = 0U; } else { } mutex_unlock(& ha->fce_mutex); out: tmp = single_release(inode, file); return (tmp); } } static struct file_operations const dfs_fce_ops = {0, & seq_lseek, & seq_read, 0, 0, 0, 0, 0, 0, 0, 0, 0, & qla2x00_dfs_fce_open, 0, & qla2x00_dfs_fce_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; int qla2x00_dfs_setup(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct lock_class_key __key ; { ha = vha->hw; if ((((ha->device_type & 2048U) == 0U && (ha->device_type & 8192U) == 0U) && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) && ((ha->device_type & 524288U) == 0U && (ha->device_type & 
1048576U) == 0U)) { goto out; } else { } if ((unsigned long )ha->fce == (unsigned long )((void *)0)) { goto out; } else { } if ((unsigned long )qla2x00_dfs_root != (unsigned long )((struct dentry *)0)) { goto create_dir; } else { } atomic_set(& qla2x00_dfs_root_count, 0); qla2x00_dfs_root = debugfs_create_dir("qla2xxx", (struct dentry *)0); if ((unsigned long )qla2x00_dfs_root == (unsigned long )((struct dentry *)0)) { ql_log(1U, vha, 247, "Unable to create debugfs root directory.\n"); goto out; } else { } create_dir: ; if ((unsigned long )ha->dfs_dir != (unsigned long )((struct dentry *)0)) { goto create_nodes; } else { } __mutex_init(& ha->fce_mutex, "&ha->fce_mutex", & __key); ha->dfs_dir = debugfs_create_dir((char const *)(& vha->host_str), qla2x00_dfs_root); if ((unsigned long )ha->dfs_dir == (unsigned long )((struct dentry *)0)) { ql_log(1U, vha, 248, "Unable to create debugfs ha directory.\n"); goto out; } else { } atomic_inc(& qla2x00_dfs_root_count); create_nodes: ha->dfs_fce = debugfs_create_file("fce", 256, ha->dfs_dir, (void *)vha, & dfs_fce_ops); if ((unsigned long )ha->dfs_fce == (unsigned long )((struct dentry *)0)) { ql_log(1U, vha, 249, "Unable to create debugfs fce node.\n"); goto out; } else { } out: ; return (0); } } int qla2x00_dfs_remove(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; if ((unsigned long )ha->dfs_fce != (unsigned long )((struct dentry *)0)) { debugfs_remove(ha->dfs_fce); ha->dfs_fce = (struct dentry *)0; } else { } if ((unsigned long )ha->dfs_dir != (unsigned long )((struct dentry *)0)) { debugfs_remove(ha->dfs_dir); ha->dfs_dir = (struct dentry *)0; atomic_dec(& qla2x00_dfs_root_count); } else { } tmp = atomic_read((atomic_t const *)(& qla2x00_dfs_root_count)); if (tmp == 0 && (unsigned long )qla2x00_dfs_root != (unsigned long )((struct dentry *)0)) { debugfs_remove(qla2x00_dfs_root); qla2x00_dfs_root = (struct dentry *)0; } else { } return (0); } } int ldv_retval_0 ; void disable_suitable_timer_24(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_24) { ldv_timer_state_24 = 0; return; } else { } return; } } void ldv_file_operations_31(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); dfs_fce_ops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); dfs_fce_ops_group2 = (struct file *)tmp___0; return; } } void choose_timer_24(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_24 = 2; return; } } int reg_timer_24(struct timer_list *timer ) { { ldv_timer_list_24 = timer; ldv_timer_state_24 = 1; return (0); } } void activate_pending_timer_24(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_24 == (unsigned long )timer) { if (ldv_timer_state_24 == 2 || pending_flag != 0) { ldv_timer_list_24 = timer; ldv_timer_list_24->data = data; ldv_timer_state_24 = 1; } else { } return; } else { } reg_timer_24(timer); ldv_timer_list_24->data = data; return; } } void ldv_main_exported_31(void) { char *ldvarg115 ; void *tmp ; size_t ldvarg114 ; loff_t *ldvarg113 ; void *tmp___0 ; loff_t ldvarg112 ; int ldvarg111 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg115 = (char *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg113 = (loff_t *)tmp___0; ldv_memset((void *)(& ldvarg114), 0, 8UL); ldv_memset((void *)(& ldvarg112), 0, 8UL); ldv_memset((void *)(& ldvarg111), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if 
(ldv_state_variable_31 == 2) { qla2x00_dfs_fce_release(dfs_fce_ops_group1, dfs_fce_ops_group2); ldv_state_variable_31 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_65954; case 1: ; if (ldv_state_variable_31 == 1) { ldv_retval_0 = qla2x00_dfs_fce_open(dfs_fce_ops_group1, dfs_fce_ops_group2); if (ldv_retval_0 == 0) { ldv_state_variable_31 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_65954; case 2: ; if (ldv_state_variable_31 == 2) { seq_read(dfs_fce_ops_group2, ldvarg115, ldvarg114, ldvarg113); ldv_state_variable_31 = 2; } else { } goto ldv_65954; case 3: ; if (ldv_state_variable_31 == 2) { seq_lseek(dfs_fce_ops_group2, ldvarg112, ldvarg111); ldv_state_variable_31 = 2; } else { } goto ldv_65954; default: ldv_stop(); } ldv_65954: ; return; } } bool ldv_queue_work_on_215(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_216(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_217(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_218(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_219(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_220(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_231(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_233(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_232(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_235(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_234(struct workqueue_struct *ldv_func_arg1 ) ; int reg_timer_25(struct timer_list *timer ) ; void activate_pending_timer_25(struct timer_list *timer , unsigned long data , int 
pending_flag ) ; void choose_timer_25(struct timer_list *timer ) ; void disable_suitable_timer_25(struct timer_list *timer ) ; extern size_t sg_copy_from_buffer(struct scatterlist * , unsigned int , void const * , size_t ) ; extern size_t sg_copy_to_buffer(struct scatterlist * , unsigned int , void * , size_t ) ; __inline static void dma_unmap_sg_attrs___0(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (70), "i" (12UL)); ldv_26981: ; goto ldv_26981; } else { } debug_dma_unmap_sg(dev, sg, nents, (int )dir); if ((unsigned long )ops->unmap_sg != (unsigned long )((void (*)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_sg))(dev, sg, nents, dir, attrs); } else { } return; } } int ldv_scsi_add_host_with_dma_236(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; void qla2x00_bsg_job_done(void *data , void *ptr , int res ) ; void qla2x00_bsg_sp_free(void *data , void *ptr ) ; void qla2x00_bsg_job_done(void *data , void *ptr , int res ) { srb_t *sp ; struct scsi_qla_host *vha ; struct fc_bsg_job *bsg_job ; { sp = (srb_t *)ptr; vha = (struct scsi_qla_host *)data; bsg_job = sp->u.bsg_job; (bsg_job->reply)->result = (uint32_t )res; (*(bsg_job->job_done))(bsg_job); (*(sp->free))((void *)vha, (void *)sp); return; } } void qla2x00_bsg_sp_free(void *data , void *ptr ) { srb_t *sp ; struct scsi_qla_host *vha ; struct fc_bsg_job *bsg_job ; struct qla_hw_data *ha ; struct qla_mt_iocb_rqst_fx00 *piocb_rqst ; { sp = (srb_t *)ptr; vha = (sp->fcport)->vha; bsg_job = sp->u.bsg_job; ha = vha->hw; if ((unsigned int )sp->type == 11U) { piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)(& (bsg_job->request)->rqst_data.h_vendor.vendor_cmd) + 1U; if ((int )piocb_rqst->flags & 1) { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); } else { } if (((int )piocb_rqst->flags & 2) != 0) { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); } else { } } else { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); } if (((unsigned int )sp->type == 5U || (unsigned int )sp->type == 11U) || (unsigned int )sp->type == 4U) { kfree((void const *)sp->fcport); } else { } qla2x00_rel_sp(vha, sp); return; } } int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha , struct qla_fcp_prio_cfg *pri_cfg , uint8_t flag ) { int i ; int ret ; int num_valid ; uint8_t *bcode ; struct qla_fcp_prio_entry *pri_entry ; uint32_t *bcode_val_ptr ; uint32_t bcode_val ; { ret = 1; num_valid = 0; bcode = (uint8_t *)pri_cfg; bcode_val_ptr = (uint32_t *)pri_cfg; bcode_val = *bcode_val_ptr; if (bcode_val == 4294967295U) { ql_dbg(8388608U, vha, 28753, "No FCP Priority config data.\n"); return (0); } else { } if 
((((unsigned int )*bcode != 72U || (unsigned int )*(bcode + 1UL) != 81U) || (unsigned int )*(bcode + 2UL) != 79U) || (unsigned int )*(bcode + 3UL) != 83U) { ql_dbg(8388608U, vha, 28754, "Invalid FCP Priority data header. bcode=0x%x.\n", bcode_val); return (0); } else { } if ((unsigned int )flag != 1U) { return (ret); } else { } pri_entry = (struct qla_fcp_prio_entry *)(& pri_cfg->entry); i = 0; goto ldv_65996; ldv_65995: ; if (((int )pri_entry->flags & 2) != 0) { num_valid = num_valid + 1; } else { } pri_entry = pri_entry + 1; i = i + 1; ldv_65996: ; if ((int )pri_cfg->num_entries > i) { goto ldv_65995; } else { } if (num_valid == 0) { ql_dbg(8388608U, vha, 28755, "No valid FCP Priority data entries.\n"); ret = 0; } else { ql_dbg(8388608U, vha, 28756, "Valid FCP priority data. num entries = %d.\n", num_valid); } return (ret); } } static int qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int ret ; uint32_t len ; uint32_t oper ; size_t tmp___0 ; void *tmp___1 ; int tmp___2 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; ret = 0; if ((((((ha->device_type & 128U) == 0U && (ha->device_type & 256U) == 0U) && ((ha->device_type & 512U) == 0U && (ha->device_type & 1024U) == 0U)) && (ha->device_type & 4096U) == 0U) && (ha->device_type & 2048U) == 0U) && ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U)) { ret = -22; goto exit_fcp_prio_cfg; } else { } oper = (bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; if ((unsigned long )ha->fcp_prio_cfg == (unsigned long )((struct qla_fcp_prio_cfg *)0) && oper != 3U) { ret = -22; goto exit_fcp_prio_cfg; } else { } switch (oper) { case 0U: ; if (*((unsigned long *)ha + 2UL) != 0UL) { ha->flags.fcp_prio_enabled = 0U; (ha->fcp_prio_cfg)->attributes = (unsigned int )(ha->fcp_prio_cfg)->attributes & 254U; qla24xx_update_all_fcp_prio(vha); (bsg_job->reply)->result = 0U; } else { ret = -22; (bsg_job->reply)->result = 458752U; goto exit_fcp_prio_cfg; } goto ldv_66009; case 1U: ; if (*((unsigned long *)ha + 2UL) == 0UL) { if ((unsigned long )ha->fcp_prio_cfg != (unsigned long )((struct qla_fcp_prio_cfg *)0)) { ha->flags.fcp_prio_enabled = 1U; (ha->fcp_prio_cfg)->attributes = (uint8_t )((unsigned int )(ha->fcp_prio_cfg)->attributes | 1U); qla24xx_update_all_fcp_prio(vha); (bsg_job->reply)->result = 0U; } else { ret = -22; (bsg_job->reply)->result = 458752U; goto exit_fcp_prio_cfg; } } else { } goto ldv_66009; case 2U: len = bsg_job->reply_payload.payload_len; if (len == 0U || len > 32768U) { ret = -22; (bsg_job->reply)->result = 458752U; goto exit_fcp_prio_cfg; } else { } (bsg_job->reply)->result = 0U; tmp___0 = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void const *)ha->fcp_prio_cfg, (size_t )len); (bsg_job->reply)->reply_payload_rcv_len = (uint32_t )tmp___0; goto ldv_66009; case 3U: len = bsg_job->request_payload.payload_len; if (len == 0U || len > 32768U) { (bsg_job->reply)->result = 458752U; ret = -22; goto exit_fcp_prio_cfg; } else { } if ((unsigned long )ha->fcp_prio_cfg == (unsigned long )((struct qla_fcp_prio_cfg *)0)) { tmp___1 = vmalloc(32768UL); ha->fcp_prio_cfg = (struct qla_fcp_prio_cfg *)tmp___1; if ((unsigned long )ha->fcp_prio_cfg == (unsigned long )((struct qla_fcp_prio_cfg *)0)) { ql_log(1U, vha, 28752, "Unable to allocate memory for fcp prio config data (%x).\n", 32768); (bsg_job->reply)->result = 458752U; ret = -12; goto 
exit_fcp_prio_cfg; } else { } } else { } memset((void *)ha->fcp_prio_cfg, 0, 32768UL); sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)ha->fcp_prio_cfg, 32768UL); tmp___2 = qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1); if (tmp___2 == 0) { (bsg_job->reply)->result = 458752U; ret = -22; vfree((void const *)ha->fcp_prio_cfg); ha->fcp_prio_cfg = (struct qla_fcp_prio_cfg *)0; goto exit_fcp_prio_cfg; } else { } ha->flags.fcp_prio_enabled = 0U; if ((int )(ha->fcp_prio_cfg)->attributes & 1) { ha->flags.fcp_prio_enabled = 1U; } else { } qla24xx_update_all_fcp_prio(vha); (bsg_job->reply)->result = 0U; goto ldv_66009; default: ret = -22; goto ldv_66009; } ldv_66009: ; exit_fcp_prio_cfg: ; if (ret == 0) { (*(bsg_job->job_done))(bsg_job); } else { } return (ret); } } static int qla2x00_process_els(struct fc_bsg_job *bsg_job ) { struct fc_rport *rport ; fc_port_t *fcport ; struct Scsi_Host *host ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; srb_t *sp ; char const *type ; int req_sg_cnt ; int rsp_sg_cnt ; int rval ; uint16_t nextlid ; void *tmp ; void *tmp___0 ; int tmp___1 ; { fcport = (fc_port_t *)0; rval = 262144; nextlid = 0U; if ((bsg_job->request)->msgcode == 1073741825U) { rport = bsg_job->rport; fcport = *((fc_port_t **)rport->dd_data); host = dev_to_shost(rport->dev.parent); tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; type = "FC_BSG_RPT_ELS"; } else { host = bsg_job->shost; tmp___0 = shost_priv(host); vha = (scsi_qla_host_t *)tmp___0; ha = vha->hw; type = "FC_BSG_HST_ELS_NOLOGIN"; } if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(1U, vha, 28677, "Host not online.\n"); rval = -5; goto done; } else { } if ((ha->device_type & 134217728U) == 0U) { ql_dbg(8388608U, vha, 28673, "ELS passthru not supported for ISP23xx based adapters.\n"); rval = -1; goto done; } else { } if (bsg_job->request_payload.sg_cnt > 1 || bsg_job->reply_payload.sg_cnt > 1) { ql_dbg(8388608U, vha, 28674, "Multiple SG\'s are not suppored for ELS requests, request_sg_cnt=%x reply_sg_cnt=%x.\n", bsg_job->request_payload.sg_cnt, bsg_job->reply_payload.sg_cnt); rval = -1; goto done; } else { } if ((bsg_job->request)->msgcode == 1073741825U) { tmp___1 = qla2x00_fabric_login(vha, fcport, & nextlid); if (tmp___1 != 0) { ql_dbg(8388608U, vha, 28675, "Failed to login port %06X for ELS passthru.\n", (int )fcport->d_id.b24); rval = -5; goto done; } else { } } else { fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { rval = -12; goto done; } else { } fcport->vha = vha; fcport->d_id.b.al_pa = (bsg_job->request)->rqst_data.h_els.port_id[0]; fcport->d_id.b.area = (bsg_job->request)->rqst_data.h_els.port_id[1]; fcport->d_id.b.domain = (bsg_job->request)->rqst_data.h_els.port_id[2]; fcport->loop_id = (unsigned int )fcport->d_id.b.al_pa == 253U ? 
2045U : 2046U; } req_sg_cnt = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); if (req_sg_cnt == 0) { rval = -12; goto done_free_fcport; } else { } rsp_sg_cnt = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); if (rsp_sg_cnt == 0) { rval = -12; goto done_free_fcport; } else { } if (bsg_job->request_payload.sg_cnt != req_sg_cnt || bsg_job->reply_payload.sg_cnt != rsp_sg_cnt) { ql_log(1U, vha, 28680, "dma mapping resulted in different sg counts, request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt, req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); rval = -11; goto done_unmap_sg; } else { } sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { rval = -12; goto done_unmap_sg; } else { } sp->type = (bsg_job->request)->msgcode == 1073741825U ? 3U : 4U; sp->name = (bsg_job->request)->msgcode == 1073741825U ? (char *)"bsg_els_rpt" : (char *)"bsg_els_hst"; sp->u.bsg_job = bsg_job; sp->free = & qla2x00_bsg_sp_free; sp->done = & qla2x00_bsg_job_done; ql_dbg(8388608U, vha, 28682, "bsg rqst type: %s els type: %x - loop-id=%x portid=%-2x%02x%02x.\n", type, (int )(bsg_job->request)->rqst_data.h_els.command_code, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); rval = qla2x00_start_sp(sp); if (rval != 0) { ql_log(1U, vha, 28686, "qla2x00_start_sp failed = %d\n", rval); qla2x00_rel_sp(vha, sp); rval = -5; goto done_unmap_sg; } else { } return (rval); done_unmap_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); goto done_free_fcport; done_free_fcport: ; if ((bsg_job->request)->msgcode == 1073741825U) { kfree((void const *)fcport); } else { } done: ; return (rval); } } __inline uint16_t qla24xx_calc_ct_iocbs(uint16_t dsds ) { uint16_t iocbs ; { iocbs = 1U; if ((unsigned int )dsds > 2U) { iocbs = (int )((uint16_t )(((int )dsds + -2) / 5)) + (int )iocbs; if (((int )dsds + -2) % 5 != 0) { iocbs = (uint16_t )((int )iocbs + 1); } else { } } else { } return (iocbs); } } static int qla2x00_process_ct(struct fc_bsg_job *bsg_job ) { srb_t *sp ; struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; int req_sg_cnt ; int rsp_sg_cnt ; uint16_t loop_id ; struct fc_port *fcport ; char *type ; fc_port_t *tmp___0 ; uint16_t tmp___1 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 262144; type = (char *)"FC_BSG_HST_CT"; req_sg_cnt = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); if (req_sg_cnt == 0) { ql_log(1U, vha, 28687, "dma_map_sg return %d for request\n", req_sg_cnt); rval = -12; goto done; } else { } rsp_sg_cnt = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); if (rsp_sg_cnt == 0) { ql_log(1U, vha, 28688, "dma_map_sg return %d for reply\n", rsp_sg_cnt); rval = -12; goto done; } else { } if (bsg_job->request_payload.sg_cnt != req_sg_cnt || bsg_job->reply_payload.sg_cnt != rsp_sg_cnt) { ql_log(1U, vha, 28689, "request_sg_cnt: %x 
dma_request_sg_cnt: %x reply_sg_cnt:%x dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); rval = -11; goto done_unmap_sg; } else { } if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(1U, vha, 28690, "Host is not online.\n"); rval = -5; goto done_unmap_sg; } else { } loop_id = (uint16_t )((bsg_job->request)->rqst_data.h_ct.preamble_word1 >> 24); switch ((int )loop_id) { case 252: loop_id = 2044U; goto ldv_66051; case 250: loop_id = vha->mgmt_svr_loop_id; goto ldv_66051; default: ql_dbg(8388608U, vha, 28691, "Unknown loop id: %x.\n", (int )loop_id); rval = -22; goto done_unmap_sg; } ldv_66051: tmp___0 = qla2x00_alloc_fcport(vha, 208U); fcport = tmp___0; if ((unsigned long )fcport == (unsigned long )((struct fc_port *)0)) { ql_log(1U, vha, 28692, "Failed to allocate fcport.\n"); rval = -12; goto done_unmap_sg; } else { } fcport->vha = vha; fcport->d_id.b.al_pa = (bsg_job->request)->rqst_data.h_ct.port_id[0]; fcport->d_id.b.area = (bsg_job->request)->rqst_data.h_ct.port_id[1]; fcport->d_id.b.domain = (bsg_job->request)->rqst_data.h_ct.port_id[2]; fcport->loop_id = loop_id; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_log(1U, vha, 28693, "qla2x00_get_sp failed.\n"); rval = -12; goto done_free_fcport; } else { } sp->type = 5U; sp->name = (char *)"bsg_ct"; tmp___1 = qla24xx_calc_ct_iocbs((int )((uint16_t )req_sg_cnt) + (int )((uint16_t )rsp_sg_cnt)); sp->iocbs = (int )tmp___1; sp->u.bsg_job = bsg_job; sp->free = & qla2x00_bsg_sp_free; sp->done = & qla2x00_bsg_job_done; ql_dbg(8388608U, vha, 28694, "bsg rqst type: %s else type: %x - loop-id=%x portid=%02x%02x%02x.\n", type, (bsg_job->request)->rqst_data.h_ct.preamble_word2 >> 16, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); rval = qla2x00_start_sp(sp); if (rval != 0) { ql_log(1U, vha, 28695, "qla2x00_start_sp failed=%d.\n", rval); qla2x00_rel_sp(vha, sp); rval = -5; goto done_free_fcport; } else { } return (rval); done_free_fcport: kfree((void const *)fcport); done_unmap_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); done: ; return (rval); } } __inline static int qla81xx_reset_loopback_mode(scsi_qla_host_t *vha , uint16_t *config , int wait , int wait2 ) { int ret ; int rval ; uint16_t new_config[4U] ; struct qla_hw_data *ha ; unsigned long tmp ; unsigned long tmp___0 ; { ret = 0; rval = 0; ha = vha->hw; if (((ha->device_type & 8192U) == 0U && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U) { goto done_reset_internal; } else { } memset((void *)(& new_config), 0, 8UL); if (((int )*config & 14) >> 1 == 2 || ((int )*config & 14) >> 1 == 4) { new_config[0] = (unsigned int )*config & 65521U; ql_dbg(8388608U, vha, 28863, "new_config[0]=%02x\n", (int )new_config[0] & 14); memcpy((void *)(& new_config) + 1U, (void const *)config + 1U, 6UL); ha->notify_dcbx_comp = wait; ha->notify_lb_portup_comp = wait2; ret = qla81xx_set_port_config(vha, (uint16_t *)(& new_config)); if (ret != 0) { ql_log(1U, vha, 28709, "Set port config failed.\n"); ha->notify_dcbx_comp = 0; ha->notify_lb_portup_comp = 0; rval = -22; goto done_reset_internal; } else { } if (wait != 0) { tmp = wait_for_completion_timeout(& ha->dcbx_comp, 5000UL); if (tmp == 0UL) { 
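/* wait_for_completion_timeout() returned 0: no DCBX completion arrived within the 5000-jiffy window, so the code below clears the notify flags and fails with -EINVAL (-22). */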
ql_dbg(8388608U, vha, 28710, "DCBX completion not received.\n"); ha->notify_dcbx_comp = 0; ha->notify_lb_portup_comp = 0; rval = -22; goto done_reset_internal; } else { ql_dbg(8388608U, vha, 28711, "DCBX completion received.\n"); } } else { ql_dbg(8388608U, vha, 28711, "DCBX completion received.\n"); } if (wait2 != 0) { tmp___0 = wait_for_completion_timeout(& ha->lb_portup_comp, 2500UL); if (tmp___0 == 0UL) { ql_dbg(8388608U, vha, 28869, "Port up completion not received.\n"); ha->notify_lb_portup_comp = 0; rval = -22; goto done_reset_internal; } else { ql_dbg(8388608U, vha, 28870, "Port up completion received.\n"); } } else { ql_dbg(8388608U, vha, 28870, "Port up completion received.\n"); } ha->notify_dcbx_comp = 0; ha->notify_lb_portup_comp = 0; } else { } done_reset_internal: ; return (rval); } } __inline static int qla81xx_set_loopback_mode(scsi_qla_host_t *vha , uint16_t *config , uint16_t *new_config , uint16_t mode ) { int ret ; int rval ; unsigned long rem_tmo ; unsigned long current_tmo ; struct qla_hw_data *ha ; { ret = 0; rval = 0; rem_tmo = 0UL; current_tmo = 0UL; ha = vha->hw; if (((ha->device_type & 8192U) == 0U && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U) { goto done_set_internal; } else { } if ((unsigned int )mode == 241U) { *new_config = (uint16_t )((unsigned int )*config | 4U); } else if ((unsigned int )mode == 242U) { *new_config = (uint16_t )((unsigned int )*config | 8U); } else { } ql_dbg(8388608U, vha, 28862, "new_config[0]=%02x\n", (int )*new_config & 14); memcpy((void *)new_config + 1U, (void const *)config + 1U, 6UL); ha->notify_dcbx_comp = 1; ret = qla81xx_set_port_config(vha, new_config); if (ret != 0) { ql_log(1U, vha, 28705, "set port config failed.\n"); ha->notify_dcbx_comp = 0; rval = -22; goto done_set_internal; } else { } current_tmo = 5000UL; ldv_66079: rem_tmo = wait_for_completion_timeout(& ha->dcbx_comp, current_tmo); if (ha->idc_extend_tmo == 0U || rem_tmo != 0UL) { ha->idc_extend_tmo = 0U; goto ldv_66078; } else { } current_tmo = (unsigned long )(ha->idc_extend_tmo * 250U); ha->idc_extend_tmo = 0U; goto ldv_66079; ldv_66078: ; if (rem_tmo == 0UL) { ql_dbg(8388608U, vha, 28706, "DCBX completion not received.\n"); ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0); if (ret != 0) { (*((ha->isp_ops)->fw_dump))(vha, 0); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } rval = -22; } else if (*((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(8388608U, vha, 28867, "Bad status in IDC Completion AEN\n"); rval = -22; ha->flags.idc_compl_status = 0U; } else { ql_dbg(8388608U, vha, 28707, "DCBX completion received.\n"); } ha->notify_dcbx_comp = 0; ha->idc_extend_tmo = 0U; done_set_internal: ; return (rval); } } static int qla2x00_process_loopback(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint8_t command_sent ; char *type ; struct msg_echo_lb elreq ; uint16_t response[32U] ; uint16_t config[4U] ; uint16_t new_config[4U] ; uint8_t *fw_sts_ptr ; uint8_t *req_data ; dma_addr_t req_data_dma ; uint32_t req_data_len ; uint8_t *rsp_data ; dma_addr_t rsp_data_dma ; uint32_t rsp_data_len ; int tmp___0 ; int tmp___1 ; void *tmp___2 ; void *tmp___3 ; int tmp___4 ; int tmp___5 ; int ret ; int tmp___6 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; req_data = (uint8_t *)0U; rsp_data = (uint8_t *)0U; if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(1U, vha, 28697, "Host is not online.\n"); 
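/* Adapter is not online; abort the loopback request with -EIO (-5). */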
return (-5); } else { } tmp___0 = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); elreq.req_sg_cnt = (uint16_t )tmp___0; if ((unsigned int )elreq.req_sg_cnt == 0U) { ql_log(1U, vha, 28698, "dma_map_sg returned %d for request.\n", (int )elreq.req_sg_cnt); return (-12); } else { } tmp___1 = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); elreq.rsp_sg_cnt = (uint16_t )tmp___1; if ((unsigned int )elreq.rsp_sg_cnt == 0U) { ql_log(1U, vha, 28699, "dma_map_sg returned %d for reply.\n", (int )elreq.rsp_sg_cnt); rval = -12; goto done_unmap_req_sg; } else { } if ((int )elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt || (int )elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt) { ql_log(1U, vha, 28700, "dma mapping resulted in different sg counts, request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", bsg_job->request_payload.sg_cnt, (int )elreq.req_sg_cnt, bsg_job->reply_payload.sg_cnt, (int )elreq.rsp_sg_cnt); rval = -11; goto done_unmap_sg; } else { } rsp_data_len = bsg_job->request_payload.payload_len; req_data_len = rsp_data_len; tmp___2 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )req_data_len, & req_data_dma, 208U, (struct dma_attrs *)0); req_data = (uint8_t *)tmp___2; if ((unsigned long )req_data == (unsigned long )((uint8_t *)0U)) { ql_log(1U, vha, 28701, "dma alloc failed for req_data.\n"); rval = -12; goto done_unmap_sg; } else { } tmp___3 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )rsp_data_len, & rsp_data_dma, 208U, (struct dma_attrs *)0); rsp_data = (uint8_t *)tmp___3; if ((unsigned long )rsp_data == (unsigned long )((uint8_t *)0U)) { ql_log(1U, vha, 28676, "dma alloc failed for rsp_data.\n"); rval = -12; goto done_free_dma_req; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)req_data, (size_t )req_data_len); elreq.send_dma = req_data_dma; elreq.rcv_dma = rsp_data_dma; elreq.transfer_size = req_data_len; elreq.options = (uint16_t )(bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; elreq.iteration_count = (bsg_job->request)->rqst_data.h_vendor.vendor_cmd[2]; tmp___6 = atomic_read((atomic_t const *)(& vha->loop_state)); if ((tmp___6 == 5 && ((unsigned int )ha->current_topology == 8U || (((((ha->device_type & 8192U) != 0U || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) && *((uint32_t *)req_data) == 16U) && req_data_len == 252U))) && (unsigned int )elreq.options == 242U) { type = (char *)"FC_BSG_HST_VENDOR_ECHO_DIAG"; ql_dbg(8388608U, vha, 28702, "BSG request type: %s.\n", type); command_sent = 1U; rval = qla2x00_echo_test(vha, & elreq, (uint16_t *)(& response)); } else if (((ha->device_type & 8192U) != 0U || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { memset((void *)(& config), 0, 8UL); memset((void *)(& new_config), 0, 8UL); tmp___4 = qla81xx_get_port_config(vha, (uint16_t *)(& config)); if (tmp___4 != 0) { ql_log(1U, vha, 28703, "Get port config failed.\n"); rval = -1; goto done_free_dma_rsp; } else { } if (((int )config[0] & 14) != 0) { ql_dbg(8388608U, vha, 28868, "Loopback operation already in progress.\n"); rval = -11; goto done_free_dma_rsp; } else { } ql_dbg(8388608U, vha, 28864, "elreq.options=%04x\n", (int )elreq.options); if ((unsigned int )elreq.options == 242U) { if ((ha->device_type & 65536U) != 0U || (ha->device_type & 262144U) != 0U) { rval = 
qla81xx_set_loopback_mode(vha, (uint16_t *)(& config), (uint16_t *)(& new_config), (int )elreq.options); } else { rval = qla81xx_reset_loopback_mode(vha, (uint16_t *)(& config), 1, 0); } } else { rval = qla81xx_set_loopback_mode(vha, (uint16_t *)(& config), (uint16_t *)(& new_config), (int )elreq.options); } if (rval != 0) { rval = -1; goto done_free_dma_rsp; } else { } type = (char *)"FC_BSG_HST_VENDOR_LOOPBACK"; ql_dbg(8388608U, vha, 28712, "BSG request type: %s.\n", type); command_sent = 0U; rval = qla2x00_loopback_test(vha, & elreq, (uint16_t *)(& response)); if ((unsigned int )response[0] == 16389U && (unsigned int )response[1] == 23U) { ql_log(1U, vha, 28713, "MBX command error, Aborting ISP.\n"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); if ((ha->device_type & 8192U) != 0U) { tmp___5 = qla81xx_restart_mpi_firmware(vha); if (tmp___5 != 0) { ql_log(1U, vha, 28714, "MPI reset failed.\n"); } else { } } else { } rval = -5; goto done_free_dma_rsp; } else { } if ((unsigned int )new_config[0] != 0U) { ret = qla81xx_reset_loopback_mode(vha, (uint16_t *)(& new_config), 0, 1); if (ret != 0) { (*((ha->isp_ops)->fw_dump))(vha, 0); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } } else { } } else { type = (char *)"FC_BSG_HST_VENDOR_LOOPBACK"; ql_dbg(8388608U, vha, 28715, "BSG request type: %s.\n", type); command_sent = 0U; rval = qla2x00_loopback_test(vha, & elreq, (uint16_t *)(& response)); } if (rval != 0) { ql_log(1U, vha, 28716, "Vendor request %s failed.\n", type); rval = 0; (bsg_job->reply)->result = 458752U; (bsg_job->reply)->reply_payload_rcv_len = 0U; } else { ql_dbg(8388608U, vha, 28717, "Vendor request %s completed.\n", type); (bsg_job->reply)->result = 0U; sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void const *)rsp_data, (size_t )rsp_data_len); } bsg_job->reply_len = 81U; fw_sts_ptr = (uint8_t *)(bsg_job->req)->sense + 16UL; memcpy((void *)fw_sts_ptr, (void const *)(& response), 64UL); fw_sts_ptr = fw_sts_ptr + 64UL; *fw_sts_ptr = command_sent; done_free_dma_rsp: dma_free_attrs(& (ha->pdev)->dev, (size_t )rsp_data_len, (void *)rsp_data, rsp_data_dma, (struct dma_attrs *)0); done_free_dma_req: dma_free_attrs(& (ha->pdev)->dev, (size_t )req_data_len, (void *)req_data, req_data_dma, (struct dma_attrs *)0); done_unmap_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); done_unmap_req_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); if (rval == 0) { (*(bsg_job->job_done))(bsg_job); } else { } return (rval); } } static int qla84xx_reset(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint32_t flag ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; if ((ha->device_type & 4096U) == 0U) { ql_dbg(8388608U, vha, 28719, "Not 84xx, exiting.\n"); return (-22); } else { } flag = (bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; rval = qla84xx_reset_chip(vha, flag == 4U); if (rval != 0) { ql_log(1U, vha, 28720, "Vendor request 84xx reset failed.\n"); rval = 458752; } else { ql_dbg(8388608U, vha, 28721, "Vendor request 84xx reset completed.\n"); (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); } return (rval); } } static int 
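/*
 * In outline, qla84xx_updatefw() below copies the firmware image from the
 * BSG request scatter-gather list into a coherent DMA buffer, wraps it in a
 * verify-chip IOCB (entry type 27, i.e. 0x1b) carrying the image size and
 * version word, and issues that IOCB with a 120-second timeout; option bit
 * 0x8 is additionally set when vendor_cmd[1] equals 6.
 */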
qla84xx_updatefw(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; struct verify_chip_entry_84xx *mn ; dma_addr_t mn_dma ; dma_addr_t fw_dma ; void *fw_buf ; int rval ; uint32_t sg_cnt ; uint32_t data_len ; uint16_t options ; uint32_t flag ; uint32_t fw_ver ; int tmp___0 ; void *tmp___1 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; mn = (struct verify_chip_entry_84xx *)0; fw_buf = (void *)0; rval = 0; if ((ha->device_type & 4096U) == 0U) { ql_dbg(8388608U, vha, 28722, "Not 84xx, exiting.\n"); return (-22); } else { } tmp___0 = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); sg_cnt = (uint32_t )tmp___0; if (sg_cnt == 0U) { ql_log(1U, vha, 28723, "dma_map_sg returned %d for request.\n", sg_cnt); return (-12); } else { } if ((uint32_t )bsg_job->request_payload.sg_cnt != sg_cnt) { ql_log(1U, vha, 28724, "DMA mapping resulted in different sg counts, request_sg_cnt: %x dma_request_sg_cnt: %x.\n", bsg_job->request_payload.sg_cnt, sg_cnt); rval = -11; goto done_unmap_sg; } else { } data_len = bsg_job->request_payload.payload_len; fw_buf = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )data_len, & fw_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )fw_buf == (unsigned long )((void *)0)) { ql_log(1U, vha, 28725, "DMA alloc failed for fw_buf.\n"); rval = -12; goto done_unmap_sg; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, fw_buf, (size_t )data_len); tmp___1 = dma_pool_alloc(ha->s_dma_pool, 208U, & mn_dma); mn = (struct verify_chip_entry_84xx *)tmp___1; if ((unsigned long )mn == (unsigned long )((struct verify_chip_entry_84xx *)0)) { ql_log(1U, vha, 28726, "DMA alloc failed for fw buffer.\n"); rval = -12; goto done_free_fw_buf; } else { } flag = (bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; fw_ver = *((uint32_t *)fw_buf + 2UL); memset((void *)mn, 0, 64UL); mn->entry_type = 27U; mn->entry_count = 1U; options = 16386U; if (flag == 6U) { options = (uint16_t )((unsigned int )options | 8U); } else { } mn->options = options; mn->fw_ver = fw_ver; mn->fw_size = data_len; mn->fw_seq_size = data_len; mn->dseg_address[0] = (unsigned int )fw_dma; mn->dseg_address[1] = (unsigned int )(fw_dma >> 32ULL); mn->dseg_length = data_len; mn->data_seg_cnt = 1U; rval = qla2x00_issue_iocb_timeout(vha, (void *)mn, mn_dma, 0UL, 120U); if (rval != 0) { ql_log(1U, vha, 28727, "Vendor request 84xx updatefw failed.\n"); rval = 458752; } else { ql_dbg(8388608U, vha, 28728, "Vendor request 84xx updatefw completed.\n"); bsg_job->reply_len = 16U; (bsg_job->reply)->result = 0U; } dma_pool_free(ha->s_dma_pool, (void *)mn, mn_dma); done_free_fw_buf: dma_free_attrs(& (ha->pdev)->dev, (size_t )data_len, fw_buf, fw_dma, (struct dma_attrs *)0); done_unmap_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); if (rval == 0) { (*(bsg_job->job_done))(bsg_job); } else { } return (rval); } } static int qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; struct access_chip_84xx *mn ; dma_addr_t mn_dma ; dma_addr_t mgmt_dma ; void *mgmt_b ; int rval ; struct qla_bsg_a84_mgmt *ql84_mgmt ; uint32_t sg_cnt ; uint32_t data_len ; uint32_t dma_direction ; void *tmp___0 ; int tmp___1 ; int tmp___2 ; { host = 
bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; mn = (struct access_chip_84xx *)0; mgmt_b = (void *)0; rval = 0; data_len = 0U; dma_direction = 3U; if ((ha->device_type & 4096U) == 0U) { ql_log(1U, vha, 28730, "Not 84xx, exiting.\n"); return (-22); } else { } tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & mn_dma); mn = (struct access_chip_84xx *)tmp___0; if ((unsigned long )mn == (unsigned long )((struct access_chip_84xx *)0)) { ql_log(1U, vha, 28732, "DMA alloc failed for fw buffer.\n"); return (-12); } else { } memset((void *)mn, 0, 64UL); mn->entry_type = 43U; mn->entry_count = 1U; ql84_mgmt = (struct qla_bsg_a84_mgmt *)bsg_job->request + 20U; switch ((int )ql84_mgmt->mgmt.cmd) { case 0: ; case 3: tmp___1 = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); sg_cnt = (uint32_t )tmp___1; if (sg_cnt == 0U) { ql_log(1U, vha, 28733, "dma_map_sg returned %d for reply.\n", sg_cnt); rval = -12; goto exit_mgmt; } else { } dma_direction = 2U; if ((uint32_t )bsg_job->reply_payload.sg_cnt != sg_cnt) { ql_log(1U, vha, 28734, "DMA mapping resulted in different sg counts, reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", bsg_job->reply_payload.sg_cnt, sg_cnt); rval = -11; goto done_unmap_sg; } else { } data_len = bsg_job->reply_payload.payload_len; mgmt_b = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )data_len, & mgmt_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )mgmt_b == (unsigned long )((void *)0)) { ql_log(1U, vha, 28735, "DMA alloc failed for mgmt_b.\n"); rval = -12; goto done_unmap_sg; } else { } if ((unsigned int )ql84_mgmt->mgmt.cmd == 0U) { mn->options = 0U; mn->parameter1 = ql84_mgmt->mgmt.mgmtp.u.mem.start_addr; } else if ((unsigned int )ql84_mgmt->mgmt.cmd == 3U) { mn->options = 3U; mn->parameter1 = ql84_mgmt->mgmt.mgmtp.u.info.type; mn->parameter2 = ql84_mgmt->mgmt.mgmtp.u.info.context; } else { } goto ldv_66150; case 1: tmp___2 = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); sg_cnt = (uint32_t )tmp___2; if (sg_cnt == 0U) { ql_log(1U, vha, 28736, "dma_map_sg returned %d.\n", sg_cnt); rval = -12; goto exit_mgmt; } else { } dma_direction = 1U; if ((uint32_t )bsg_job->request_payload.sg_cnt != sg_cnt) { ql_log(1U, vha, 28737, "DMA mapping resulted in different sg counts, request_sg_cnt: %x dma_request_sg_cnt: %x.\n", bsg_job->request_payload.sg_cnt, sg_cnt); rval = -11; goto done_unmap_sg; } else { } data_len = bsg_job->request_payload.payload_len; mgmt_b = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )data_len, & mgmt_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )mgmt_b == (unsigned long )((void *)0)) { ql_log(1U, vha, 28738, "DMA alloc failed for mgmt_b.\n"); rval = -12; goto done_unmap_sg; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, mgmt_b, (size_t )data_len); mn->options = 1U; mn->parameter1 = ql84_mgmt->mgmt.mgmtp.u.mem.start_addr; goto ldv_66150; case 2: mn->options = 2U; mn->parameter1 = ql84_mgmt->mgmt.mgmtp.u.config.id; mn->parameter2 = ql84_mgmt->mgmt.mgmtp.u.config.param0; mn->parameter3 = ql84_mgmt->mgmt.mgmtp.u.config.param1; goto ldv_66150; default: rval = -5; goto exit_mgmt; } ldv_66150: ; if ((unsigned int )ql84_mgmt->mgmt.cmd != 2U) { mn->total_byte_cnt = ql84_mgmt->mgmt.len; mn->dseg_count = 1U; mn->dseg_address[0] = (unsigned int )mgmt_dma; mn->dseg_address[1] = (unsigned int )(mgmt_dma >> 32ULL); 
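/*
 * The 64-bit DMA address of the management buffer is programmed as two
 * 32-bit words: the low dword goes into dseg_address[0] above and the high
 * dword (mgmt_dma >> 32) into dseg_address[1]; a single data segment is
 * described, so dseg_count stays at 1.
 */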
mn->dseg_length = ql84_mgmt->mgmt.len; } else { } rval = qla2x00_issue_iocb(vha, (void *)mn, mn_dma, 0UL); if (rval != 0) { ql_log(1U, vha, 28739, "Vendor request 84xx mgmt failed.\n"); rval = 458752; } else { ql_dbg(8388608U, vha, 28740, "Vendor request 84xx mgmt completed.\n"); bsg_job->reply_len = 16U; (bsg_job->reply)->result = 0U; if ((unsigned int )ql84_mgmt->mgmt.cmd == 0U || (unsigned int )ql84_mgmt->mgmt.cmd == 3U) { (bsg_job->reply)->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void const *)mgmt_b, (size_t )data_len); } else { } } done_unmap_sg: ; if ((unsigned long )mgmt_b != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )data_len, mgmt_b, mgmt_dma, (struct dma_attrs *)0); } else { } if (dma_direction == 1U) { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); } else if (dma_direction == 2U) { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); } else { } exit_mgmt: dma_pool_free(ha->s_dma_pool, (void *)mn, mn_dma); if (rval == 0) { (*(bsg_job->job_done))(bsg_job); } else { } return (rval); } } static int qla24xx_iidma(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; int rval ; struct qla_port_param *port_param ; fc_port_t *fcport ; int found ; uint16_t mb[32U] ; uint8_t *rsp_ptr ; struct list_head const *__mptr ; int tmp___0 ; struct list_head const *__mptr___0 ; int tmp___1 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; rval = 0; port_param = (struct qla_port_param *)0; fcport = (fc_port_t *)0; found = 0; rsp_ptr = (uint8_t *)0U; if (((vha->hw)->device_type & 67108864U) == 0U) { ql_log(2U, vha, 28742, "iiDMA not supported.\n"); return (-22); } else { } port_param = (struct qla_port_param *)bsg_job->request + 20U; if ((unsigned int )port_param->fc_scsi_addr.dest_type != 2U) { ql_log(1U, vha, 28744, "Invalid destination type.\n"); return (-22); } else { } __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_66172; ldv_66171: ; if ((unsigned int )fcport->port_type != 5U) { goto ldv_66169; } else { } tmp___0 = memcmp((void const *)(& port_param->fc_scsi_addr.dest_addr.wwpn), (void const *)(& fcport->port_name), 8UL); if (tmp___0 != 0) { goto ldv_66169; } else { } found = 1; goto ldv_66170; ldv_66169: __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_66172: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66171; } else { } ldv_66170: ; if (found == 0) { ql_log(1U, vha, 28745, "Failed to find port.\n"); return (-22); } else { } tmp___1 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___1 != 4) { ql_log(1U, vha, 28746, "Port is not online.\n"); return (-22); } else { } if ((fcport->flags & 2U) != 0U) { ql_log(1U, vha, 28747, "Remote port not logged in flags = 0x%x.\n", fcport->flags); return (-22); } else { } if ((unsigned int )port_param->mode != 0U) { rval = qla2x00_set_idma_speed(vha, (int )fcport->loop_id, (int )port_param->speed, (uint16_t *)(& mb)); } else { rval = qla2x00_get_idma_speed(vha, (int )fcport->loop_id, & port_param->speed, (uint16_t *)(& mb)); } if (rval != 0) { ql_log(1U, vha, 28748, "iIDMA cmd failed for %8phN -- %04x %x %04x %04x.\n", (uint8_t *)(& 
fcport->port_name), rval, (int )fcport->fp_speed, (int )mb[0], (int )mb[1]); rval = 458752; } else { if ((unsigned int )port_param->mode == 0U) { bsg_job->reply_len = 36U; rsp_ptr = (uint8_t *)bsg_job->reply + 16UL; memcpy((void *)rsp_ptr, (void const *)port_param, 20UL); } else { } (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); } return (rval); } } static int qla2x00_optrom_setup(struct fc_bsg_job *bsg_job , scsi_qla_host_t *vha , uint8_t is_update ) { uint32_t start ; int valid ; struct qla_hw_data *ha ; int tmp ; long tmp___0 ; void *tmp___1 ; { start = 0U; valid = 0; ha = vha->hw; tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (-22); } else { } start = (bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; if (ha->optrom_size < start) { ql_log(1U, vha, 28757, "start %d > optrom_size %d.\n", start, ha->optrom_size); return (-22); } else { } if (ha->optrom_state != 0) { ql_log(2U, vha, 28758, "optrom_state %d.\n", ha->optrom_state); return (-16); } else { } ha->optrom_region_start = start; ql_dbg(8388608U, vha, 28759, "is_update=%d.\n", (int )is_update); if ((unsigned int )is_update != 0U) { if (ha->optrom_size == 131072U && start == 0U) { valid = 1; } else if (ha->flt_region_boot * 4U == start || ha->flt_region_fw * 4U == start) { valid = 1; } else if ((((((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) || ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U)) || (ha->device_type & 32768U) != 0U) || ((ha->device_type & 524288U) != 0U || (ha->device_type & 1048576U) != 0U)) { valid = 1; } else { } if (valid == 0) { ql_log(1U, vha, 28760, "Invalid start region 0x%x/0x%x.\n", start, bsg_job->request_payload.payload_len); return (-22); } else { } ha->optrom_region_size = bsg_job->request_payload.payload_len + start > ha->optrom_size ? ha->optrom_size - start : bsg_job->request_payload.payload_len; ha->optrom_state = 2; } else { ha->optrom_region_size = bsg_job->reply_payload.payload_len + start > ha->optrom_size ? 
ha->optrom_size - start : bsg_job->reply_payload.payload_len; ha->optrom_state = 1; } tmp___1 = vmalloc((unsigned long )ha->optrom_region_size); ha->optrom_buffer = (char *)tmp___1; if ((unsigned long )ha->optrom_buffer == (unsigned long )((char *)0)) { ql_log(1U, vha, 28761, "Read: Unable to allocate memory for optrom retrieval (%x)\n", ha->optrom_region_size); ha->optrom_state = 0; return (-12); } else { } memset((void *)ha->optrom_buffer, 0, (size_t )ha->optrom_region_size); return (0); } } static int qla2x00_read_optrom(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; if (*((unsigned long *)ha + 2UL) != 0UL) { return (-16); } else { } mutex_lock_nested(& ha->optrom_mutex, 0U); rval = qla2x00_optrom_setup(bsg_job, vha, 0); if (rval != 0) { mutex_unlock(& ha->optrom_mutex); return (rval); } else { } (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void const *)ha->optrom_buffer, (size_t )ha->optrom_region_size); (bsg_job->reply)->reply_payload_rcv_len = ha->optrom_region_size; (bsg_job->reply)->result = 0U; vfree((void const *)ha->optrom_buffer); ha->optrom_buffer = (char *)0; ha->optrom_state = 0; mutex_unlock(& ha->optrom_mutex); (*(bsg_job->job_done))(bsg_job); return (rval); } } static int qla2x00_update_optrom(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; mutex_lock_nested(& ha->optrom_mutex, 0U); rval = qla2x00_optrom_setup(bsg_job, vha, 1); if (rval != 0) { mutex_unlock(& ha->optrom_mutex); return (rval); } else { } ha->flags.isp82xx_no_md_cap = 1U; sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)ha->optrom_buffer, (size_t )ha->optrom_region_size); (*((ha->isp_ops)->write_optrom))(vha, (uint8_t *)ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); (bsg_job->reply)->result = 0U; vfree((void const *)ha->optrom_buffer); ha->optrom_buffer = (char *)0; ha->optrom_state = 0; mutex_unlock(& ha->optrom_mutex); (*(bsg_job->job_done))(bsg_job); return (rval); } } static int qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint8_t bsg[256U] ; struct qla_image_version_list *list ; struct qla_image_version *image ; uint32_t count ; dma_addr_t sfp_dma ; void *sfp ; void *tmp___0 ; uint32_t tmp___1 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; list = (struct qla_image_version_list *)(& bsg); tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & sfp_dma); sfp = tmp___0; if ((unsigned long )sfp == (unsigned long )((void *)0)) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 17U; goto done; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)list, 256UL); image = (struct qla_image_version *)(& list->version); count = list->count; goto ldv_66211; ldv_66210: memcpy(sfp, (void const *)(& image->field_info), 36UL); rval = qla2x00_write_sfp(vha, sfp_dma, (uint8_t *)sfp, 
(int )image->field_address.device, (int )image->field_address.offset, 36, (int )image->field_address.option); if (rval != 0) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 11U; goto dealloc; } else { } image = image + 1; ldv_66211: tmp___1 = count; count = count - 1U; if (tmp___1 != 0U) { goto ldv_66210; } else { } (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 0U; dealloc: dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); done: bsg_job->reply_len = 16U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qla2x00_read_fru_status(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint8_t bsg[256U] ; struct qla_status_reg *sr ; dma_addr_t sfp_dma ; uint8_t *sfp ; void *tmp___0 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; sr = (struct qla_status_reg *)(& bsg); tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & sfp_dma); sfp = (uint8_t *)tmp___0; if ((unsigned long )sfp == (unsigned long )((uint8_t *)0U)) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 17U; goto done; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)sr, 14UL); rval = qla2x00_read_sfp(vha, sfp_dma, sfp, (int )sr->field_address.device, (int )sr->field_address.offset, 1, (int )sr->field_address.option); sr->status_reg = *sfp; if (rval != 0) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 11U; goto dealloc; } else { } sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void const *)sr, 14UL); (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 0U; dealloc: dma_pool_free(ha->s_dma_pool, (void *)sfp, sfp_dma); done: bsg_job->reply_len = 16U; (bsg_job->reply)->reply_payload_rcv_len = 14U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qla2x00_write_fru_status(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint8_t bsg[256U] ; struct qla_status_reg *sr ; dma_addr_t sfp_dma ; uint8_t *sfp ; void *tmp___0 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; sr = (struct qla_status_reg *)(& bsg); tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & sfp_dma); sfp = (uint8_t *)tmp___0; if ((unsigned long )sfp == (unsigned long )((uint8_t *)0U)) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 17U; goto done; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)sr, 14UL); *sfp = sr->status_reg; rval = qla2x00_write_sfp(vha, sfp_dma, sfp, (int )sr->field_address.device, (int )sr->field_address.offset, 1, (int )sr->field_address.option); if (rval != 0) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 11U; goto dealloc; } else { } (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 0U; dealloc: dma_pool_free(ha->s_dma_pool, (void *)sfp, sfp_dma); done: bsg_job->reply_len = 16U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qla2x00_write_i2c(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint8_t bsg[256U] ; struct qla_i2c_access *i2c ; dma_addr_t sfp_dma ; uint8_t *sfp ; void *tmp___0 ; { host = bsg_job->shost; tmp = 
shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; i2c = (struct qla_i2c_access *)(& bsg); tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & sfp_dma); sfp = (uint8_t *)tmp___0; if ((unsigned long )sfp == (unsigned long )((uint8_t *)0U)) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 17U; goto done; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)i2c, 72UL); memcpy((void *)sfp, (void const *)(& i2c->buffer), (size_t )i2c->length); rval = qla2x00_write_sfp(vha, sfp_dma, sfp, (int )i2c->device, (int )i2c->offset, (int )i2c->length, (int )i2c->option); if (rval != 0) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 11U; goto dealloc; } else { } (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 0U; dealloc: dma_pool_free(ha->s_dma_pool, (void *)sfp, sfp_dma); done: bsg_job->reply_len = 16U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qla2x00_read_i2c(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint8_t bsg[256U] ; struct qla_i2c_access *i2c ; dma_addr_t sfp_dma ; uint8_t *sfp ; void *tmp___0 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; i2c = (struct qla_i2c_access *)(& bsg); tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & sfp_dma); sfp = (uint8_t *)tmp___0; if ((unsigned long )sfp == (unsigned long )((uint8_t *)0U)) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 17U; goto done; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)i2c, 72UL); rval = qla2x00_read_sfp(vha, sfp_dma, sfp, (int )i2c->device, (int )i2c->offset, (int )i2c->length, (int )i2c->option); if (rval != 0) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 11U; goto dealloc; } else { } memcpy((void *)(& i2c->buffer), (void const *)sfp, (size_t )i2c->length); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void const *)i2c, 72UL); (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 0U; dealloc: dma_pool_free(ha->s_dma_pool, (void *)sfp, sfp_dma); done: bsg_job->reply_len = 16U; (bsg_job->reply)->reply_payload_rcv_len = 72U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; uint16_t thread_id ; uint32_t rval ; uint16_t req_sg_cnt ; uint16_t rsp_sg_cnt ; uint16_t nextlid ; uint32_t tot_dsds ; srb_t *sp ; uint32_t req_data_len ; uint32_t rsp_data_len ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0U; req_sg_cnt = 0U; rsp_sg_cnt = 0U; nextlid = 0U; sp = (srb_t *)0; req_data_len = 0U; rsp_data_len = 0U; if ((ha->device_type & 2048U) == 0U && (ha->device_type & 32768U) == 0U) { ql_log(1U, vha, 28832, "This adapter is not supported\n"); rval = 27U; goto done; } else { } tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { rval = 2U; goto done; } else { tmp___1 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { rval = 2U; goto done; } else { tmp___2 = 
constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___2 != 0) { rval = 2U; goto done; } else { } } } if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(1U, vha, 28833, "Host is not online\n"); rval = 22U; goto done; } else { } if ((vha->device_flags & 2U) != 0U) { ql_log(1U, vha, 28834, "Cable is unplugged...\n"); rval = 28U; goto done; } else { } if ((unsigned int )ha->current_topology != 8U) { ql_log(1U, vha, 28835, "Host is not connected to the switch\n"); rval = 28U; goto done; } else { } if ((unsigned int )ha->operating_mode != 1U) { ql_log(1U, vha, 28836, "Host is operating mode is not P2p\n"); rval = 28U; goto done; } else { } thread_id = (uint16_t )(bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; mutex_lock_nested(& ha->selflogin_lock, 0U); if ((unsigned int )vha->self_login_loop_id == 0U) { vha->bidir_fcport.vha = vha; vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa; vha->bidir_fcport.d_id.b.area = vha->d_id.b.area; vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain; vha->bidir_fcport.loop_id = vha->loop_id; tmp___3 = qla2x00_fabric_login(vha, & vha->bidir_fcport, & nextlid); if (tmp___3 != 0) { ql_log(1U, vha, 28839, "Failed to login port %06X for bidirectional IOCB\n", (int )vha->bidir_fcport.d_id.b24); mutex_unlock(& ha->selflogin_lock); rval = 11U; goto done; } else { } vha->self_login_loop_id = (unsigned int )nextlid + 65535U; } else { } mutex_unlock(& ha->selflogin_lock); vha->bidir_fcport.loop_id = vha->self_login_loop_id; tmp___4 = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); req_sg_cnt = (uint16_t )tmp___4; if ((unsigned int )req_sg_cnt == 0U) { rval = 17U; goto done; } else { } tmp___5 = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); rsp_sg_cnt = (uint16_t )tmp___5; if ((unsigned int )rsp_sg_cnt == 0U) { rval = 17U; goto done_unmap_req_sg; } else { } if ((int )req_sg_cnt != bsg_job->request_payload.sg_cnt || (int )rsp_sg_cnt != bsg_job->reply_payload.sg_cnt) { ql_dbg(8388608U, vha, 28841, "Dma mapping resulted in different sg counts [request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", bsg_job->request_payload.sg_cnt, (int )req_sg_cnt, bsg_job->reply_payload.sg_cnt, (int )rsp_sg_cnt); rval = 17U; goto done_unmap_sg; } else { } if (req_data_len != rsp_data_len) { rval = 2U; ql_log(1U, vha, 28842, "req_data_len != rsp_data_len\n"); goto done_unmap_sg; } else { } req_data_len = bsg_job->request_payload.payload_len; rsp_data_len = bsg_job->reply_payload.payload_len; sp = qla2x00_get_sp(vha, & vha->bidir_fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_dbg(8388608U, vha, 28844, "Alloc SRB structure failed\n"); rval = 17U; goto done_unmap_sg; } else { } sp->u.bsg_job = bsg_job; sp->free = & qla2x00_bsg_sp_free; sp->type = 9U; sp->done = & qla2x00_bsg_job_done; tot_dsds = (uint32_t )((int )rsp_sg_cnt + (int )req_sg_cnt); tmp___6 = qla2x00_start_bidir(sp, vha, tot_dsds); rval = (uint32_t )tmp___6; if (rval != 0U) { goto done_free_srb; } else { } return ((int )rval); done_free_srb: mempool_free((void *)sp, ha->srb_mempool); done_unmap_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); done_unmap_req_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs 
*)0); done: (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = rval; bsg_job->reply_len = 16U; (bsg_job->reply)->reply_payload_rcv_len = 0U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; struct qla_mt_iocb_rqst_fx00 *piocb_rqst ; srb_t *sp ; int req_sg_cnt ; int rsp_sg_cnt ; struct fc_port *fcport ; char *type ; fc_port_t *tmp___0 ; uint16_t tmp___1 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 262144; req_sg_cnt = 0; rsp_sg_cnt = 0; type = (char *)"FC_BSG_HST_FX_MGMT"; piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)(& (bsg_job->request)->rqst_data.h_vendor.vendor_cmd) + 1U; ql_dump_buffer(8421376U, vha, 28879, (uint8_t *)piocb_rqst, 32U); if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(1U, vha, 28880, "Host is not online.\n"); rval = -5; goto done; } else { } if ((int )piocb_rqst->flags & 1) { req_sg_cnt = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); if (req_sg_cnt == 0) { ql_log(1U, vha, 28871, "dma_map_sg return %d for request\n", req_sg_cnt); rval = -12; goto done; } else { } } else { } if (((int )piocb_rqst->flags & 2) != 0) { rsp_sg_cnt = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); if (rsp_sg_cnt == 0) { ql_log(1U, vha, 28872, "dma_map_sg return %d for reply\n", rsp_sg_cnt); rval = -12; goto done_unmap_req_sg; } else { } } else { } ql_dbg(8388608U, vha, 28873, "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); tmp___0 = qla2x00_alloc_fcport(vha, 208U); fcport = tmp___0; if ((unsigned long )fcport == (unsigned long )((struct fc_port *)0)) { ql_log(1U, vha, 28874, "Failed to allocate fcport.\n"); rval = -12; goto done_unmap_rsp_sg; } else { } sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_log(1U, vha, 28875, "qla2x00_get_sp failed.\n"); rval = -12; goto done_free_fcport; } else { } fcport->vha = vha; fcport->loop_id = (uint16_t )piocb_rqst->dataword; sp->type = 11U; sp->name = (char *)"bsg_fx_mgmt"; tmp___1 = qla24xx_calc_ct_iocbs((int )((uint16_t )req_sg_cnt) + (int )((uint16_t )rsp_sg_cnt)); sp->iocbs = (int )tmp___1; sp->u.bsg_job = bsg_job; sp->free = & qla2x00_bsg_sp_free; sp->done = & qla2x00_bsg_job_done; ql_dbg(8388608U, vha, 28876, "bsg rqst type: %s fx_mgmt_type: %x id=%x\n", type, (int )piocb_rqst->func_type, (int )fcport->loop_id); rval = qla2x00_start_sp(sp); if (rval != 0) { ql_log(1U, vha, 28877, "qla2x00_start_sp failed=%d.\n", rval); mempool_free((void *)sp, ha->srb_mempool); rval = -5; goto done_free_fcport; } else { } return (rval); done_free_fcport: kfree((void const *)fcport); done_unmap_rsp_sg: ; if (((int )piocb_rqst->flags & 2) != 0) { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); } else { } done_unmap_req_sg: ; if ((int )piocb_rqst->flags & 1) { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); } else { } done: ; return (rval); } } static int qla26xx_serdes_op(struct fc_bsg_job *bsg_job ) { struct Scsi_Host 
*host ; scsi_qla_host_t *vha ; void *tmp ; int rval ; struct qla_serdes_reg sr ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; rval = 0; memset((void *)(& sr), 0, 6UL); sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)(& sr), 6UL); switch ((int )sr.cmd) { case 2: rval = qla2x00_write_serdes_word(vha, (int )sr.addr, (int )sr.val); (bsg_job->reply)->reply_payload_rcv_len = 0U; goto ldv_66309; case 1: rval = qla2x00_read_serdes_word(vha, (int )sr.addr, & sr.val); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void const *)(& sr), 6UL); (bsg_job->reply)->reply_payload_rcv_len = 6U; goto ldv_66309; default: ql_dbg(8388608U, vha, 28812, "Unknown serdes cmd %x.\n", (int )sr.cmd); rval = -22; goto ldv_66309; } ldv_66309: (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = rval != 0 ? 11U : 0U; bsg_job->reply_len = 16U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qla8044_serdes_op(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; int rval ; struct qla_serdes_reg_ex sr ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; rval = 0; memset((void *)(& sr), 0, 10UL); sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)(& sr), 10UL); switch ((int )sr.cmd) { case 2: rval = qla8044_write_serdes_word(vha, sr.addr, sr.val); (bsg_job->reply)->reply_payload_rcv_len = 0U; goto ldv_66320; case 1: rval = qla8044_read_serdes_word(vha, sr.addr, & sr.val); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void const *)(& sr), 10UL); (bsg_job->reply)->reply_payload_rcv_len = 10U; goto ldv_66320; default: ql_dbg(8388608U, vha, 28879, "Unknown serdes cmd %x.\n", (int )sr.cmd); rval = -22; goto ldv_66320; } ldv_66320: (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = rval != 0 ? 
11U : 0U; bsg_job->reply_len = 16U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; { switch ((bsg_job->request)->rqst_data.h_vendor.vendor_cmd[0]) { case 1U: tmp = qla2x00_process_loopback(bsg_job); return (tmp); case 2U: tmp___0 = qla84xx_reset(bsg_job); return (tmp___0); case 3U: tmp___1 = qla84xx_updatefw(bsg_job); return (tmp___1); case 4U: tmp___2 = qla84xx_mgmt_cmd(bsg_job); return (tmp___2); case 5U: tmp___3 = qla24xx_iidma(bsg_job); return (tmp___3); case 6U: tmp___4 = qla24xx_proc_fcp_prio_cfg_cmd(bsg_job); return (tmp___4); case 7U: tmp___5 = qla2x00_read_optrom(bsg_job); return (tmp___5); case 8U: tmp___6 = qla2x00_update_optrom(bsg_job); return (tmp___6); case 11U: tmp___7 = qla2x00_update_fru_versions(bsg_job); return (tmp___7); case 12U: tmp___8 = qla2x00_read_fru_status(bsg_job); return (tmp___8); case 13U: tmp___9 = qla2x00_write_fru_status(bsg_job); return (tmp___9); case 16U: tmp___10 = qla2x00_write_i2c(bsg_job); return (tmp___10); case 17U: tmp___11 = qla2x00_read_i2c(bsg_job); return (tmp___11); case 10U: tmp___12 = qla24xx_process_bidir_cmd(bsg_job); return (tmp___12); case 18U: tmp___13 = qlafx00_mgmt_cmd(bsg_job); return (tmp___13); case 19U: tmp___14 = qla26xx_serdes_op(bsg_job); return (tmp___14); case 20U: tmp___15 = qla8044_serdes_op(bsg_job); return (tmp___15); default: ; return (-38); } } } int qla24xx_bsg_request(struct fc_bsg_job *bsg_job ) { int ret ; struct fc_rport *rport ; fc_port_t *fcport ; struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; void *tmp___0 ; int tmp___1 ; { ret = -22; fcport = (fc_port_t *)0; (bsg_job->reply)->reply_payload_rcv_len = 0U; if ((bsg_job->request)->msgcode == 1073741825U) { rport = bsg_job->rport; fcport = *((fc_port_t **)rport->dd_data); host = dev_to_shost(rport->dev.parent); tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; } else { host = bsg_job->shost; tmp___0 = shost_priv(host); vha = (scsi_qla_host_t *)tmp___0; } tmp___1 = qla2x00_reset_active(vha); if (tmp___1 != 0) { ql_dbg(8388608U, vha, 28831, "BSG: ISP abort active/needed -- cmd=%d.\n", (bsg_job->request)->msgcode); return (-16); } else { } ql_dbg(8388608U, vha, 28672, "Entered %s msgcode=0x%x.\n", "qla24xx_bsg_request", (bsg_job->request)->msgcode); switch ((bsg_job->request)->msgcode) { case 1073741825U: ; case 2147483651U: ret = qla2x00_process_els(bsg_job); goto ldv_66355; case 2147483652U: ret = qla2x00_process_ct(bsg_job); goto ldv_66355; case 2147483903U: ret = qla2x00_process_vendor_specific(bsg_job); goto ldv_66355; case 2147483649U: ; case 2147483650U: ; case 1073741826U: ; default: ql_log(1U, vha, 28762, "Unsupported BSG request.\n"); goto ldv_66355; } ldv_66355: ; return (ret); } } int qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job ) { scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; srb_t *sp ; int cnt ; int que ; unsigned long flags ; struct req_que *req ; raw_spinlock_t *tmp___0 ; int tmp___1 ; raw_spinlock_t *tmp___2 ; { tmp = shost_priv(bsg_job->shost); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); que = 0; goto ldv_66384; ldv_66383: req = *(ha->req_q_map + (unsigned long )que); if ((unsigned long 
)req == (unsigned long )((struct req_que *)0)) { goto ldv_66375; } else { } cnt = 1; goto ldv_66381; ldv_66380: sp = *(req->outstanding_cmds + (unsigned long )cnt); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { if ((((unsigned int )sp->type == 5U || (unsigned int )sp->type == 4U) || (unsigned int )sp->type == 11U) && (unsigned long )sp->u.bsg_job == (unsigned long )bsg_job) { *(req->outstanding_cmds + (unsigned long )cnt) = (srb_t *)0; spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___1 = (*((ha->isp_ops)->abort_command))(sp); if (tmp___1 != 0) { ql_log(1U, vha, 28809, "mbx abort_command failed.\n"); (bsg_job->reply)->result = 4294967291U; (bsg_job->req)->errors = -5; } else { ql_dbg(8388608U, vha, 28810, "mbx abort_command success.\n"); (bsg_job->reply)->result = 0U; (bsg_job->req)->errors = 0; } tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); goto done; } else { } } else { } cnt = cnt + 1; ldv_66381: ; if ((int )req->num_outstanding_cmds > cnt) { goto ldv_66380; } else { } ldv_66375: que = que + 1; ldv_66384: ; if ((int )ha->max_req_queues > que) { goto ldv_66383; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); ql_log(2U, vha, 28811, "SRB not found to abort.\n"); (bsg_job->reply)->result = 4294967290U; (bsg_job->req)->errors = -6; return (0); done: spin_unlock_irqrestore(& ha->hardware_lock, flags); (*(sp->free))((void *)vha, (void *)sp); return (0); } } int reg_timer_25(struct timer_list *timer ) { { ldv_timer_list_25 = timer; ldv_timer_state_25 = 1; return (0); } } void activate_pending_timer_25(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_25 == (unsigned long )timer) { if (ldv_timer_state_25 == 2 || pending_flag != 0) { ldv_timer_list_25 = timer; ldv_timer_list_25->data = data; ldv_timer_state_25 = 1; } else { } return; } else { } reg_timer_25(timer); ldv_timer_list_25->data = data; return; } } void choose_timer_25(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_25 = 2; return; } } void disable_suitable_timer_25(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_25) { ldv_timer_state_25 = 0; return; } else { } return; } } bool ldv_queue_work_on_231(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_232(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_233(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_234(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_235(int ldv_func_arg1 , 
struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_236(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern int __printk_ratelimit(char const * ) ; extern void __rwlock_init(rwlock_t * , char const * , struct lock_class_key * ) ; extern void _raw_read_lock(rwlock_t * ) ; extern unsigned long _raw_write_lock_irqsave(rwlock_t * ) ; extern void _raw_read_unlock(rwlock_t * ) ; extern void _raw_write_unlock_irqrestore(rwlock_t * , unsigned long ) ; bool ldv_queue_work_on_247(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_249(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_248(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_251(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_250(struct workqueue_struct *ldv_func_arg1 ) ; __inline static unsigned char readb(void const volatile *addr ) { unsigned char ret ; { __asm__ volatile ("movb %1,%0": "=q" (ret): "m" (*((unsigned char volatile *)addr)): "memory"); return (ret); } } __inline static unsigned long readq(void const volatile *addr ) { unsigned long ret ; { __asm__ volatile ("movq %1,%0": "=r" (ret): "m" (*((unsigned long volatile *)addr)): "memory"); return (ret); } } __inline static void writeq(unsigned long val , void volatile *addr ) { { __asm__ volatile ("movq %0,%1": : "r" (val), "m" (*((unsigned long volatile *)addr)): "memory"); return; } } extern long schedule_timeout(long ) ; void choose_timer_26(struct timer_list *timer ) ; void activate_pending_timer_26(struct timer_list *timer , unsigned long data , int pending_flag ) ; void disable_suitable_timer_26(struct timer_list *timer ) ; int reg_timer_26(struct timer_list *timer ) ; extern int ___ratelimit(struct ratelimit_state * , char const * ) ; extern void dev_err(struct device const * , char const * , ...) 
; extern int pci_bus_read_config_dword(struct pci_bus * , unsigned int , int , u32 * ) ; __inline static int pci_read_config_dword(struct pci_dev const *dev , int where , u32 *val ) { int tmp ; { tmp = pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); return (tmp); } } extern int pcie_capability_read_word(struct pci_dev * , int , u16 * ) ; extern int pci_set_mwi(struct pci_dev * ) ; extern int pci_request_regions(struct pci_dev * , char const * ) ; int ldv_scsi_add_host_with_dma_252(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; static int const MD_MIU_TEST_AGT_RDDATA[4U] = { 1090519208, 1090519212, 1090519224, 1090519228}; int qla82xx_pci_mem_read_2M(struct qla_hw_data *ha , u64 off , void *data , int size ) ; int qla82xx_pci_region_offset(struct pci_dev *pdev , int region ) ; uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *vha , uint32_t curr_state ) ; char *qdev_state(uint32_t dev_state ) ; int qla82xx_md_alloc(scsi_qla_host_t *vha ) ; int qla82xx_md_collect(scsi_qla_host_t *vha ) ; int qla82xx_validate_template_chksum(scsi_qla_host_t *vha ) ; void qla82xx_mbx_completion(scsi_qla_host_t *vha , uint16_t mb0 ) ; int qla8044_check_fw_alive(struct scsi_qla_host *vha ) ; static unsigned long crb_addr_xform[60U] ; static int qla82xx_crb_table_initialized ; static void qla82xx_crb_addr_transform_setup(void) { { crb_addr_xform[49] = 1078984704UL; crb_addr_xform[40] = 1097859072UL; crb_addr_xform[5] = 218103808UL; crb_addr_xform[11] = 238026752UL; crb_addr_xform[10] = 236978176UL; crb_addr_xform[9] = 235929600UL; crb_addr_xform[8] = 234881024UL; crb_addr_xform[16] = 1883242496UL; crb_addr_xform[15] = 1882193920UL; crb_addr_xform[14] = 1881145344UL; crb_addr_xform[13] = 1880096768UL; crb_addr_xform[48] = 1894776832UL; crb_addr_xform[47] = 148897792UL; crb_addr_xform[46] = 147849216UL; crb_addr_xform[45] = 1891631104UL; crb_addr_xform[44] = 1890582528UL; crb_addr_xform[43] = 1889533952UL; crb_addr_xform[42] = 143654912UL; crb_addr_xform[53] = 142606336UL; crb_addr_xform[51] = 1108344832UL; crb_addr_xform[29] = 1090519040UL; crb_addr_xform[7] = 241172480UL; crb_addr_xform[12] = 1879048192UL; crb_addr_xform[22] = 876609536UL; crb_addr_xform[21] = 877658112UL; crb_addr_xform[20] = 875560960UL; crb_addr_xform[19] = 874512384UL; crb_addr_xform[18] = 873463808UL; crb_addr_xform[17] = 872415232UL; crb_addr_xform[28] = 1010827264UL; crb_addr_xform[27] = 1011875840UL; crb_addr_xform[26] = 1009778688UL; crb_addr_xform[25] = 1008730112UL; crb_addr_xform[24] = 1007681536UL; crb_addr_xform[23] = 1006632960UL; crb_addr_xform[1] = 1999634432UL; crb_addr_xform[0] = 698351616UL; crb_addr_xform[6] = 454033408UL; crb_addr_xform[50] = 1107296256UL; crb_addr_xform[31] = 219152384UL; crb_addr_xform[2] = 693108736UL; crb_addr_xform[3] = 709885952UL; crb_addr_xform[37] = 209715200UL; crb_addr_xform[36] = 208666624UL; crb_addr_xform[35] = 207618048UL; crb_addr_xform[34] = 1096810496UL; crb_addr_xform[39] = 1972371456UL; crb_addr_xform[38] = 1971322880UL; crb_addr_xform[58] = 1904214016UL; crb_addr_xform[56] = 1080033280UL; crb_addr_xform[59] = 428867584UL; qla82xx_crb_table_initialized = 1; return; } } static struct crb_128M_2M_block_map crb_128M_2M_map[64U] = { {{{0U, 0U, 0U, 0U}}}, {{{1U, 1048576U, 1056768U, 1179648U}, {1U, 1114112U, 1179648U, 1245184U}, {1U, 1179648U, 1187840U, 1196032U}, {1U, 1245184U, 1253376U, 1204224U}, {1U, 1310720U, 1318912U, 1212416U}, {1U, 1376256U, 1384448U, 1220608U}, {1U, 1441792U, 1507328U, 1114112U}, {1U, 1507328U, 1515520U, 1236992U}, {0U, 0U, 
0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 1966080U, 1968128U, 1187840U}, {0U, 0U, 0U, 0U}}}, {{{1U, 2097152U, 2162688U, 1572864U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 4194304U, 4198400U, 1478656U}}}, {{{1U, 5242880U, 5308416U, 1310720U}}}, {{{1U, 6291456U, 6356992U, 1835008U}}}, {{{1U, 7340032U, 7356416U, 1802240U}}}, {{{1U, 8388608U, 8396800U, 1507328U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 9371648U, 9379840U, 1515520U}}}, {{{1U, 9437184U, 9445376U, 1523712U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 10420224U, 10428416U, 1531904U}}}, {{{0U, 10485760U, 10493952U, 1540096U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 11468800U, 11476992U, 1548288U}}}, {{{0U, 11534336U, 11542528U, 1556480U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 12517376U, 12525568U, 1564672U}}}, {{{1U, 12582912U, 12599296U, 1916928U}}}, {{{1U, 13631488U, 13647872U, 1720320U}}}, {{{1U, 14680064U, 14696448U, 1703936U}}}, {{{1U, 15728640U, 15732736U, 1458176U}}}, {{{0U, 16777216U, 16793600U, 1736704U}}}, {{{1U, 17825792U, 17829888U, 1441792U}}}, {{{1U, 18874368U, 18878464U, 1445888U}}}, {{{1U, 19922944U, 19927040U, 1449984U}}}, {{{1U, 20971520U, 20975616U, 1454080U}}}, {{{1U, 22020096U, 22024192U, 1462272U}}}, {{{1U, 23068672U, 23072768U, 1466368U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 30408704U, 30474240U, 1638400U}}}, {{{1U, 31457280U, 31461376U, 1482752U}}}, {{{1U, 32505856U, 32571392U, 1376256U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 34603008U, 34611200U, 1179648U}, {1U, 34668544U, 34734080U, 1245184U}, {1U, 34734080U, 34742272U, 1196032U}, {1U, 34799616U, 34807808U, 1204224U}, {1U, 34865152U, 34873344U, 1212416U}, {1U, 34930688U, 34938880U, 1220608U}, {1U, 34996224U, 35061760U, 1114112U}, {1U, 35061760U, 35069952U, 1236992U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}}}, {{{1U, 35651584U, 35667968U, 1769472U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 41943040U, 41959424U, 1720320U}}}, {{{1U, 42991616U, 42995712U, 1486848U}}}, {{{1U, 44040192U, 44041216U, 1754112U}}}, {{{1U, 45088768U, 45089792U, 1755136U}}}, {{{1U, 46137344U, 46138368U, 1756160U}}}, {{{1U, 47185920U, 47186944U, 1757184U}}}, {{{1U, 48234496U, 48235520U, 1758208U}}}, {{{1U, 49283072U, 49284096U, 1759232U}}}, {{{1U, 50331648U, 50332672U, 1760256U}}}, {{{0U, 51380224U, 51396608U, 1736704U}}}, {{{1U, 52428800U, 52445184U, 1916928U}}}, {{{1U, 53477376U, 53493760U, 1703936U}}}, 
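/*
 * Each top-level entry of crb_128M_2M_map covers one 1 MB block of the
 * legacy "128M" CRB register layout, subdivided into sixteen 64 KB
 * sub-blocks; qla82xx_pci_get_crb_addr_2M() below reads the four fields of
 * a sub-block as {valid, start_128M, end_128M, start_2M} when translating a
 * CRB offset into an offset inside the 2 MB PCI BAR window.
 */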
{{{0U, 0U, 0U, 0U}}}, {{{1U, 55574528U, 55575552U, 1753088U}}}, {{{1U, 56623104U, 56624128U, 1761280U}}}, {{{1U, 57671680U, 57672704U, 1762304U}}}, {{{1U, 58720256U, 58736640U, 1900544U}}}, {{{1U, 59768832U, 59785216U, 1785856U}}}, {{{1U, 60817408U, 60833792U, 1933312U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 63963136U, 63979520U, 1949696U}}}, {{{1U, 65011712U, 65015808U, 1470464U}}}, {{{1U, 66060288U, 66064384U, 1474560U}}}}; static unsigned int qla82xx_crb_hub_agt[64U] = { 0U, 1907U, 661U, 677U, 0U, 208U, 433U, 230U, 224U, 225U, 226U, 227U, 1056U, 1047U, 1057U, 843U, 1029U, 832U, 833U, 834U, 835U, 837U, 836U, 960U, 961U, 962U, 963U, 0U, 964U, 1040U, 0U, 209U, 0U, 1907U, 1046U, 0U, 0U, 0U, 0U, 0U, 1047U, 0U, 137U, 1802U, 1803U, 1804U, 141U, 142U, 1807U, 1029U, 1056U, 1057U, 0U, 136U, 145U, 1810U, 1030U, 0U, 1816U, 409U, 425U, 0U, 838U, 0U}; static char *q_dev_state[8U] = { (char *)"Unknown", (char *)"Cold", (char *)"Initializing", (char *)"Ready", (char *)"Need Reset", (char *)"Need Quiescent", (char *)"Failed", (char *)"Quiescent"}; char *qdev_state(uint32_t dev_state ) { { return (q_dev_state[dev_state]); } } static void qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha , ulong *off ) { u32 win_read ; scsi_qla_host_t *vha ; void *tmp ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ha->crb_win = (qla82xx_crb_hub_agt[(*off >> 20) & 63UL] << 20) | ((uint32_t )*off & 983040U); writel(ha->crb_win, (void volatile *)(ha->nx_pcibase + 1245280UL)); win_read = readl((void const volatile *)(ha->nx_pcibase + 1245280UL)); if (ha->crb_win != win_read) { ql_dbg(524288U, vha, 45056, "%s: Written crbwin (0x%x) != Read crbwin (0x%x), off=0x%lx.\n", "qla82xx_pci_set_crbwindow_2M", ha->crb_win, win_read, *off); } else { } *off = (ulong )((((unsigned long long )*off & 65535ULL) + (unsigned long long )ha->nx_pcibase) + 1966080ULL); return; } } static int qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha , ulong *off ) { struct crb_128M_2M_sub_block_map *m ; { if (*off > 167772159UL) { return (-1); } else { } if (*off > 75497471UL && *off <= 75499519UL) { *off = (*off + ha->nx_pcibase) - 74450944UL; return (0); } else { } if (*off <= 100663295UL) { return (-1); } else { } *off = *off - 100663296UL; m = (struct crb_128M_2M_sub_block_map *)(& crb_128M_2M_map[(*off >> 20) & 63UL].sub_block) + ((*off >> 16) & 15UL); if ((m->valid != 0U && (ulong )m->start_128M <= *off) && (ulong )m->end_128M > *off) { *off = ((*off + (ulong )m->start_2M) - (ulong )m->start_128M) + ha->nx_pcibase; return (0); } else { } return (1); } } static int qla82xx_crb_win_lock(struct qla_hw_data *ha ) { int done ; int timeout ; { done = 0; timeout = 0; goto ldv_65961; ldv_65960: done = qla82xx_rd_32(ha, 101826616UL); if (done == 1) { goto ldv_65959; } else { } if (timeout > 99999999) { return (-1); } else { } timeout = timeout + 1; ldv_65961: ; if (done == 0) { goto ldv_65960; } else { } ldv_65959: qla82xx_wr_32(ha, 136323364UL, (u32 )ha->portnum); return (0); } } int qla82xx_wr_32(struct qla_hw_data *ha , ulong off , u32 data ) { unsigned long flags ; int rv ; long tmp ; { flags = 0UL; rv = qla82xx_pci_get_crb_addr_2M(ha, & off); tmp = ldv__builtin_expect(rv == -1, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_nx.c"), "i" (475), "i" (12UL)); ldv_65969: ; goto ldv_65969; } else { } if (rv == 1) { flags = _raw_write_lock_irqsave(& ha->hw_lock); qla82xx_crb_win_lock(ha); qla82xx_pci_set_crbwindow_2M(ha, & off); } else { } writel(data, (void volatile *)off); if (rv == 1) { qla82xx_rd_32(ha, 101826620UL); _raw_write_unlock_irqrestore(& ha->hw_lock, flags); } else { } return (0); } } int qla82xx_rd_32(struct qla_hw_data *ha , ulong off ) { unsigned long flags ; int rv ; u32 data ; long tmp ; { flags = 0UL; rv = qla82xx_pci_get_crb_addr_2M(ha, & off); tmp = ldv__builtin_expect(rv == -1, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_nx.c"), "i" (501), "i" (12UL)); ldv_65983: ; goto ldv_65983; } else { } if (rv == 1) { flags = _raw_write_lock_irqsave(& ha->hw_lock); qla82xx_crb_win_lock(ha); qla82xx_pci_set_crbwindow_2M(ha, & off); } else { } data = readl((void const volatile *)off); if (rv == 1) { qla82xx_rd_32(ha, 101826620UL); _raw_write_unlock_irqrestore(& ha->hw_lock, flags); } else { } return ((int )data); } } int qla82xx_idc_lock(struct qla_hw_data *ha ) { int i ; int done ; int timeout ; int tmp ; { done = 0; timeout = 0; goto ldv_66001; ldv_66000: done = qla82xx_rd_32(ha, 101826600UL); if (done == 1) { goto ldv_65996; } else { } if (timeout > 99999999) { return (-1); } else { } timeout = timeout + 1; tmp = preempt_count(); if (((unsigned long )tmp & 2096896UL) == 0UL) { schedule(); } else { i = 0; goto ldv_65998; ldv_65997: cpu_relax(); i = i + 1; ldv_65998: ; if (i <= 19) { goto ldv_65997; } else { } } ldv_66001: ; if (done == 0) { goto ldv_66000; } else { } ldv_65996: ; return (0); } } void qla82xx_idc_unlock(struct qla_hw_data *ha ) { { qla82xx_rd_32(ha, 101826604UL); return; } } static unsigned long qla82xx_pci_mem_bound_check(struct qla_hw_data *ha , unsigned long long addr , int size ) { { if ((addr > 268435455ULL || ((unsigned long long )size + addr) - 1ULL > 268435455ULL) || (((size != 1 && size != 2) && size != 4) && size != 8)) { return (0UL); } else { return (1UL); } } } static int qla82xx_pci_set_window_warning_count ; static unsigned long qla82xx_pci_set_window(struct qla_hw_data *ha , unsigned long long addr ) { int window ; u32 win_read ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; unsigned int temp1 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; if (addr <= 268435455ULL) { window = (int )((unsigned int )((addr & 33292288ULL) >> 1) | ((unsigned int )(addr >> 25) & 1023U)); ha->ddr_mn_window = (uint32_t )window; qla82xx_wr_32(ha, ha->mn_win_crb | 100663296UL, (u32 )window); tmp___0 = qla82xx_rd_32(ha, ha->mn_win_crb | 100663296UL); win_read = (u32 )tmp___0; if (win_read << 17 != (u32 )window) { ql_dbg(524288U, vha, 45059, "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n", "qla82xx_pci_set_window", window, win_read); } else { } addr = addr & 262143ULL; } else if (addr <= 8590983167ULL && addr > 8589934591ULL) { if ((addr & 1046528ULL) == 1046528ULL) { ql_log(1U, vha, 45060, "%s: QM access 
not handled.\n", "qla82xx_pci_set_window"); addr = 0xffffffffffffffffULL; } else { } window = (int )((unsigned int )((addr & 33488896ULL) >> 1) | ((unsigned int )(addr >> 25) & 1023U)); ha->ddr_mn_window = (uint32_t )window; qla82xx_wr_32(ha, ha->mn_win_crb | 100663296UL, (u32 )window); tmp___1 = qla82xx_rd_32(ha, ha->mn_win_crb | 100663296UL); win_read = (u32 )tmp___1; temp1 = ((unsigned int )(window << 7) & 65535U) | ((unsigned int )window >> 17); if (win_read != temp1) { ql_log(1U, vha, 45061, "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n", "qla82xx_pci_set_window", temp1, win_read); } else { } addr = (addr & 262143ULL) + 786432ULL; } else if (addr <= 12952010751ULL && addr > 12884901887ULL) { window = (int )addr & 268173312; ha->qdr_sn_window = window; qla82xx_wr_32(ha, ha->ms_win_crb | 100663296UL, (u32 )window); tmp___2 = qla82xx_rd_32(ha, ha->ms_win_crb | 100663296UL); win_read = (u32 )tmp___2; if ((u32 )window != win_read) { ql_log(1U, vha, 45062, "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n", "qla82xx_pci_set_window", window, win_read); } else { } addr = (addr & 262143ULL) + 67108864ULL; } else { tmp___3 = qla82xx_pci_set_window_warning_count; qla82xx_pci_set_window_warning_count = qla82xx_pci_set_window_warning_count + 1; if (tmp___3 <= 7 || ((unsigned int )qla82xx_pci_set_window_warning_count & 63U) == 0U) { ql_log(1U, vha, 45063, "%s: Warning:%s Unknown address range!.\n", "qla82xx_pci_set_window", (char *)"qla2xxx"); } else { } addr = 0xffffffffffffffffULL; } return ((unsigned long )addr); } } static int qla82xx_pci_is_same_window(struct qla_hw_data *ha , unsigned long long addr ) { int window ; unsigned long long qdr_max ; { qdr_max = 12952010751ULL; if (addr <= 268435455ULL) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_nx.c"), "i" (661), "i" (12UL)); ldv_66026: ; goto ldv_66026; } else if (addr <= 8590983167ULL && addr > 8589934591ULL) { return (1); } else if (addr <= 8595177471ULL && addr > 8594128895ULL) { return (1); } else if (addr <= qdr_max && addr > 12884901887ULL) { window = (int )((addr - 12884901888ULL) >> 22) & 63; if (ha->qdr_sn_window == window) { return (1); } else { } } else { } return (0); } } static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha , u64 off , void *data , int size ) { unsigned long flags ; void *addr ; int ret ; u64 start ; uint8_t *mem_ptr ; unsigned long mem_base ; unsigned long mem_page ; scsi_qla_host_t *vha ; void *tmp ; unsigned long tmp___0 ; int tmp___1 ; void *tmp___2 ; void *tmp___3 ; unsigned long tmp___4 ; { addr = (void *)0; ret = 0; mem_ptr = (uint8_t *)0U; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; flags = _raw_write_lock_irqsave(& ha->hw_lock); tmp___0 = qla82xx_pci_set_window(ha, off); start = (u64 )tmp___0; if (start == 0xffffffffffffffffULL) { _raw_write_unlock_irqrestore(& ha->hw_lock, flags); ql_log(0U, vha, 45064, "%s out of bound pci memory access, offset is 0x%llx.\n", (char *)"qla2xxx", off); return (-1); } else { tmp___1 = qla82xx_pci_is_same_window(ha, ((u64 )size + off) - 1ULL); if (tmp___1 == 0) { _raw_write_unlock_irqrestore(& ha->hw_lock, flags); ql_log(0U, vha, 45064, "%s out of bound pci memory access, offset is 0x%llx.\n", (char *)"qla2xxx", off); return (-1); } else 
{ } } _raw_write_unlock_irqrestore(& ha->hw_lock, flags); mem_base = (unsigned long )(ha->pdev)->resource[0].start; mem_page = (unsigned long )start & 0xfffffffffffff000UL; if (((((u64 )size + start) - 1ULL) & 0xfffffffffffff000ULL) != (unsigned long long )mem_page) { tmp___2 = ioremap((resource_size_t )(mem_base + mem_page), 8192UL); mem_ptr = (uint8_t *)tmp___2; } else { tmp___3 = ioremap((resource_size_t )(mem_base + mem_page), 4096UL); mem_ptr = (uint8_t *)tmp___3; } if ((unsigned long )mem_ptr == (unsigned long )((uint8_t *)0U)) { *((u8 *)data) = 0U; return (-1); } else { } addr = (void *)mem_ptr; addr = addr + (start & 4095ULL); flags = _raw_write_lock_irqsave(& ha->hw_lock); switch (size) { case 1: *((u8 *)data) = readb((void const volatile *)addr); goto ldv_66054; case 2: *((u16 *)data) = readw((void const volatile *)addr); goto ldv_66054; case 4: *((u32 *)data) = readl((void const volatile *)addr); goto ldv_66054; case 8: tmp___4 = readq((void const volatile *)addr); *((u64 *)data) = (u64 )tmp___4; goto ldv_66054; default: ret = -1; goto ldv_66054; } ldv_66054: _raw_write_unlock_irqrestore(& ha->hw_lock, flags); if ((unsigned long )mem_ptr != (unsigned long )((uint8_t *)0U)) { iounmap((void volatile *)mem_ptr); } else { } return (ret); } } static int qla82xx_pci_mem_write_direct(struct qla_hw_data *ha , u64 off , void *data , int size ) { unsigned long flags ; void *addr ; int ret ; u64 start ; uint8_t *mem_ptr ; unsigned long mem_base ; unsigned long mem_page ; scsi_qla_host_t *vha ; void *tmp ; unsigned long tmp___0 ; int tmp___1 ; void *tmp___2 ; void *tmp___3 ; { addr = (void *)0; ret = 0; mem_ptr = (uint8_t *)0U; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; flags = _raw_write_lock_irqsave(& ha->hw_lock); tmp___0 = qla82xx_pci_set_window(ha, off); start = (u64 )tmp___0; if (start == 0xffffffffffffffffULL) { _raw_write_unlock_irqrestore(& ha->hw_lock, flags); ql_log(0U, vha, 45065, "%s out of bount memory access, offset is 0x%llx.\n", (char *)"qla2xxx", off); return (-1); } else { tmp___1 = qla82xx_pci_is_same_window(ha, ((u64 )size + off) - 1ULL); if (tmp___1 == 0) { _raw_write_unlock_irqrestore(& ha->hw_lock, flags); ql_log(0U, vha, 45065, "%s out of bount memory access, offset is 0x%llx.\n", (char *)"qla2xxx", off); return (-1); } else { } } _raw_write_unlock_irqrestore(& ha->hw_lock, flags); mem_base = (unsigned long )(ha->pdev)->resource[0].start; mem_page = (unsigned long )start & 0xfffffffffffff000UL; if (((((u64 )size + start) - 1ULL) & 0xfffffffffffff000ULL) != (unsigned long long )mem_page) { tmp___2 = ioremap((resource_size_t )(mem_base + mem_page), 8192UL); mem_ptr = (uint8_t *)tmp___2; } else { tmp___3 = ioremap((resource_size_t )(mem_base + mem_page), 4096UL); mem_ptr = (uint8_t *)tmp___3; } if ((unsigned long )mem_ptr == (unsigned long )((uint8_t *)0U)) { return (-1); } else { } addr = (void *)mem_ptr; addr = addr + (start & 4095ULL); flags = _raw_write_lock_irqsave(& ha->hw_lock); switch (size) { case 1: writeb((int )*((u8 *)data), (void volatile *)addr); goto ldv_66089; case 2: writew((int )*((u16 *)data), (void volatile *)addr); goto ldv_66089; case 4: writel(*((u32 *)data), (void volatile *)addr); goto ldv_66089; case 8: writeq((unsigned long )*((u64 *)data), (void volatile *)addr); goto ldv_66089; default: ret = -1; goto ldv_66089; } ldv_66089: _raw_write_unlock_irqrestore(& ha->hw_lock, flags); if ((unsigned long )mem_ptr != (unsigned long )((uint8_t *)0U)) { iounmap((void volatile *)mem_ptr); } else { } return (ret); } } static unsigned 
long qla82xx_decode_crb_addr(unsigned long addr ) { int i ; unsigned long base_addr ; unsigned long offset ; unsigned long pci_base ; { if (qla82xx_crb_table_initialized == 0) { qla82xx_crb_addr_transform_setup(); } else { } pci_base = 4294967295UL; base_addr = addr & 4293918720UL; offset = addr & 1048575UL; i = 0; goto ldv_66106; ldv_66105: ; if (crb_addr_xform[i] == base_addr) { pci_base = (unsigned long )(i << 20); goto ldv_66104; } else { } i = i + 1; ldv_66106: ; if (i <= 59) { goto ldv_66105; } else { } ldv_66104: ; if (pci_base == 4294967295UL) { return (pci_base); } else { } return (pci_base + offset); } } static long rom_max_timeout = 100L; static long qla82xx_rom_lock_timeout = 100L; static int qla82xx_rom_lock(struct qla_hw_data *ha ) { int done ; int timeout ; uint32_t lock_owner ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { done = 0; timeout = 0; lock_owner = 0U; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; goto ldv_66119; ldv_66118: done = qla82xx_rd_32(ha, 101826576UL); if (done == 1) { goto ldv_66116; } else { } if ((long )timeout >= qla82xx_rom_lock_timeout) { tmp___0 = qla82xx_rd_32(ha, 136323328UL); lock_owner = (uint32_t )tmp___0; ql_dbg(524288U, vha, 45399, "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", "qla82xx_rom_lock", (int )ha->portnum, lock_owner); return (-1); } else { } timeout = timeout + 1; ldv_66119: ; if (done == 0) { goto ldv_66118; } else { } ldv_66116: qla82xx_wr_32(ha, 136323328UL, (u32 )ha->portnum); return (0); } } static void qla82xx_rom_unlock(struct qla_hw_data *ha ) { { qla82xx_wr_32(ha, 136323328UL, 4294967295U); qla82xx_rd_32(ha, 101826580UL); return; } } static int qla82xx_wait_rom_busy(struct qla_hw_data *ha ) { long timeout ; long done ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { timeout = 0L; done = 0L; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; goto ldv_66130; ldv_66129: tmp___0 = qla82xx_rd_32(ha, 154140676UL); done = (long )tmp___0; done = done & 4L; timeout = timeout + 1L; if (timeout >= rom_max_timeout) { ql_dbg(524288U, vha, 45066, "%s: Timeout reached waiting for rom busy.\n", (char *)"qla2xxx"); return (-1); } else { } ldv_66130: ; if (done == 0L) { goto ldv_66129; } else { } return (0); } } static int qla82xx_wait_rom_done(struct qla_hw_data *ha ) { long timeout ; long done ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { timeout = 0L; done = 0L; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; goto ldv_66139; ldv_66138: tmp___0 = qla82xx_rd_32(ha, 154140676UL); done = (long )tmp___0; done = done & 2L; timeout = timeout + 1L; if (timeout >= rom_max_timeout) { ql_dbg(524288U, vha, 45067, "%s: Timeout reached waiting for rom done.\n", (char *)"qla2xxx"); return (-1); } else { } ldv_66139: ; if (done == 0L) { goto ldv_66138; } else { } return (0); } } static int qla82xx_md_rw_32(struct qla_hw_data *ha , uint32_t off , u32 data , uint8_t flag ) { uint32_t off_value ; uint32_t rval ; { rval = 0U; writel(off & 4294901760U, (void volatile *)(ha->nx_pcibase + 1245280UL)); readl((void const volatile *)(ha->nx_pcibase + 1245280UL)); off_value = off & 65535U; if ((unsigned int )flag != 0U) { writel(data, (void volatile *)(((unsigned long )off_value + ha->nx_pcibase) + 1966080UL)); } else { rval = readl((void const volatile *)(((unsigned long )off_value + ha->nx_pcibase) + 1966080UL)); } return ((int )rval); } } static int qla82xx_do_rom_fast_read(struct qla_hw_data *ha , int addr , int *valp ) { { qla82xx_md_rw_32(ha, 1108410416U, 
(unsigned int )addr & 4294901760U, 1); *valp = qla82xx_md_rw_32(ha, (uint32_t )((addr & 65535) + 1108672512), 0U, 0); return (0); } } static int qla82xx_rom_fast_read(struct qla_hw_data *ha , int addr , int *valp ) { int ret ; int loops ; uint32_t lock_owner ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; int tmp___1 ; { loops = 0; lock_owner = 0U; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; goto ldv_66164; ldv_66163: __const_udelay(429500UL); schedule(); loops = loops + 1; ldv_66164: tmp___0 = qla82xx_rom_lock(ha); if (tmp___0 != 0 && loops <= 49999) { goto ldv_66163; } else { } if (loops > 49999) { tmp___1 = qla82xx_rd_32(ha, 136323328UL); lock_owner = (uint32_t )tmp___1; ql_log(0U, vha, 185, "Failed to acquire SEM2 lock, Lock Owner %u.\n", lock_owner); return (-1); } else { } ret = qla82xx_do_rom_fast_read(ha, addr, valp); qla82xx_rom_unlock(ha); return (ret); } } static int qla82xx_read_status_reg(struct qla_hw_data *ha , uint32_t *val ) { scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; int tmp___1 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; qla82xx_wr_32(ha, 154206212UL, 5U); qla82xx_wait_rom_busy(ha); tmp___0 = qla82xx_wait_rom_done(ha); if (tmp___0 != 0) { ql_log(1U, vha, 45068, "Error waiting for rom done.\n"); return (-1); } else { } tmp___1 = qla82xx_rd_32(ha, 154206232UL); *val = (uint32_t )tmp___1; return (0); } } static int qla82xx_flash_wait_write_finish(struct qla_hw_data *ha ) { long timeout ; uint32_t done ; uint32_t val ; int ret ; scsi_qla_host_t *vha ; void *tmp ; { timeout = 0L; done = 1U; ret = 0; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; qla82xx_wr_32(ha, 154206224UL, 0U); goto ldv_66181; ldv_66180: ret = qla82xx_read_status_reg(ha, & val); done = val & 1U; timeout = timeout + 1L; __const_udelay(42950UL); ___might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_nx.c", 1008, 0); _cond_resched(); if (timeout > 49999L) { ql_log(1U, vha, 45069, "Timeout reached waiting for write finish.\n"); return (-1); } else { } ldv_66181: ; if (done != 0U && ret == 0) { goto ldv_66180; } else { } return (ret); } } static int qla82xx_flash_set_write_enable(struct qla_hw_data *ha ) { uint32_t val ; int tmp ; int tmp___0 ; { qla82xx_wait_rom_busy(ha); qla82xx_wr_32(ha, 154206224UL, 0U); qla82xx_wr_32(ha, 154206212UL, 6U); qla82xx_wait_rom_busy(ha); tmp = qla82xx_wait_rom_done(ha); if (tmp != 0) { return (-1); } else { } tmp___0 = qla82xx_read_status_reg(ha, & val); if (tmp___0 != 0) { return (-1); } else { } if ((val & 2U) == 0U) { return (-1); } else { } return (0); } } static int qla82xx_write_status_reg(struct qla_hw_data *ha , uint32_t val ) { scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = qla82xx_flash_set_write_enable(ha); if (tmp___0 != 0) { return (-1); } else { } qla82xx_wr_32(ha, 154206220UL, val); qla82xx_wr_32(ha, 154206212UL, 1U); tmp___1 = qla82xx_wait_rom_done(ha); if (tmp___1 != 0) { ql_log(1U, vha, 45070, "Error waiting for rom done.\n"); return (-1); } else { } tmp___2 = qla82xx_flash_wait_write_finish(ha); return (tmp___2); } } static int qla82xx_write_disable_flash(struct qla_hw_data *ha ) { scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; qla82xx_wr_32(ha, 154206212UL, 4U); 
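/* The value 4U written to the flattened register offset 154206212UL matches
 * the standard SPI NOR WRDI (write-disable) opcode; the sibling helpers issue
 * 5U (RDSR), 6U (WREN), 1U (WRSR), 2U (page program) and 216U (0xD8, sector
 * erase) through the same register. The symbolic register and opcode names
 * were lost in the CIL flattening, so this mapping is an inference from the
 * common SPI flash command set, not taken from the generated source. */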
tmp___0 = qla82xx_wait_rom_done(ha); if (tmp___0 != 0) { ql_log(1U, vha, 45071, "Error waiting for rom done.\n"); return (-1); } else { } return (0); } } static int ql82xx_rom_lock_d(struct qla_hw_data *ha ) { int loops ; uint32_t lock_owner ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; int tmp___1 ; { loops = 0; lock_owner = 0U; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; goto ldv_66204; ldv_66203: __const_udelay(429500UL); ___might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_nx.c", 1073, 0); _cond_resched(); loops = loops + 1; ldv_66204: tmp___0 = qla82xx_rom_lock(ha); if (tmp___0 != 0 && loops <= 49999) { goto ldv_66203; } else { } if (loops > 49999) { tmp___1 = qla82xx_rd_32(ha, 136323328UL); lock_owner = (uint32_t )tmp___1; ql_log(1U, vha, 45072, "ROM lock failed, Lock Owner %u.\n", lock_owner); return (-1); } else { } return (0); } } static int qla82xx_write_flash_dword(struct qla_hw_data *ha , uint32_t flashaddr , uint32_t data ) { int ret ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; int tmp___1 ; { ret = 0; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(1U, vha, 45073, "ROM lock failed.\n"); return (ret); } else { } tmp___0 = qla82xx_flash_set_write_enable(ha); if (tmp___0 != 0) { goto done_write; } else { } qla82xx_wr_32(ha, 154206220UL, data); qla82xx_wr_32(ha, 154206216UL, flashaddr); qla82xx_wr_32(ha, 154206224UL, 3U); qla82xx_wr_32(ha, 154206212UL, 2U); qla82xx_wait_rom_busy(ha); tmp___1 = qla82xx_wait_rom_done(ha); if (tmp___1 != 0) { ql_log(1U, vha, 45074, "Error waiting for rom done.\n"); ret = -1; goto done_write; } else { } ret = qla82xx_flash_wait_write_finish(ha); done_write: qla82xx_rom_unlock(ha); return (ret); } } static int qla82xx_pinit_from_rom(scsi_qla_host_t *vha ) { int addr ; int val ; int i ; struct crb_addr_pair *buf ; unsigned long off ; unsigned int offset ; unsigned int n ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; void *tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long tmp___5 ; { ha = vha->hw; qla82xx_rom_lock(ha); qla82xx_wr_32(ha, 153092112UL, 0U); qla82xx_wr_32(ha, 153092116UL, 0U); qla82xx_wr_32(ha, 153092120UL, 0U); qla82xx_wr_32(ha, 153092124UL, 0U); qla82xx_wr_32(ha, 153092128UL, 0U); qla82xx_wr_32(ha, 153092132UL, 0U); qla82xx_wr_32(ha, 106954816UL, 255U); qla82xx_wr_32(ha, 107413504UL, 0U); qla82xx_wr_32(ha, 107479040UL, 0U); qla82xx_wr_32(ha, 107544576UL, 0U); qla82xx_wr_32(ha, 107610112UL, 0U); qla82xx_wr_32(ha, 107675648UL, 0U); val = qla82xx_rd_32(ha, 105910272UL); qla82xx_wr_32(ha, 105910272UL, (u32 )val & 4294967294U); qla82xx_wr_32(ha, 133174016UL, 1U); qla82xx_wr_32(ha, 142606336UL, 0U); qla82xx_wr_32(ha, 142606344UL, 0U); qla82xx_wr_32(ha, 142606352UL, 0U); qla82xx_wr_32(ha, 142606360UL, 0U); qla82xx_wr_32(ha, 142606592UL, 0U); qla82xx_wr_32(ha, 142606848UL, 0U); qla82xx_wr_32(ha, 118489148UL, 1U); qla82xx_wr_32(ha, 119537724UL, 1U); qla82xx_wr_32(ha, 120586300UL, 1U); qla82xx_wr_32(ha, 121634876UL, 1U); qla82xx_wr_32(ha, 116391996UL, 1U); msleep(20U); tmp = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp != 0) { qla82xx_wr_32(ha, 154140680UL, 4278190079U); } else { qla82xx_wr_32(ha, 154140680UL, 4294967295U); } qla82xx_rom_unlock(ha); tmp___0 = qla82xx_rom_fast_read(ha, 0, (int *)(& n)); if (tmp___0 != 0 || n != 
3405695742U) { ql_log(0U, vha, 110, "Error Reading crb_init area: n: %08x.\n", n); return (-1); } else { tmp___1 = qla82xx_rom_fast_read(ha, 4, (int *)(& n)); if (tmp___1 != 0) { ql_log(0U, vha, 110, "Error Reading crb_init area: n: %08x.\n", n); return (-1); } else { } } offset = n & 65535U; n = n >> 16; if (n > 1023U) { ql_log(0U, vha, 113, "Card flash not initialized:n=0x%x.\n", n); return (-1); } else { } ql_log(2U, vha, 114, "%d CRB init values found in ROM.\n", n); tmp___2 = kmalloc((unsigned long )n * 16UL, 208U); buf = (struct crb_addr_pair___0 *)tmp___2; if ((unsigned long )buf == (unsigned long )((struct crb_addr_pair___0 *)0)) { ql_log(0U, vha, 268, "Unable to allocate memory.\n"); return (-1); } else { } i = 0; goto ldv_66229; ldv_66228: tmp___3 = qla82xx_rom_fast_read(ha, (int )(((unsigned int )(i * 2) + offset) * 4U), & val); if (tmp___3 != 0) { kfree((void const *)buf); return (-1); } else { tmp___4 = qla82xx_rom_fast_read(ha, (int )((((unsigned int )(i * 2) + offset) + 1U) * 4U), & addr); if (tmp___4 != 0) { kfree((void const *)buf); return (-1); } else { } } (buf + (unsigned long )i)->addr = (long )addr; (buf + (unsigned long )i)->data = (long )val; i = i + 1; ldv_66229: ; if ((unsigned int )i < n) { goto ldv_66228; } else { } i = 0; goto ldv_66233; ldv_66232: tmp___5 = qla82xx_decode_crb_addr((unsigned long )(buf + (unsigned long )i)->addr); off = tmp___5 + 100663296UL; if (off == 136323580UL) { goto ldv_66231; } else { } if (off == 154140860UL) { goto ldv_66231; } else { } if (off == 154140872UL) { goto ldv_66231; } else { } if (off == 101785664UL) { goto ldv_66231; } else { } if (off == 101785672UL) { goto ldv_66231; } else { } if ((off & 267386880UL) == 161480704UL) { goto ldv_66231; } else { } if ((off & 267386880UL) == 102760448UL) { goto ldv_66231; } else { } if (off == 4294967295UL) { ql_log(0U, vha, 278, "Unknown addr: 0x%08lx.\n", (buf + (unsigned long )i)->addr); goto ldv_66231; } else { } qla82xx_wr_32(ha, off, (u32 )(buf + (unsigned long )i)->data); if (off == 154140680UL) { msleep(1000U); } else { } msleep(1U); ldv_66231: i = i + 1; ldv_66233: ; if ((unsigned int )i < n) { goto ldv_66232; } else { } kfree((void const *)buf); qla82xx_wr_32(ha, 122683628UL, 30U); qla82xx_wr_32(ha, 122683468UL, 8U); qla82xx_wr_32(ha, 123732044UL, 8U); qla82xx_wr_32(ha, 118489096UL, 0U); qla82xx_wr_32(ha, 118489100UL, 0U); qla82xx_wr_32(ha, 119537672UL, 0U); qla82xx_wr_32(ha, 119537676UL, 0U); qla82xx_wr_32(ha, 120586248UL, 0U); qla82xx_wr_32(ha, 120586252UL, 0U); qla82xx_wr_32(ha, 121634824UL, 0U); qla82xx_wr_32(ha, 121634828UL, 0U); return (0); } } static int qla82xx_pci_mem_write_2M(struct qla_hw_data *ha , u64 off , void *data , int size ) { int i ; int j ; int ret ; int loop ; int sz[2U] ; int off0 ; int scale ; int shift_amount ; int startword ; uint32_t temp ; uint64_t off8 ; uint64_t mem_crb ; uint64_t tmpw ; uint64_t word[2U] ; int tmp ; unsigned long tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { ret = 0; word[0] = 0ULL; word[1] = 0ULL; if (off > 12884901887ULL && off <= 12952010751ULL) { mem_crb = 131072000ULL; } else { mem_crb = 102760448ULL; tmp___0 = qla82xx_pci_mem_bound_check(ha, off, size); if (tmp___0 == 0UL) { tmp = qla82xx_pci_mem_write_direct(ha, off, data, size); return (tmp); } else { } } off0 = (int )off & 7; sz[0] = size < 8 - off0 ? 
size : 8 - off0; sz[1] = size - sz[0]; off8 = off & 4294967280ULL; loop = (int )((unsigned int )((((off & 15ULL) + (u64 )size) - 1ULL) >> 4) + 1U); shift_amount = 4; scale = 2; startword = (int )((off & 15ULL) / 8ULL); i = 0; goto ldv_66256; ldv_66255: tmp___1 = qla82xx_pci_mem_read_2M(ha, (uint64_t )(i << shift_amount) + off8, (void *)(& word) + (unsigned long )(i * scale), 8); if (tmp___1 != 0) { return (-1); } else { } i = i + 1; ldv_66256: ; if (i < loop) { goto ldv_66255; } else { } switch (size) { case 1: tmpw = (uint64_t )*((uint8_t *)data); goto ldv_66259; case 2: tmpw = (uint64_t )*((uint16_t *)data); goto ldv_66259; case 4: tmpw = (uint64_t )*((uint32_t *)data); goto ldv_66259; case 8: ; default: tmpw = *((uint64_t *)data); goto ldv_66259; } ldv_66259: ; if (sz[0] == 8) { word[startword] = tmpw; } else { word[startword] = word[startword] & ~ (~ (0xffffffffffffffffULL << sz[0] * 8) << off0 * 8); word[startword] = word[startword] | (tmpw << off0 * 8); } if (sz[1] != 0) { word[startword + 1] = word[startword + 1] & ~ (0xffffffffffffffffULL << sz[1] * 8); word[startword + 1] = word[startword + 1] | (tmpw >> sz[0] * 8); } else { } i = 0; goto ldv_66270; ldv_66269: temp = (uint32_t )(i << shift_amount) + (uint32_t )off8; qla82xx_wr_32(ha, (ulong )(mem_crb + 148ULL), temp); temp = 0U; qla82xx_wr_32(ha, (ulong )(mem_crb + 152ULL), temp); temp = (uint32_t )word[i * scale]; qla82xx_wr_32(ha, (ulong )(mem_crb + 160ULL), temp); temp = (uint32_t )(word[i * scale] >> 32); qla82xx_wr_32(ha, (ulong )(mem_crb + 164ULL), temp); temp = (uint32_t )word[i * scale + 1]; qla82xx_wr_32(ha, (ulong )(mem_crb + 176ULL), temp); temp = (uint32_t )(word[i * scale + 1] >> 32); qla82xx_wr_32(ha, (ulong )(mem_crb + 180ULL), temp); temp = 6U; qla82xx_wr_32(ha, (ulong )(mem_crb + 144ULL), temp); temp = 7U; qla82xx_wr_32(ha, (ulong )(mem_crb + 144ULL), temp); j = 0; goto ldv_66266; ldv_66265: tmp___2 = qla82xx_rd_32(ha, (ulong )(mem_crb + 144ULL)); temp = (uint32_t )tmp___2; if ((temp & 8U) == 0U) { goto ldv_66264; } else { } j = j + 1; ldv_66266: ; if (j <= 999) { goto ldv_66265; } else { } ldv_66264: ; if (j > 999) { tmp___3 = __printk_ratelimit("qla82xx_pci_mem_write_2M"); if (tmp___3 != 0) { dev_err((struct device const *)(& (ha->pdev)->dev), "failed to write through agent.\n"); } else { } ret = -1; goto ldv_66268; } else { } i = i + 1; ldv_66270: ; if (i < loop) { goto ldv_66269; } else { } ldv_66268: ; return (ret); } } static int qla82xx_fw_load_from_flash(struct qla_hw_data *ha ) { int i ; long size ; long flashaddr ; long memaddr ; u64 data ; u32 high ; u32 low ; int tmp ; int tmp___0 ; { size = 0L; flashaddr = (long )(ha->flt_region_bootload << 2); memaddr = 65536L; size = 122880L; i = 0; goto ldv_66282; ldv_66281: tmp = qla82xx_rom_fast_read(ha, (int )flashaddr, (int *)(& low)); if (tmp != 0) { return (-1); } else { tmp___0 = qla82xx_rom_fast_read(ha, (int )((unsigned int )flashaddr + 4U), (int *)(& high)); if (tmp___0 != 0) { return (-1); } else { } } data = ((unsigned long long )high << 32) | (unsigned long long )low; qla82xx_pci_mem_write_2M(ha, (u64 )memaddr, (void *)(& data), 8); flashaddr = flashaddr + 8L; memaddr = memaddr + 8L; if (((unsigned int )i & 4095U) == 0U) { msleep(1U); } else { } i = i + 1; ldv_66282: ; if ((long )i < size) { goto ldv_66281; } else { } __const_udelay(429500UL); _raw_read_lock(& ha->hw_lock); qla82xx_wr_32(ha, 118489112UL, 4128U); qla82xx_wr_32(ha, 154140680UL, 8388638U); _raw_read_unlock(& ha->hw_lock); return (0); } } int qla82xx_pci_mem_read_2M(struct qla_hw_data *ha 
, u64 off , void *data , int size ) { int i ; int j ; int k ; int start ; int end ; int loop ; int sz[2U] ; int off0[2U] ; int shift_amount ; uint32_t temp ; uint64_t off8 ; uint64_t val ; uint64_t mem_crb ; uint64_t word[2U] ; int tmp ; unsigned long tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { j = 0; word[0] = 0ULL; word[1] = 0ULL; if (off > 12884901887ULL && off <= 12952010751ULL) { mem_crb = 131072000ULL; } else { mem_crb = 102760448ULL; tmp___0 = qla82xx_pci_mem_bound_check(ha, off, size); if (tmp___0 == 0UL) { tmp = qla82xx_pci_mem_read_direct(ha, off, data, size); return (tmp); } else { } } off8 = off & 4294967280ULL; off0[0] = (int )off & 15; sz[0] = size < 16 - off0[0] ? size : 16 - off0[0]; shift_amount = 4; loop = (((off0[0] + size) + -1) >> shift_amount) + 1; off0[1] = 0; sz[1] = size - sz[0]; i = 0; goto ldv_66313; ldv_66312: temp = (uint32_t )(i << shift_amount) + (uint32_t )off8; qla82xx_wr_32(ha, (ulong )(mem_crb + 148ULL), temp); temp = 0U; qla82xx_wr_32(ha, (ulong )(mem_crb + 152ULL), temp); temp = 2U; qla82xx_wr_32(ha, (ulong )(mem_crb + 144ULL), temp); temp = 3U; qla82xx_wr_32(ha, (ulong )(mem_crb + 144ULL), temp); j = 0; goto ldv_66306; ldv_66305: tmp___1 = qla82xx_rd_32(ha, (ulong )(mem_crb + 144ULL)); temp = (uint32_t )tmp___1; if ((temp & 8U) == 0U) { goto ldv_66304; } else { } j = j + 1; ldv_66306: ; if (j <= 999) { goto ldv_66305; } else { } ldv_66304: ; if (j > 999) { tmp___2 = __printk_ratelimit("qla82xx_pci_mem_read_2M"); if (tmp___2 != 0) { dev_err((struct device const *)(& (ha->pdev)->dev), "failed to read through agent.\n"); } else { } goto ldv_66308; } else { } start = off0[i] >> 2; end = ((off0[i] + sz[i]) + -1) >> 2; k = start; goto ldv_66310; ldv_66309: tmp___3 = qla82xx_rd_32(ha, (ulong )((uint64_t )((k + 42) * 4) + mem_crb)); temp = (uint32_t )tmp___3; word[i] = word[i] | ((unsigned long long )temp << (k & 1) * 32); k = k + 1; ldv_66310: ; if (k <= end) { goto ldv_66309; } else { } i = i + 1; ldv_66313: ; if (i < loop) { goto ldv_66312; } else { } ldv_66308: ; if (j > 999) { return (-1); } else { } if ((off0[0] & 7) == 0) { val = word[0]; } else { val = ((word[0] >> off0[0] * 8) & ~ (0xffffffffffffffffULL << sz[0] * 8)) | ((word[1] & ~ (0xffffffffffffffffULL << sz[1] * 8)) << sz[0] * 8); } switch (size) { case 1: *((uint8_t *)data) = (uint8_t )val; goto ldv_66315; case 2: *((uint16_t *)data) = (uint16_t )val; goto ldv_66315; case 4: *((uint32_t *)data) = (uint32_t )val; goto ldv_66315; case 8: *((uint64_t *)data) = val; goto ldv_66315; } ldv_66315: ; return (0); } } static struct qla82xx_uri_table_desc *qla82xx_get_table_desc(u8 const *unirom , int section ) { uint32_t i ; struct qla82xx_uri_table_desc *directory ; __le32 offset ; __le32 tab_type ; __le32 entries ; { directory = (struct qla82xx_uri_table_desc *)unirom; entries = directory->num_entries; i = 0U; goto ldv_66329; ldv_66328: offset = directory->findex + directory->entry_size * i; tab_type = *((u32 *)unirom + ((unsigned long )offset + 8UL)); if ((__le32 )section == tab_type) { return ((struct qla82xx_uri_table_desc *)unirom + (unsigned long )offset); } else { } i = i + 1U; ldv_66329: ; if (i < entries) { goto ldv_66328; } else { } return ((struct qla82xx_uri_table_desc *)0); } } static struct qla82xx_uri_data_desc *qla82xx_get_data_desc(struct qla_hw_data *ha , u32 section , u32 idx_offset ) { u8 const *unirom ; int idx ; struct qla82xx_uri_table_desc *tab_desc ; __le32 offset ; { unirom = ((ha->hablob)->fw)->data; idx = *((int *)unirom + ((unsigned long )ha->file_prd_off + 
(unsigned long )idx_offset)); tab_desc = (struct qla82xx_uri_table_desc *)0; tab_desc = qla82xx_get_table_desc(unirom, (int )section); if ((unsigned long )tab_desc == (unsigned long )((struct qla82xx_uri_table_desc *)0)) { return ((struct qla82xx_uri_data_desc *)0); } else { } offset = tab_desc->findex + tab_desc->entry_size * (uint32_t )idx; return ((struct qla82xx_uri_data_desc *)unirom + (unsigned long )offset); } } static u8 *qla82xx_get_bootld_offset(struct qla_hw_data *ha ) { u32 offset ; struct qla82xx_uri_data_desc *uri_desc ; { offset = 65536U; uri_desc = (struct qla82xx_uri_data_desc *)0; if ((unsigned int )ha->fw_type == 3U) { uri_desc = qla82xx_get_data_desc(ha, 6U, 27U); if ((unsigned long )uri_desc != (unsigned long )((struct qla82xx_uri_data_desc *)0)) { offset = uri_desc->findex; } else { } } else { } return ((u8 *)((ha->hablob)->fw)->data + (unsigned long )offset); } } static __le32 qla82xx_get_fw_size(struct qla_hw_data *ha ) { struct qla82xx_uri_data_desc *uri_desc ; { uri_desc = (struct qla82xx_uri_data_desc *)0; if ((unsigned int )ha->fw_type == 3U) { uri_desc = qla82xx_get_data_desc(ha, 7U, 29U); if ((unsigned long )uri_desc != (unsigned long )((struct qla82xx_uri_data_desc *)0)) { return (uri_desc->size); } else { } } else { } return (*((u32 *)((ha->hablob)->fw)->data + 4097036U)); } } static u8 *qla82xx_get_fw_offs(struct qla_hw_data *ha ) { u32 offset ; struct qla82xx_uri_data_desc *uri_desc ; { offset = 1048576U; uri_desc = (struct qla82xx_uri_data_desc *)0; if ((unsigned int )ha->fw_type == 3U) { uri_desc = qla82xx_get_data_desc(ha, 7U, 29U); if ((unsigned long )uri_desc != (unsigned long )((struct qla82xx_uri_data_desc *)0)) { offset = uri_desc->findex; } else { } } else { } return ((u8 *)((ha->hablob)->fw)->data + (unsigned long )offset); } } int qla82xx_pci_region_offset(struct pci_dev *pdev , int region ) { unsigned long val ; u32 control ; { val = 0UL; switch (region) { case 0: val = 0UL; goto ldv_66361; case 1: pci_read_config_dword((struct pci_dev const *)pdev, 68, & control); val = (unsigned long )(control + 8192U); goto ldv_66361; } ldv_66361: ; return ((int )val); } } int qla82xx_iospace_config(struct qla_hw_data *ha ) { uint32_t len ; int tmp ; void *tmp___0 ; void *tmp___1 ; uint8_t tmp___2 ; { len = 0U; tmp = pci_request_regions(ha->pdev, "qla2xxx"); if (tmp != 0) { ql_log_pci(0U, ha->pdev, 12, "Failed to reserver selected regions.\n"); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[0].flags & 512UL) == 0UL) { ql_log_pci(0U, ha->pdev, 13, "Region #0 not an MMIO resource, aborting.\n"); goto iospace_error_exit; } else { } len = (ha->pdev)->resource[0].start != 0ULL || (ha->pdev)->resource[0].end != (ha->pdev)->resource[0].start ? 
((uint32_t )(ha->pdev)->resource[0].end - (uint32_t )(ha->pdev)->resource[0].start) + 1U : 0U; tmp___0 = ioremap((ha->pdev)->resource[0].start, (unsigned long )len); ha->nx_pcibase = (unsigned long )tmp___0; if (ha->nx_pcibase == 0UL) { ql_log_pci(0U, ha->pdev, 14, "Cannot remap pcibase MMIO, aborting.\n"); goto iospace_error_exit; } else { } if ((ha->device_type & 262144U) != 0U) { ha->iobase = (device_reg_t *)ha->nx_pcibase; } else if ((ha->device_type & 16384U) != 0U) { ha->iobase = (device_reg_t *)(((unsigned long )((ha->pdev)->devfn << 11) + ha->nx_pcibase) + 770048UL); } else { } if (ql2xdbwr == 0) { tmp___1 = ioremap((ha->pdev)->resource[4].start + (resource_size_t )((ha->pdev)->devfn << 12), 4UL); ha->nxdb_wr_ptr = (unsigned long )tmp___1; if (ha->nxdb_wr_ptr == 0UL) { ql_log_pci(0U, ha->pdev, 15, "Cannot remap MMIO, aborting.\n"); goto iospace_error_exit; } else { } ha->nxdb_rd_ptr = (uint8_t *)(((unsigned long )((ha->pdev)->devfn * 8U) + ha->nx_pcibase) + 524288UL); } else { ha->nxdb_wr_ptr = (ha->pdev)->devfn == 6U ? 136323512UL : 136323516UL; } tmp___2 = 1U; ha->max_rsp_queues = tmp___2; ha->max_req_queues = tmp___2; ha->msix_count = (unsigned int )((uint16_t )ha->max_rsp_queues) + 1U; ql_dbg_pci(1048576U, ha->pdev, 49158, "nx_pci_base=%p iobase=%p max_req_queues=%d msix_count=%d.\n", (void *)ha->nx_pcibase, ha->iobase, (int )ha->max_req_queues, (int )ha->msix_count); ql_dbg_pci(1073741824U, ha->pdev, 16, "nx_pci_base=%p iobase=%p max_req_queues=%d msix_count=%d.\n", (void *)ha->nx_pcibase, ha->iobase, (int )ha->max_req_queues, (int )ha->msix_count); return (0); iospace_error_exit: ; return (-12); } } int qla82xx_pci_config(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; int ret ; { ha = vha->hw; pci_set_master(ha->pdev); ret = pci_set_mwi(ha->pdev); ha->chip_revision = (uint16_t )(ha->pdev)->revision; ql_dbg(1073741824U, vha, 67, "Chip revision:%d.\n", (int )ha->chip_revision); return (0); } } void qla82xx_reset_chip(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; (*((ha->isp_ops)->disable_intrs))(ha); return; } } void qla82xx_config_rings(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct device_reg_82xx *reg ; struct init_cb_81xx *icb ; struct req_que *req ; struct rsp_que *rsp ; { ha = vha->hw; reg = & (ha->iobase)->isp82; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); icb = (struct init_cb_81xx *)ha->init_cb; icb->request_q_outpointer = 0U; icb->response_q_inpointer = 0U; icb->request_q_length = req->length; icb->response_q_length = rsp->length; icb->request_q_address[0] = (unsigned int )req->dma; icb->request_q_address[1] = (unsigned int )(req->dma >> 32ULL); icb->response_q_address[0] = (unsigned int )rsp->dma; icb->response_q_address[1] = (unsigned int )(rsp->dma >> 32ULL); writel(0U, (void volatile *)(& reg->req_q_out)); writel(0U, (void volatile *)(& reg->rsp_q_in)); writel(0U, (void volatile *)(& reg->rsp_q_out)); return; } } static int qla82xx_fw_load_from_blob(struct qla_hw_data *ha ) { u64 *ptr64 ; u32 i ; u32 flashaddr ; u32 size ; __le64 data ; u8 *tmp ; int tmp___0 ; __le32 tmp___1 ; u8 *tmp___2 ; int tmp___3 ; { size = 122880U; tmp = qla82xx_get_bootld_offset(ha); ptr64 = (u64 *)tmp; flashaddr = 65536U; i = 0U; goto ldv_66394; ldv_66393: data = *(ptr64 + (unsigned long )i); tmp___0 = qla82xx_pci_mem_write_2M(ha, (u64 )flashaddr, (void *)(& data), 8); if (tmp___0 != 0) { return (-5); } else { } flashaddr = flashaddr + 8U; i = i + 1U; ldv_66394: ; if (i < size) { goto ldv_66393; } else { } flashaddr = 274432U; tmp___1 = 
qla82xx_get_fw_size(ha); size = tmp___1 / 8U; tmp___2 = qla82xx_get_fw_offs(ha); ptr64 = (u64 *)tmp___2; i = 0U; goto ldv_66397; ldv_66396: data = *(ptr64 + (unsigned long )i); tmp___3 = qla82xx_pci_mem_write_2M(ha, (u64 )flashaddr, (void *)(& data), 8); if (tmp___3 != 0) { return (-5); } else { } flashaddr = flashaddr + 8U; i = i + 1U; ldv_66397: ; if (i < size) { goto ldv_66396; } else { } __const_udelay(429500UL); qla82xx_wr_32(ha, 136323580UL, 305419896U); _raw_read_lock(& ha->hw_lock); qla82xx_wr_32(ha, 118489112UL, 4128U); qla82xx_wr_32(ha, 154140680UL, 8388638U); _raw_read_unlock(& ha->hw_lock); return (0); } } static int qla82xx_set_product_offset(struct qla_hw_data *ha ) { struct qla82xx_uri_table_desc *ptab_desc ; uint8_t const *unirom ; uint32_t i ; __le32 entries ; __le32 flags ; __le32 file_chiprev ; __le32 offset ; uint8_t chiprev ; int mn_present ; uint32_t flagbit ; { ptab_desc = (struct qla82xx_uri_table_desc *)0; unirom = (uint8_t const *)((ha->hablob)->fw)->data; chiprev = (uint8_t )ha->chip_revision; mn_present = 0; ptab_desc = qla82xx_get_table_desc(unirom, 0); if ((unsigned long )ptab_desc == (unsigned long )((struct qla82xx_uri_table_desc *)0)) { return (-1); } else { } entries = ptab_desc->num_entries; i = 0U; goto ldv_66413; ldv_66412: offset = ptab_desc->findex + ptab_desc->entry_size * i; flags = (unsigned int )*((int *)unirom + ((unsigned long )offset + 11UL)); file_chiprev = (unsigned int )*((int *)unirom + ((unsigned long )offset + 10UL)); flagbit = mn_present != 0 ? 1U : 2U; if ((__le32 )chiprev == file_chiprev && (int )((unsigned long long )flags >> (int )flagbit) & 1) { ha->file_prd_off = offset; return (0); } else { } i = i + 1U; ldv_66413: ; if (i < entries) { goto ldv_66412; } else { } return (-1); } } static int qla82xx_validate_firmware_blob(scsi_qla_host_t *vha , uint8_t fw_type ) { __le32 val ; uint32_t min_size ; struct qla_hw_data *ha ; struct firmware const *fw ; int tmp ; { ha = vha->hw; fw = (ha->hablob)->fw; ha->fw_type = fw_type; if ((unsigned int )fw_type == 3U) { tmp = qla82xx_set_product_offset(ha); if (tmp != 0) { return (-22); } else { } min_size = 819200U; } else { val = *((u32 *)fw->data + 16680U); if (val != 305419896U) { return (-22); } else { } min_size = 4194303U; } if ((unsigned long )fw->size < (unsigned long )min_size) { return (-22); } else { } return (0); } } static int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha ) { u32 val ; int retries ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; int tmp___1 ; { val = 0U; retries = 60; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ldv_66434: _raw_read_lock(& ha->hw_lock); tmp___0 = qla82xx_rd_32(ha, 136323664UL); val = (u32 )tmp___0; _raw_read_unlock(& ha->hw_lock); switch (val) { case 65281U: ; case 61455U: ; return (0); case 65535U: ; goto ldv_66432; default: ; goto ldv_66432; } ldv_66432: ql_log(2U, vha, 168, "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n", val, retries); msleep(500U); retries = retries - 1; if (retries != 0) { goto ldv_66434; } else { } ql_log(0U, vha, 169, "Cmd Peg initialization failed: 0x%x.\n", val); tmp___1 = qla82xx_rd_32(ha, 154140764UL); val = (u32 )tmp___1; _raw_read_lock(& ha->hw_lock); qla82xx_wr_32(ha, 136323664UL, 65535U); _raw_read_unlock(& ha->hw_lock); return (258); } } static int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha ) { u32 val ; int retries ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { val = 0U; retries = 60; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ldv_66447: _raw_read_lock(& 
ha->hw_lock); tmp___0 = qla82xx_rd_32(ha, 136323900UL); val = (u32 )tmp___0; _raw_read_unlock(& ha->hw_lock); switch (val) { case 65281U: ; case 61455U: ; return (0); case 65535U: ; goto ldv_66445; default: ; goto ldv_66445; } ldv_66445: ql_log(2U, vha, 171, "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n", val, retries); msleep(500U); retries = retries - 1; if (retries != 0) { goto ldv_66447; } else { } ql_log(0U, vha, 172, "Rcv Peg initializatin failed: 0x%x.\n", val); _raw_read_lock(& ha->hw_lock); qla82xx_wr_32(ha, 136323900UL, 65535U); _raw_read_unlock(& ha->hw_lock); return (258); } } static struct qla82xx_legacy_intr_set legacy_intr[8U] = { {128U, 101777688U, 101777704U, 101789696U}, {256U, 101777760U, 101777776U, 101789700U}, {512U, 101777764U, 101777780U, 101789704U}, {1024U, 101777768U, 101777784U, 101789708U}, {2048U, 101778272U, 101778288U, 101789712U}, {4096U, 101778276U, 101778292U, 101789716U}, {8192U, 101778280U, 101778296U, 101789720U}, {16384U, 101778284U, 101778300U, 101789724U}}; void qla82xx_mbx_completion(scsi_qla_host_t *vha , uint16_t mb0 ) { uint16_t cnt ; uint16_t *wptr ; struct qla_hw_data *ha ; struct device_reg_82xx *reg ; { ha = vha->hw; reg = & (ha->iobase)->isp82; wptr = (uint16_t *)(& reg->mailbox_out) + 1UL; ha->flags.mbox_int = 1U; ha->mailbox_out[0] = mb0; cnt = 1U; goto ldv_66459; ldv_66458: ha->mailbox_out[(int )cnt] = readw((void const volatile *)wptr); wptr = wptr + 1; cnt = (uint16_t )((int )cnt + 1); ldv_66459: ; if ((int )((unsigned short )ha->mbx_count) > (int )cnt) { goto ldv_66458; } else { } if ((unsigned long )ha->mcp == (unsigned long )((mbx_cmd_t *)0)) { ql_dbg(33554432U, vha, 20563, "MBX pointer OLD_ERROR.\n"); } else { } return; } } irqreturn_t qla82xx_intr_handler(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_82xx *reg ; int status ; int status1 ; unsigned long flags ; unsigned long iter ; uint32_t stat ; uint16_t mb[4U] ; raw_spinlock_t *tmp ; void *tmp___0 ; unsigned int tmp___1 ; unsigned long tmp___2 ; { status = 0; status1 = 0; stat = 0U; rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 45139, "%s: NULL response queue pointer.\n", "qla82xx_intr_handler"); return (0); } else { } ha = rsp->hw; if (*((unsigned long *)ha + 2UL) == 0UL) { status = qla82xx_rd_32(ha, 101777664UL); if ((ha->nx_legacy_intr.int_vec_bit & (uint32_t )status) == 0U) { return (0); } else { } status1 = qla82xx_rd_32(ha, 101785708UL); if ((status1 & 768) != 512) { return (0); } else { } } else { } qla82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_status_reg, 4294967295U); qla82xx_rd_32(ha, 101777664UL); qla82xx_rd_32(ha, 101777664UL); reg = & (ha->iobase)->isp82; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; iter = 1UL; goto ldv_66488; ldv_66487: tmp___1 = readl((void const volatile *)(& reg->host_int)); if (tmp___1 != 0U) { stat = readl((void const volatile *)(& reg->host_status)); switch (stat & 255U) { case 1U: ; case 2U: ; case 16U: ; case 17U: qla82xx_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; goto ldv_66483; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((void const volatile *)(& reg->mailbox_out) + 1U); mb[2] = readw((void const volatile *)(& reg->mailbox_out) + 2U); mb[3] = readw((void const volatile *)(& reg->mailbox_out) + 3U); qla2x00_async_event(vha, 
rsp, (uint16_t *)(& mb)); goto ldv_66483; case 19U: qla24xx_process_response_queue(vha, rsp); goto ldv_66483; default: ql_dbg(33554432U, vha, 20564, "Unrecognized interrupt type (%d).\n", stat & 255U); goto ldv_66483; } ldv_66483: ; } else { } writel(0U, (void volatile *)(& reg->host_int)); ldv_66488: tmp___2 = iter; iter = iter - 1UL; if (tmp___2 != 0UL) { goto ldv_66487; } else { } qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); if (*((unsigned long *)ha + 2UL) == 0UL) { qla82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_mask_reg, 64511U); } else { } return (1); } } irqreturn_t qla82xx_msix_default(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_82xx *reg ; int status ; unsigned long flags ; uint32_t stat ; uint32_t host_int ; uint16_t mb[4U] ; raw_spinlock_t *tmp ; void *tmp___0 ; bool tmp___1 ; { status = 0; stat = 0U; host_int = 0U; rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { printk("\016%s(): NULL response queue pointer.\n", "qla82xx_msix_default"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->isp82; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; host_int = readl((void const volatile *)(& reg->host_int)); tmp___1 = qla2x00_check_reg32_for_disconnect(vha, host_int); if ((int )tmp___1) { goto ldv_66507; } else { } if (host_int != 0U) { stat = readl((void const volatile *)(& reg->host_status)); switch (stat & 255U) { case 1U: ; case 2U: ; case 16U: ; case 17U: qla82xx_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; goto ldv_66512; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((void const volatile *)(& reg->mailbox_out) + 1U); mb[2] = readw((void const volatile *)(& reg->mailbox_out) + 2U); mb[3] = readw((void const volatile *)(& reg->mailbox_out) + 3U); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_66512; case 19U: qla24xx_process_response_queue(vha, rsp); goto ldv_66512; default: ql_dbg(33554432U, vha, 20545, "Unrecognized interrupt type (%d).\n", stat & 255U); goto ldv_66512; } ldv_66512: ; } else { } writel(0U, (void volatile *)(& reg->host_int)); ldv_66507: qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } irqreturn_t qla82xx_msix_rsp_q(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_82xx *reg ; unsigned long flags ; uint32_t host_int ; raw_spinlock_t *tmp ; void *tmp___0 ; bool tmp___1 ; { host_int = 0U; rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { printk("\016%s(): NULL response queue pointer.\n", "qla82xx_msix_rsp_q"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->isp82; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; host_int = readl((void const volatile *)(& reg->host_int)); tmp___1 = qla2x00_check_reg32_for_disconnect(vha, host_int); if ((int )tmp___1) { goto out; } else { } qla24xx_process_response_queue(vha, rsp); writel(0U, (void volatile *)(& reg->host_int)); out: spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } void qla82xx_poll(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct rsp_que 
*rsp ; struct device_reg_82xx *reg ; int status ; uint32_t stat ; uint32_t host_int ; uint16_t mb[4U] ; unsigned long flags ; raw_spinlock_t *tmp ; void *tmp___0 ; bool tmp___1 ; { status = 0; host_int = 0U; rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { printk("\016%s(): NULL response queue pointer.\n", "qla82xx_poll"); return; } else { } ha = rsp->hw; reg = & (ha->iobase)->isp82; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; host_int = readl((void const volatile *)(& reg->host_int)); tmp___1 = qla2x00_check_reg32_for_disconnect(vha, host_int); if ((int )tmp___1) { goto out; } else { } if (host_int != 0U) { stat = readl((void const volatile *)(& reg->host_status)); switch (stat & 255U) { case 1U: ; case 2U: ; case 16U: ; case 17U: qla82xx_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; goto ldv_66553; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((void const volatile *)(& reg->mailbox_out) + 1U); mb[2] = readw((void const volatile *)(& reg->mailbox_out) + 2U); mb[3] = readw((void const volatile *)(& reg->mailbox_out) + 3U); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_66553; case 19U: qla24xx_process_response_queue(vha, rsp); goto ldv_66553; default: ql_dbg(524288U, vha, 45075, "Unrecognized interrupt type (%d).\n", stat * 255U); goto ldv_66553; } ldv_66553: writel(0U, (void volatile *)(& reg->host_int)); } else { } out: spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } void qla82xx_enable_intrs(struct qla_hw_data *ha ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; qla82xx_mbx_intr_enable(vha); spin_lock_irq(& ha->hardware_lock); if ((ha->device_type & 262144U) != 0U) { qla8044_wr_reg(ha, 14536UL, 0U); } else { qla82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_mask_reg, 64511U); } spin_unlock_irq(& ha->hardware_lock); ha->interrupts_on = 1U; return; } } void qla82xx_disable_intrs(struct qla_hw_data *ha ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; qla82xx_mbx_intr_disable(vha); spin_lock_irq(& ha->hardware_lock); if ((ha->device_type & 262144U) != 0U) { qla8044_wr_reg(ha, 14536UL, 1U); } else { qla82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_mask_reg, 1024U); } spin_unlock_irq(& ha->hardware_lock); ha->interrupts_on = 0U; return; } } void qla82xx_init_flags(struct qla_hw_data *ha ) { struct qla82xx_legacy_intr_set *nx_legacy_intr ; struct lock_class_key __key ; { __rwlock_init(& ha->hw_lock, "&ha->hw_lock", & __key); ha->qdr_sn_window = -1; ha->ddr_mn_window = 4294967295U; ha->curr_window = 255U; ha->portnum = (unsigned int )((uint16_t )(ha->pdev)->devfn) & 7U; nx_legacy_intr = (struct qla82xx_legacy_intr_set *)(& legacy_intr) + (unsigned long )ha->portnum; ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg; ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; return; } } __inline void qla82xx_set_idc_version(scsi_qla_host_t *vha ) { int idc_ver ; uint32_t drv_active ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp; if ((uint32_t )(1 << (int )ha->portnum * 4) == drv_active) { qla82xx_wr_32(ha, 136323444UL, 1U); ql_log(2U, vha, 
45186, "IDC version updated to %d\n", 1); } else { idc_ver = qla82xx_rd_32(ha, 136323444UL); if (idc_ver != 1) { ql_log(2U, vha, 45187, "qla2xxx driver IDC version %d is not compatible with IDC version %d of the other drivers\n", 1, idc_ver); } else { } } return; } } void qla82xx_set_drv_active(scsi_qla_host_t *vha ) { uint32_t drv_active ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { ha = vha->hw; tmp = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp; if (drv_active == 4294967295U) { qla82xx_wr_32(ha, 136323384UL, 0U); tmp___0 = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp___0; } else { } drv_active = (uint32_t )(1 << (int )ha->portnum * 4) | drv_active; qla82xx_wr_32(ha, 136323384UL, drv_active); return; } } void qla82xx_clear_drv_active(struct qla_hw_data *ha ) { uint32_t drv_active ; int tmp ; { tmp = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp; drv_active = (uint32_t )(~ (1 << (int )ha->portnum * 4)) & drv_active; qla82xx_wr_32(ha, 136323384UL, drv_active); return; } } __inline static int qla82xx_need_reset(struct qla_hw_data *ha ) { uint32_t drv_state ; int rval ; int tmp ; { if (*((unsigned long *)ha + 2UL) != 0UL) { return (1); } else { tmp = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp; rval = (int )((uint32_t )(1 << (int )ha->portnum * 4) & drv_state); return (rval); } } } __inline static void qla82xx_set_rst_ready(struct qla_hw_data *ha ) { uint32_t drv_state ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; int tmp___1 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp___0; if (drv_state == 4294967295U) { qla82xx_wr_32(ha, 136323396UL, 0U); tmp___1 = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp___1; } else { } drv_state = (uint32_t )(1 << (int )ha->portnum * 4) | drv_state; ql_dbg(1073741824U, vha, 187, "drv_state = 0x%08x.\n", drv_state); qla82xx_wr_32(ha, 136323396UL, drv_state); return; } } __inline static void qla82xx_clear_rst_ready(struct qla_hw_data *ha ) { uint32_t drv_state ; int tmp ; { tmp = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp; drv_state = (uint32_t )(~ (1 << (int )ha->portnum * 4)) & drv_state; qla82xx_wr_32(ha, 136323396UL, drv_state); return; } } __inline static void qla82xx_set_qsnt_ready(struct qla_hw_data *ha ) { uint32_t qsnt_state ; int tmp ; { tmp = qla82xx_rd_32(ha, 136323396UL); qsnt_state = (uint32_t )tmp; qsnt_state = (uint32_t )(2 << (int )ha->portnum * 4) | qsnt_state; qla82xx_wr_32(ha, 136323396UL, qsnt_state); return; } } void qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t qsnt_state ; int tmp ; { ha = vha->hw; tmp = qla82xx_rd_32(ha, 136323396UL); qsnt_state = (uint32_t )tmp; qsnt_state = (uint32_t )(~ (2 << (int )ha->portnum * 4)) & qsnt_state; qla82xx_wr_32(ha, 136323396UL, qsnt_state); return; } } static int qla82xx_load_fw(scsi_qla_host_t *vha ) { int rst ; struct fw_blob *blob ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; struct fw_blob *tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { ha = vha->hw; tmp = qla82xx_pinit_from_rom(vha); if (tmp != 0) { ql_log(0U, vha, 159, "Error during CRB initialization.\n"); return (258); } else { } __const_udelay(2147500UL); rst = qla82xx_rd_32(ha, 154140680UL); rst = rst & -285212673; qla82xx_wr_32(ha, 154140680UL, (u32 )rst); if (ql2xfwloadbin == 2) { goto try_blob_fw; } else { } ql_log(2U, vha, 160, "Attempting to load firmware from flash.\n"); tmp___0 = 
qla82xx_fw_load_from_flash(ha); if (tmp___0 == 0) { ql_log(2U, vha, 161, "Firmware loaded successfully from flash.\n"); return (0); } else { ql_log(1U, vha, 264, "Firmware load from flash failed.\n"); } try_blob_fw: ql_log(2U, vha, 162, "Attempting to load firmware from blob.\n"); tmp___1 = qla2x00_request_firmware(vha); ha->hablob = tmp___1; blob = tmp___1; if ((unsigned long )blob == (unsigned long )((struct fw_blob *)0)) { ql_log(0U, vha, 163, "Firmware image not present.\n"); goto fw_load_failed; } else { } tmp___3 = qla82xx_validate_firmware_blob(vha, 4); if (tmp___3 != 0) { tmp___2 = qla82xx_validate_firmware_blob(vha, 3); if (tmp___2 != 0) { ql_log(0U, vha, 164, "No valid firmware image found.\n"); return (258); } else { } } else { } tmp___4 = qla82xx_fw_load_from_blob(ha); if (tmp___4 == 0) { ql_log(2U, vha, 165, "Firmware loaded successfully from binary blob.\n"); return (0); } else { ql_log(0U, vha, 166, "Firmware load failed for binary blob.\n"); blob->fw = (struct firmware const *)0; blob = (struct fw_blob *)0; goto fw_load_failed; } return (0); fw_load_failed: ; return (258); } } int qla82xx_start_firmware(scsi_qla_host_t *vha ) { uint16_t lnk ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; { ha = vha->hw; qla82xx_wr_32(ha, 136323788UL, 1431655765U); qla82xx_wr_32(ha, 136323664UL, 0U); qla82xx_wr_32(ha, 136323900UL, 0U); qla82xx_wr_32(ha, 136323240UL, 0U); qla82xx_wr_32(ha, 136323244UL, 0U); tmp = qla82xx_load_fw(vha); if (tmp != 0) { ql_log(0U, vha, 167, "Error trying to start fw.\n"); return (258); } else { } tmp___0 = qla82xx_check_cmdpeg_state(ha); if (tmp___0 != 0) { ql_log(0U, vha, 170, "Error during card handshake.\n"); return (258); } else { } pcie_capability_read_word(ha->pdev, 18, & lnk); ha->link_width = ((int )lnk >> 4) & 63; tmp___1 = qla82xx_check_rcvpeg_state(ha); return (tmp___1); } } static uint32_t *qla82xx_read_flash_data(scsi_qla_host_t *vha , uint32_t *dwptr , uint32_t faddr , uint32_t length ) { uint32_t i ; uint32_t val ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; i = 0U; goto ldv_66632; ldv_66631: tmp = qla82xx_rom_fast_read(ha, (int )faddr, (int *)(& val)); if (tmp != 0) { ql_log(1U, vha, 262, "Do ROM fast read failed.\n"); goto done_read; } else { } *(dwptr + (unsigned long )i) = val; i = i + 1U; faddr = faddr + 4U; ldv_66632: ; if (length / 4U > i) { goto ldv_66631; } else { } done_read: ; return (dwptr); } } static int qla82xx_unprotect_flash(struct qla_hw_data *ha ) { int ret ; uint32_t val ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(1U, vha, 45076, "ROM Lock failed.\n"); return (ret); } else { } ret = qla82xx_read_status_reg(ha, & val); if (ret < 0) { goto done_unprotect; } else { } val = val & 4294967235U; ret = qla82xx_write_status_reg(ha, val); if (ret < 0) { val = val | 60U; qla82xx_write_status_reg(ha, val); } else { } tmp___0 = qla82xx_write_disable_flash(ha); if (tmp___0 != 0) { ql_log(1U, vha, 45077, "Write disable failed.\n"); } else { } done_unprotect: qla82xx_rom_unlock(ha); return (ret); } } static int qla82xx_protect_flash(struct qla_hw_data *ha ) { int ret ; uint32_t val ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(1U, vha, 45078, "ROM Lock failed.\n"); return (ret); } else { } ret = qla82xx_read_status_reg(ha, & val); if (ret < 0) { goto done_protect; } else { } val = 
val | 60U; ret = qla82xx_write_status_reg(ha, val); if (ret < 0) { ql_log(1U, vha, 45079, "Write status register failed.\n"); } else { } tmp___0 = qla82xx_write_disable_flash(ha); if (tmp___0 != 0) { ql_log(1U, vha, 45080, "Write disable failed.\n"); } else { } done_protect: qla82xx_rom_unlock(ha); return (ret); } } static int qla82xx_erase_sector(struct qla_hw_data *ha , int addr ) { int ret ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { ret = 0; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(1U, vha, 45081, "ROM Lock failed.\n"); return (ret); } else { } qla82xx_flash_set_write_enable(ha); qla82xx_wr_32(ha, 154206216UL, (u32 )addr); qla82xx_wr_32(ha, 154206224UL, 3U); qla82xx_wr_32(ha, 154206212UL, 216U); tmp___0 = qla82xx_wait_rom_done(ha); if (tmp___0 != 0) { ql_log(1U, vha, 45082, "Error waiting for rom done.\n"); ret = -1; goto done; } else { } ret = qla82xx_flash_wait_write_finish(ha); done: qla82xx_rom_unlock(ha); return (ret); } } uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { { scsi_block_requests(vha->host); qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length); scsi_unblock_requests(vha->host); return (buf); } } static int qla82xx_write_flash_data(struct scsi_qla_host *vha , uint32_t *dwptr , uint32_t faddr , uint32_t dwords ) { int ret ; uint32_t liter ; uint32_t sec_mask ; uint32_t rest_addr ; dma_addr_t optrom_dma ; void *optrom ; int page_mode ; struct qla_hw_data *ha ; { optrom = (void *)0; page_mode = 0; ha = vha->hw; ret = -1; if ((page_mode != 0 && (faddr & 4095U) == 0U) && dwords > 1024U) { optrom = dma_alloc_attrs(& (ha->pdev)->dev, 4096UL, & optrom_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )optrom == (unsigned long )((void *)0)) { ql_log(1U, vha, 45083, "Unable to allocate memory for optrom burst write (%x KB).\n", 4); } else { } } else { } rest_addr = ha->fdt_block_size - 1U; sec_mask = ~ rest_addr; ret = qla82xx_unprotect_flash(ha); if (ret != 0) { ql_log(1U, vha, 45084, "Unable to unprotect flash for update.\n"); goto write_done; } else { } liter = 0U; goto ldv_66679; ldv_66678: ; if ((faddr & rest_addr) == 0U) { ret = qla82xx_erase_sector(ha, (int )faddr); if (ret != 0) { ql_log(1U, vha, 45085, "Unable to erase sector: address=%x.\n", faddr); goto ldv_66676; } else { } } else { } if ((unsigned long )optrom != (unsigned long )((void *)0) && liter + 1024U <= dwords) { memcpy(optrom, (void const *)dwptr, 4096UL); ret = qla2x00_load_ram(vha, optrom_dma, ha->flash_data_off | faddr, 1024U); if (ret != 0) { ql_log(1U, vha, 45086, "Unable to burst-write optrom segment (%x/%x/%llx).\n", ret, ha->flash_data_off | faddr, optrom_dma); ql_log(1U, vha, 45087, "Reverting to slow-write.\n"); dma_free_attrs(& (ha->pdev)->dev, 4096UL, optrom, optrom_dma, (struct dma_attrs *)0); optrom = (void *)0; } else { liter = liter + 1023U; faddr = faddr + 1023U; dwptr = dwptr + 1023UL; goto ldv_66677; } } else { } ret = qla82xx_write_flash_dword(ha, faddr, *dwptr); if (ret != 0) { ql_dbg(524288U, vha, 45088, "Unable to program flash address=%x data=%x.\n", faddr, *dwptr); goto ldv_66676; } else { } ldv_66677: liter = liter + 1U; faddr = faddr + 4U; dwptr = dwptr + 1; ldv_66679: ; if (liter < dwords) { goto ldv_66678; } else { } ldv_66676: ret = qla82xx_protect_flash(ha); if (ret != 0) { ql_log(1U, vha, 45089, "Unable to protect flash after update.\n"); } else { } write_done: ; if ((unsigned long )optrom != (unsigned long )((void 
*)0)) { dma_free_attrs(& (ha->pdev)->dev, 4096UL, optrom, optrom_dma, (struct dma_attrs *)0); } else { } return (ret); } } int qla82xx_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { int rval ; { scsi_block_requests(vha->host); rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset, length >> 2); scsi_unblock_requests(vha->host); if (rval != 0) { rval = 258; } else { rval = 0; } return (rval); } } void qla82xx_start_iocbs(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct req_que *req ; struct device_reg_82xx *reg ; uint32_t dbval ; unsigned int tmp ; { ha = vha->hw; req = *(ha->req_q_map); req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } reg = & (ha->iobase)->isp82; dbval = (uint32_t )(((int )ha->portnum << 5) | 4); dbval = ((uint32_t )((int )req->id << 8) | dbval) | (uint32_t )((int )req->ring_index << 16); if (ql2xdbwr != 0) { qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); } else { writel(dbval, (void volatile *)ha->nxdb_wr_ptr); __asm__ volatile ("sfence": : : "memory"); goto ldv_66695; ldv_66694: writel(dbval, (void volatile *)ha->nxdb_wr_ptr); __asm__ volatile ("sfence": : : "memory"); ldv_66695: tmp = readl((void const volatile *)ha->nxdb_rd_ptr); if (tmp != dbval) { goto ldv_66694; } else { } } return; } } static void qla82xx_rom_lock_recovery(struct qla_hw_data *ha ) { scsi_qla_host_t *vha ; void *tmp ; uint32_t lock_owner ; int tmp___0 ; int tmp___1 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; lock_owner = 0U; tmp___1 = qla82xx_rom_lock(ha); if (tmp___1 != 0) { tmp___0 = qla82xx_rd_32(ha, 136323328UL); lock_owner = (uint32_t )tmp___0; ql_log(2U, vha, 45090, "Resetting rom_lock, Lock Owner %u.\n", lock_owner); } else { } qla82xx_rom_unlock(ha); return; } } static int qla82xx_device_bootstrap(scsi_qla_host_t *vha ) { int rval ; int i ; uint32_t old_count ; uint32_t count ; struct qla_hw_data *ha ; int need_reset ; int tmp ; int tmp___0 ; { rval = 0; ha = vha->hw; need_reset = 0; need_reset = qla82xx_need_reset(ha); if (need_reset != 0) { if (*((unsigned long *)ha + 2UL) != 0UL) { qla82xx_rom_lock_recovery(ha); } else { } } else { tmp = qla82xx_rd_32(ha, 136323248UL); old_count = (uint32_t )tmp; i = 0; goto ldv_66713; ldv_66712: msleep(200U); tmp___0 = qla82xx_rd_32(ha, 136323248UL); count = (uint32_t )tmp___0; if (count != old_count) { rval = 0; goto dev_ready; } else { } i = i + 1; ldv_66713: ; if (i <= 9) { goto ldv_66712; } else { } qla82xx_rom_lock_recovery(ha); } ql_log(2U, vha, 158, "HW State: INITIALIZING.\n"); qla82xx_wr_32(ha, 136323392UL, 2U); qla82xx_idc_unlock(ha); rval = qla82xx_start_firmware(vha); qla82xx_idc_lock(ha); if (rval != 0) { ql_log(0U, vha, 173, "HW State: FAILED.\n"); qla82xx_clear_drv_active(ha); qla82xx_wr_32(ha, 136323392UL, 6U); return (rval); } else { } dev_ready: ql_log(2U, vha, 174, "HW State: READY.\n"); qla82xx_wr_32(ha, 136323392UL, 3U); return (0); } } static void qla82xx_need_qsnt_handler(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t dev_state ; uint32_t drv_state ; uint32_t drv_active ; unsigned long reset_timeout ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { ha = vha->hw; if (*((unsigned long *)vha + 19UL) != 0UL) { qla2x00_quiesce_io(vha); } else { } qla82xx_set_qsnt_ready(ha); reset_timeout = (unsigned long )jiffies + 7500UL; tmp = qla82xx_rd_32(ha, 136323396UL); drv_state 
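/* Added descriptive comment: quiescent handshake. After flagging this
   function quiesce-ready, the loop below polls the shared DRV_STATE and
   DRV_ACTIVE CRB registers (read here as raw offsets) until every active
   function has acknowledged; on timeout the device state is forced back to
   DEV_READY and a loop resync is performed instead. */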
= (uint32_t )tmp; tmp___0 = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp___0; drv_active = drv_active << 1; goto ldv_66730; ldv_66729: ; if ((long )((unsigned long )jiffies - reset_timeout) >= 0L) { ql_log(2U, vha, 45091, "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d DRV_STATE:%d.\n", (char *)"qla2xxx", drv_active, drv_state); qla82xx_wr_32(ha, 136323392UL, 3U); ql_log(2U, vha, 45093, "HW State: DEV_READY.\n"); qla82xx_idc_unlock(ha); qla2x00_perform_loop_resync(vha); qla82xx_idc_lock(ha); qla82xx_clear_qsnt_ready(vha); return; } else { } qla82xx_idc_unlock(ha); msleep(1000U); qla82xx_idc_lock(ha); tmp___1 = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp___1; tmp___2 = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp___2; drv_active = drv_active << 1; ldv_66730: ; if (drv_state != drv_active) { goto ldv_66729; } else { } tmp___3 = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp___3; if (dev_state == 5U) { ql_log(2U, vha, 45094, "HW State: DEV_QUIESCENT.\n"); qla82xx_wr_32(ha, 136323392UL, 7U); } else { } return; } } uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *vha , uint32_t curr_state ) { struct qla_hw_data *ha ; uint32_t dev_state ; int tmp ; { ha = vha->hw; ldv_66738: msleep(1000U); qla82xx_idc_lock(ha); tmp = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp; qla82xx_idc_unlock(ha); if (dev_state == curr_state) { goto ldv_66738; } else { } return (dev_state); } } void qla8xxx_dev_failed_handler(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; ql_log(0U, vha, 184, "Disabling the board.\n"); if ((ha->device_type & 16384U) != 0U) { qla82xx_clear_drv_active(ha); qla82xx_idc_unlock(ha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_clear_drv_active(ha); qla8044_idc_unlock(ha); } else { } vha->device_flags = vha->device_flags | 32U; qla2x00_abort_all_cmds(vha, 65536); qla2x00_mark_all_devices_lost(vha, 0); vha->flags.online = 0U; vha->flags.init_done = 0U; return; } } static void qla82xx_need_reset_handler(scsi_qla_host_t *vha ) { uint32_t dev_state ; uint32_t drv_state ; uint32_t drv_active ; uint32_t active_mask ; unsigned long reset_timeout ; struct qla_hw_data *ha ; struct req_que *req ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; char *tmp___6 ; char *tmp___7 ; int tmp___8 ; { active_mask = 0U; ha = vha->hw; req = *(ha->req_q_map); if (*((unsigned long *)vha + 19UL) != 0UL) { qla82xx_idc_unlock(ha); qla2x00_abort_isp_cleanup(vha); (*((ha->isp_ops)->get_flash_version))(vha, (void *)req->ring); (*((ha->isp_ops)->nvram_config))(vha); qla82xx_idc_lock(ha); } else { } tmp = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp; if (*((unsigned long *)ha + 2UL) == 0UL) { ql_dbg(524288U, vha, 45096, "reset_acknowledged by 0x%x\n", (int )ha->portnum); qla82xx_set_rst_ready(ha); } else { active_mask = (uint32_t )(~ (1 << (int )ha->portnum * 4)); drv_active = drv_active & active_mask; ql_dbg(524288U, vha, 45097, "active_mask: 0x%08x\n", active_mask); } reset_timeout = (unsigned long )(ha->fcoe_reset_timeout * 250U) + (unsigned long )jiffies; tmp___0 = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp___0; tmp___1 = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp___1; tmp___2 = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp___2; ql_dbg(524288U, vha, 45098, "drv_state: 0x%08x, drv_active: 0x%08x, dev_state: 0x%08x, active_mask: 0x%08x\n", drv_state, drv_active, dev_state, active_mask); goto ldv_66762; ldv_66761: ; if ((long 
)((unsigned long )jiffies - reset_timeout) >= 0L) { ql_log(1U, vha, 181, "Reset timeout.\n"); goto ldv_66760; } else { } qla82xx_idc_unlock(ha); msleep(1000U); qla82xx_idc_lock(ha); tmp___3 = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp___3; tmp___4 = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp___4; if (*((unsigned long *)ha + 2UL) != 0UL) { drv_active = drv_active & active_mask; } else { } tmp___5 = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp___5; ldv_66762: ; if (drv_state != drv_active && dev_state != 2U) { goto ldv_66761; } else { } ldv_66760: ql_dbg(524288U, vha, 45099, "drv_state: 0x%08x, drv_active: 0x%08x, dev_state: 0x%08x, active_mask: 0x%08x\n", drv_state, drv_active, dev_state, active_mask); if (dev_state <= 7U) { tmp___6 = qdev_state(dev_state); tmp___7 = tmp___6; } else { tmp___7 = (char *)"Unknown"; } ql_log(2U, vha, 182, "Device state is 0x%x = %s.\n", dev_state, tmp___7); if (dev_state != 2U && dev_state != 1U) { ql_log(2U, vha, 183, "HW State: COLD/RE-INIT.\n"); qla82xx_wr_32(ha, 136323392UL, 1U); qla82xx_set_rst_ready(ha); if (ql2xmdenable != 0) { tmp___8 = qla82xx_md_collect(vha); if (tmp___8 != 0) { ql_log(1U, vha, 45100, "Minidump not collected.\n"); } else { } } else { ql_log(1U, vha, 45135, "Minidump disabled.\n"); } } else { } return; } } int qla82xx_check_md_needed(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint16_t fw_major_version ; uint16_t fw_minor_version ; uint16_t fw_subminor_version ; int rval ; { ha = vha->hw; rval = 0; fw_major_version = ha->fw_major_version; fw_minor_version = ha->fw_minor_version; fw_subminor_version = ha->fw_subminor_version; rval = qla2x00_get_fw_version(vha); if (rval != 0) { return (rval); } else { } if (ql2xmdenable != 0) { if (ha->fw_dumped == 0) { if ((((int )ha->fw_major_version != (int )fw_major_version || (int )ha->fw_minor_version != (int )fw_minor_version) || (int )ha->fw_subminor_version != (int )fw_subminor_version) || ha->prev_minidump_failed != 0) { ql_dbg(524288U, vha, 45101, "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n", (int )fw_major_version, (int )fw_minor_version, (int )fw_subminor_version, (int )ha->fw_major_version, (int )ha->fw_minor_version, (int )ha->fw_subminor_version, ha->prev_minidump_failed); qla82xx_md_free(vha); qla82xx_md_prep(vha); } else { } } else { ql_log(2U, vha, 45102, "Firmware dump available to retrieve\n"); } } else { } return (rval); } } static int qla82xx_check_fw_alive(scsi_qla_host_t *vha ) { uint32_t fw_heartbeat_counter ; int status ; int tmp ; { status = 0; tmp = qla82xx_rd_32(vha->hw, 136323248UL); fw_heartbeat_counter = (uint32_t )tmp; if (fw_heartbeat_counter == 4294967295U) { ql_dbg(16777216U, vha, 24579, "FW heartbeat counter is 0xffffffff, returning status=%d.\n", status); return (status); } else { } if ((uint32_t )vha->fw_heartbeat_counter == fw_heartbeat_counter) { vha->seconds_since_last_heartbeat = vha->seconds_since_last_heartbeat + 1; if (vha->seconds_since_last_heartbeat == 2) { vha->seconds_since_last_heartbeat = 0; status = 1; } else { } } else { vha->seconds_since_last_heartbeat = 0; } vha->fw_heartbeat_counter = (int )fw_heartbeat_counter; if (status != 0) { ql_dbg(16777216U, vha, 24580, "Returning status=%d.\n", status); } else { } return (status); } } int qla82xx_device_state_handler(scsi_qla_host_t *vha ) { uint32_t dev_state ; uint32_t old_dev_state ; int rval ; unsigned long dev_init_timeout ; struct qla_hw_data *ha ; int loopcount ; int tmp ; char 
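/* Added descriptive comment: IDC device-state machine. Under the IDC lock
   the handler polls the DEV_STATE register and dispatches on it -- READY
   exits, COLD runs the bootstrap (firmware load), NEED_RESET and
   NEED_QUIESCENT invoke their handlers, INITIALIZING/QUIESCENT simply wait,
   and FAILED offlines the adapter; the loop gives up once
   fcoe_dev_init_timeout expires. State names follow the ql_log messages. */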
*tmp___0 ; char *tmp___1 ; int tmp___2 ; char *tmp___3 ; char *tmp___4 ; { rval = 0; ha = vha->hw; loopcount = 0; qla82xx_idc_lock(ha); if (*((unsigned long *)vha + 19UL) == 0UL) { qla82xx_set_drv_active(vha); qla82xx_set_idc_version(vha); } else { } tmp = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp; old_dev_state = dev_state; if (dev_state <= 7U) { tmp___0 = qdev_state(dev_state); tmp___1 = tmp___0; } else { tmp___1 = (char *)"Unknown"; } ql_log(2U, vha, 155, "Device state is 0x%x = %s.\n", dev_state, tmp___1); dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; ldv_66803: ; if ((long )((unsigned long )jiffies - dev_init_timeout) >= 0L) { ql_log(0U, vha, 156, "Device init failed.\n"); rval = 258; goto ldv_66791; } else { } tmp___2 = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp___2; if (old_dev_state != dev_state) { loopcount = 0; old_dev_state = dev_state; } else { } if (loopcount <= 4) { if (dev_state <= 7U) { tmp___3 = qdev_state(dev_state); tmp___4 = tmp___3; } else { tmp___4 = (char *)"Unknown"; } ql_log(2U, vha, 157, "Device state is 0x%x = %s.\n", dev_state, tmp___4); } else { } switch (dev_state) { case 3U: ha->flags.nic_core_reset_owner = 0U; goto rel_lock; case 1U: rval = qla82xx_device_bootstrap(vha); goto ldv_66795; case 2U: qla82xx_idc_unlock(ha); msleep(1000U); qla82xx_idc_lock(ha); goto ldv_66795; case 4U: ; if (ql2xdontresethba == 0) { qla82xx_need_reset_handler(vha); } else { qla82xx_idc_unlock(ha); msleep(1000U); qla82xx_idc_lock(ha); } dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; goto ldv_66795; case 5U: qla82xx_need_qsnt_handler(vha); dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; goto ldv_66795; case 7U: ; if (*((unsigned long *)ha + 2UL) != 0UL) { goto rel_lock; } else { } qla82xx_idc_unlock(ha); msleep(1000U); qla82xx_idc_lock(ha); dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; goto ldv_66795; case 6U: qla8xxx_dev_failed_handler(vha); rval = 258; goto exit; default: qla82xx_idc_unlock(ha); msleep(1000U); qla82xx_idc_lock(ha); } ldv_66795: loopcount = loopcount + 1; goto ldv_66803; ldv_66791: ; rel_lock: qla82xx_idc_unlock(ha); exit: ; return (rval); } } static int qla82xx_check_temp(scsi_qla_host_t *vha ) { uint32_t temp ; uint32_t temp_state ; uint32_t temp_val ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla82xx_rd_32(ha, 136324020UL); temp = (uint32_t )tmp; temp_state = temp & 65535U; temp_val = temp >> 16; if (temp_state == 3U) { ql_log(1U, vha, 24590, "Device temperature %d degrees C exceeds maximum allowed. Hardware has been shut down.\n", temp_val); return (1); } else if (temp_state == 2U) { ql_log(1U, vha, 24591, "Device temperature %d degrees C exceeds operating range. 
Immediate action needed.\n", temp_val); } else { } return (0); } } int qla82xx_read_temperature(scsi_qla_host_t *vha ) { uint32_t temp ; int tmp ; { tmp = qla82xx_rd_32(vha->hw, 136324020UL); temp = (uint32_t )tmp; return ((int )(temp >> 16)); } } void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; if (*((unsigned long *)ha + 2UL) != 0UL) { ha->flags.mbox_int = 1U; ha->flags.mbox_busy = 0U; ql_log(1U, vha, 24592, "Doing premature completion of mbx command.\n"); tmp = test_and_clear_bit(2L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (tmp != 0) { complete(& ha->mbx_intr_comp); } else { } } else { } return; } } void qla82xx_watchdog(scsi_qla_host_t *vha ) { uint32_t dev_state ; uint32_t halt_status ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; { ha = vha->hw; if (*((unsigned long *)ha + 2UL) == 0UL) { tmp = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp; tmp___11 = qla82xx_check_temp(vha); if (tmp___11 != 0) { set_bit(17L, (unsigned long volatile *)(& vha->dpc_flags)); ha->flags.isp82xx_fw_hung = 1U; qla82xx_clear_pending_mbx(vha); } else if (dev_state == 4U) { tmp___10 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___10 == 0) { ql_log(1U, vha, 24577, "Adapter reset needed.\n"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { goto _L___2; } } else _L___2: /* CIL Label */ if (dev_state == 5U) { tmp___9 = constant_test_bit(20L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___9 == 0) { ql_log(1U, vha, 24578, "Quiescent needed.\n"); set_bit(20L, (unsigned long volatile *)(& vha->dpc_flags)); } else { goto _L___1; } } else _L___1: /* CIL Label */ if (dev_state == 6U) { tmp___8 = constant_test_bit(17L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___8 == 0) { if (*((unsigned long *)vha + 19UL) != 0UL) { ql_log(1U, vha, 45141, "Adapter state is failed. Offlining.\n"); set_bit(17L, (unsigned long volatile *)(& vha->dpc_flags)); ha->flags.isp82xx_fw_hung = 1U; qla82xx_clear_pending_mbx(vha); } else { goto _L___0; } } else { goto _L___0; } } else { _L___0: /* CIL Label */ tmp___7 = qla82xx_check_fw_alive(vha); if (tmp___7 != 0) { ql_dbg(16777216U, vha, 24593, "disabling pause transmit on port 0 & 1.\n"); qla82xx_wr_32(ha, 106954904UL, 9U); tmp___0 = qla82xx_rd_32(ha, 136323240UL); halt_status = (uint32_t )tmp___0; tmp___1 = qla82xx_rd_32(ha, 116391996UL); tmp___2 = qla82xx_rd_32(ha, 121634876UL); tmp___3 = qla82xx_rd_32(ha, 120586300UL); tmp___4 = qla82xx_rd_32(ha, 119537724UL); tmp___5 = qla82xx_rd_32(ha, 118489148UL); tmp___6 = qla82xx_rd_32(ha, 136323244UL); ql_log(2U, vha, 24581, "dumping hw/fw registers:.\n PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n PEG_NET_4_PC: 0x%x.\n", halt_status, tmp___6, tmp___5, tmp___4, tmp___3, tmp___2, tmp___1); if ((halt_status & 536870656U) >> 8 == 103U) { ql_log(1U, vha, 45138, "Firmware aborted with error code 0x00006700. 
Device is being reset.\n"); } else { } if ((int )halt_status < 0) { set_bit(17L, (unsigned long volatile *)(& vha->dpc_flags)); } else { ql_log(2U, vha, 24582, "Detect abort needed.\n"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } ha->flags.isp82xx_fw_hung = 1U; ql_log(1U, vha, 24583, "Firmware hung.\n"); qla82xx_clear_pending_mbx(vha); } else { } } } else { } return; } } int qla82xx_load_risc(scsi_qla_host_t *vha , uint32_t *srisc_addr ) { int rval ; struct qla_hw_data *ha ; { rval = -1; ha = vha->hw; if ((ha->device_type & 16384U) != 0U) { rval = qla82xx_device_state_handler(vha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); qla83xx_reset_ownership(vha); qla8044_idc_unlock(ha); rval = qla8044_device_state_handler(vha); } else { } return (rval); } } void qla82xx_set_reset_owner(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t dev_state ; int tmp ; int tmp___0 ; char *tmp___1 ; char *tmp___2 ; { ha = vha->hw; dev_state = 0U; if ((ha->device_type & 16384U) != 0U) { tmp = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp; } else if ((ha->device_type & 262144U) != 0U) { tmp___0 = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp___0; } else { } if (dev_state == 3U) { ql_log(2U, vha, 45103, "HW State: NEED RESET\n"); if ((ha->device_type & 16384U) != 0U) { qla82xx_wr_32(ha, 136323392UL, 4U); ha->flags.nic_core_reset_owner = 1U; ql_dbg(524288U, vha, 45104, "reset_owner is 0x%x\n", (int )ha->portnum); } else if ((ha->device_type & 262144U) != 0U) { qla8044_wr_direct(vha, 4U, 4U); } else { } } else { if (dev_state <= 7U) { tmp___1 = qdev_state(dev_state); tmp___2 = tmp___1; } else { tmp___2 = (char *)"Unknown"; } ql_log(2U, vha, 45105, "Device state is 0x%x = %s.\n", dev_state, tmp___2); } return; } } int qla82xx_abort_isp(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; int tmp ; { rval = -1; ha = vha->hw; if ((vha->device_flags & 32U) != 0U) { ql_log(1U, vha, 32804, "Device in failed state, exiting.\n"); return (0); } else { } ha->flags.nic_core_reset_hdlr_active = 1U; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); if ((ha->device_type & 16384U) != 0U) { rval = qla82xx_device_state_handler(vha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); qla83xx_reset_ownership(vha); qla8044_idc_unlock(ha); rval = qla8044_device_state_handler(vha); } else { } qla82xx_idc_lock(ha); qla82xx_clear_rst_ready(ha); qla82xx_idc_unlock(ha); if (rval == 0) { ha->flags.isp82xx_fw_hung = 0U; ha->flags.nic_core_reset_hdlr_active = 0U; qla82xx_restart_isp(vha); } else { } if (rval != 0) { vha->flags.online = 1U; tmp = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp != 0) { if (ha->isp_abort_cnt == 0U) { ql_log(1U, vha, 32807, "ISP error recover failed - board disabled.\n"); (*((ha->isp_ops)->reset_adapter))(vha); vha->flags.online = 0U; clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); rval = 0; } else { ha->isp_abort_cnt = ha->isp_abort_cnt - 1U; ql_log(1U, vha, 32822, "ISP abort - retry remaining %d.\n", ha->isp_abort_cnt); rval = 258; } } else { ha->isp_abort_cnt = 5U; ql_dbg(4194304U, vha, 32809, "ISP error recovery - retrying (%d) more times.\n", ha->isp_abort_cnt); set_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); rval = 258; } } else { } return (rval); } } int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha ) { int rval ; { rval = 258; if (*((unsigned long *)vha + 19UL) != 0UL) { qla2x00_abort_isp_cleanup(vha); } else { } 
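/* Added descriptive comment: FCoE context reset path. With any active ISP
   state cleaned up above, stop the firmware and restart the ISP; the restart
   status is returned to the caller unchanged. */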
qla2x00_try_to_stop_firmware(vha); rval = qla82xx_restart_isp(vha); return (rval); } } int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha ) { int status ; unsigned long wait_reset ; struct task_struct *tmp ; long volatile __ret ; struct task_struct *tmp___0 ; struct task_struct *tmp___1 ; struct task_struct *tmp___2 ; struct task_struct *tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; { status = 258; wait_reset = (unsigned long )jiffies + 75000UL; goto ldv_66866; ldv_66865: tmp = get_current(); tmp->task_state_change = 0UL; __ret = 2L; switch (8UL) { case 1UL: tmp___0 = get_current(); __asm__ volatile ("xchgb %b0, %1\n": "+q" (__ret), "+m" (tmp___0->state): : "memory", "cc"); goto ldv_66858; case 2UL: tmp___1 = get_current(); __asm__ volatile ("xchgw %w0, %1\n": "+r" (__ret), "+m" (tmp___1->state): : "memory", "cc"); goto ldv_66858; case 4UL: tmp___2 = get_current(); __asm__ volatile ("xchgl %0, %1\n": "+r" (__ret), "+m" (tmp___2->state): : "memory", "cc"); goto ldv_66858; case 8UL: tmp___3 = get_current(); __asm__ volatile ("xchgq %q0, %1\n": "+r" (__ret), "+m" (tmp___3->state): : "memory", "cc"); goto ldv_66858; default: __xchg_wrong_size(); } ldv_66858: schedule_timeout(250L); tmp___4 = constant_test_bit(18L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___4 == 0) { tmp___5 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___5 == 0) { status = 0; goto ldv_66864; } else { } } else { } ldv_66866: tmp___6 = constant_test_bit(18L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___6 != 0) { goto _L; } else { tmp___7 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___7 != 0) { _L: /* CIL Label */ if ((long )((unsigned long )jiffies - wait_reset) < 0L) { goto ldv_66865; } else { goto ldv_66864; } } else { goto ldv_66864; } } ldv_66864: ql_dbg(524288U, vha, 45095, "%s: status=%d.\n", "qla2x00_wait_for_fcoe_ctx_reset", status); return (status); } } void qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha ) { int i ; int fw_state ; unsigned long flags ; struct qla_hw_data *ha ; int cnt ; int que ; srb_t *sp ; struct req_que *req ; raw_spinlock_t *tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; int tmp___2 ; { fw_state = 0; ha = vha->hw; if (*((unsigned long *)ha + 2UL) == 0UL) { i = 0; goto ldv_66877; ldv_66876: msleep(1000U); if ((ha->device_type & 16384U) != 0U) { fw_state = qla82xx_check_fw_alive(vha); } else if ((ha->device_type & 262144U) != 0U) { fw_state = qla8044_check_fw_alive(vha); } else { } if (fw_state != 0) { ha->flags.isp82xx_fw_hung = 1U; qla82xx_clear_pending_mbx(vha); goto ldv_66875; } else { } i = i + 1; ldv_66877: ; if (i <= 1) { goto ldv_66876; } else { } ldv_66875: ; } else { } ql_dbg(1073741824U, vha, 176, "Entered %s fw_hung=%d.\n", "qla82xx_chip_reset_cleanup", (int )ha->flags.isp82xx_fw_hung); if (*((unsigned long *)ha + 2UL) == 0UL) { tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); que = 0; goto ldv_66894; ldv_66893: req = *(ha->req_q_map + (unsigned long )que); if ((unsigned long )req == (unsigned long )((struct req_que *)0)) { goto ldv_66886; } else { } cnt = 1; goto ldv_66891; ldv_66890: sp = *(req->outstanding_cmds + (unsigned long )cnt); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { if (((unsigned long )sp->u.scmd.ctx == (unsigned long )((void *)0) || ((int )sp->flags & 4096) != 0) && *((unsigned long *)ha + 2UL) == 0UL) { spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___0 = 
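/* Added descriptive comment: the hardware_lock is dropped around the
   mailbox-based abort of each outstanding command and re-acquired before the
   outstanding-command scan continues. */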
(*((ha->isp_ops)->abort_command))(sp); if (tmp___0 != 0) { ql_log(2U, vha, 177, "mbx abort failed.\n"); } else { ql_log(2U, vha, 178, "mbx abort success.\n"); } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); } else { } } else { } cnt = cnt + 1; ldv_66891: ; if ((int )req->num_outstanding_cmds > cnt) { goto ldv_66890; } else { } ldv_66886: que = que + 1; ldv_66894: ; if ((int )ha->max_req_queues > que) { goto ldv_66893; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___2 = qla2x00_eh_wait_for_pending_commands(vha, 0U, 0ULL, 0); if (tmp___2 != 0) { ql_dbg(1073741824U, vha, 179, "Done wait for pending commands.\n"); } else { } } else { } return; } } static int qla82xx_minidump_process_control(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; struct qla82xx_md_entry_crb *crb_entry ; uint32_t read_value ; uint32_t opcode ; uint32_t poll_time ; uint32_t addr ; uint32_t index ; uint32_t crb_addr ; unsigned long wtime ; struct qla82xx_md_template_hdr *tmplt_hdr ; uint32_t rval ; int i ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { ha = vha->hw; rval = 0U; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr; crb_addr = crb_entry->addr; i = 0; goto ldv_66922; ldv_66921: opcode = (uint32_t )crb_entry->crb_ctrl.opcode; if ((int )opcode & 1) { qla82xx_md_rw_32(ha, crb_addr, crb_entry->value_1, 1); opcode = opcode & 4294967294U; } else { } if ((opcode & 2U) != 0U) { tmp = qla82xx_md_rw_32(ha, crb_addr, 0U, 0); read_value = (uint32_t )tmp; qla82xx_md_rw_32(ha, crb_addr, read_value, 1); opcode = opcode & 4294967293U; } else { } if ((opcode & 4U) != 0U) { tmp___0 = qla82xx_md_rw_32(ha, crb_addr, 0U, 0); read_value = (uint32_t )tmp___0; read_value = crb_entry->value_2 & read_value; opcode = opcode & 4294967291U; if ((opcode & 8U) != 0U) { read_value = crb_entry->value_3 | read_value; opcode = opcode & 4294967287U; } else { } qla82xx_md_rw_32(ha, crb_addr, read_value, 1); } else { } if ((opcode & 8U) != 0U) { tmp___1 = qla82xx_md_rw_32(ha, crb_addr, 0U, 0); read_value = (uint32_t )tmp___1; read_value = crb_entry->value_3 | read_value; qla82xx_md_rw_32(ha, crb_addr, read_value, 1); opcode = opcode & 4294967287U; } else { } if ((opcode & 16U) != 0U) { poll_time = (uint32_t )crb_entry->crb_strd.poll_timeout; wtime = (unsigned long )poll_time + (unsigned long )jiffies; tmp___2 = qla82xx_md_rw_32(ha, crb_addr, 0U, 0); read_value = (uint32_t )tmp___2; ldv_66920: ; if ((crb_entry->value_2 & read_value) == crb_entry->value_1) { goto ldv_66913; } else if ((long )((unsigned long )jiffies - wtime) >= 0L) { rval = 258U; goto ldv_66913; } else { tmp___3 = qla82xx_md_rw_32(ha, crb_addr, 0U, 0); read_value = (uint32_t )tmp___3; } goto ldv_66920; ldv_66913: opcode = opcode & 4294967279U; } else { } if ((opcode & 32U) != 0U) { if ((unsigned int )crb_entry->crb_strd.state_index_a != 0U) { index = (uint32_t )crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else { addr = crb_addr; } tmp___4 = qla82xx_md_rw_32(ha, addr, 0U, 0); read_value = (uint32_t )tmp___4; index = (uint32_t )crb_entry->crb_ctrl.state_index_v; tmplt_hdr->saved_state_array[index] = read_value; opcode = opcode & 4294967263U; } else { } if ((opcode & 64U) != 0U) { if ((unsigned int )crb_entry->crb_strd.state_index_a != 0U) { index = (uint32_t )crb_entry->crb_strd.state_index_a; addr = 
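/* Added descriptive comment: CRB control write opcode. Both the target
   address and the value to write may be indirected through the template's
   saved_state_array, which the earlier read-state opcode populated. */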
tmplt_hdr->saved_state_array[index]; } else { addr = crb_addr; } if ((unsigned int )crb_entry->crb_ctrl.state_index_v != 0U) { index = (uint32_t )crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; } else { read_value = crb_entry->value_1; } qla82xx_md_rw_32(ha, addr, read_value, 1); opcode = opcode & 4294967231U; } else { } if ((opcode & 128U) != 0U) { index = (uint32_t )crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; read_value = read_value << (int )crb_entry->crb_ctrl.shl; read_value = read_value >> (int )crb_entry->crb_ctrl.shr; if (crb_entry->value_2 != 0U) { read_value = crb_entry->value_2 & read_value; } else { } read_value = crb_entry->value_3 | read_value; read_value = crb_entry->value_1 + read_value; tmplt_hdr->saved_state_array[index] = read_value; opcode = opcode & 4294967167U; } else { } crb_addr = (uint32_t )crb_entry->crb_strd.addr_stride + crb_addr; i = i + 1; ldv_66922: ; if ((uint32_t )i < crb_entry->op_count) { goto ldv_66921; } else { } return ((int )rval); } } static void qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t r_addr ; uint32_t r_stride ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla82xx_md_entry_rdocm *ocm_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; { ha = vha->hw; data_ptr = *d_ptr; ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr; r_addr = ocm_hdr->read_addr; r_stride = ocm_hdr->read_addr_stride; loop_cnt = ocm_hdr->op_count; i = 0U; goto ldv_66938; ldv_66937: r_value = readl((void const volatile *)((unsigned long )r_addr + ha->nx_pcibase)); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; r_addr = r_addr + r_stride; i = i + 1U; ldv_66938: ; if (i < loop_cnt) { goto ldv_66937; } else { } *d_ptr = data_ptr; return; } } static void qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t r_addr ; uint32_t s_stride ; uint32_t s_addr ; uint32_t s_value ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla82xx_md_entry_mux *mux_hdr ; uint32_t *data_ptr ; int tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; { ha = vha->hw; data_ptr = *d_ptr; mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr; r_addr = mux_hdr->read_addr; s_addr = mux_hdr->select_addr; s_stride = mux_hdr->select_value_stride; s_value = mux_hdr->select_value; loop_cnt = mux_hdr->op_count; i = 0U; goto ldv_66956; ldv_66955: qla82xx_md_rw_32(ha, s_addr, s_value, 1); tmp = qla82xx_md_rw_32(ha, r_addr, 0U, 0); r_value = (uint32_t )tmp; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = s_value; tmp___1 = data_ptr; data_ptr = data_ptr + 1; *tmp___1 = r_value; s_value = s_value + s_stride; i = i + 1U; ldv_66956: ; if (i < loop_cnt) { goto ldv_66955; } else { } *d_ptr = data_ptr; return; } } static void qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t r_addr ; uint32_t r_stride ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla82xx_md_entry_crb *crb_hdr ; uint32_t *data_ptr ; int tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; { ha = vha->hw; data_ptr = *d_ptr; crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr; r_addr = crb_hdr->addr; r_stride = (uint32_t )crb_hdr->crb_strd.addr_stride; loop_cnt = crb_hdr->op_count; i = 0U; goto ldv_66972; ldv_66971: tmp = qla82xx_md_rw_32(ha, r_addr, 0U, 0); r_value = 
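/* Added descriptive comment: RDCRB entry. Capture op_count (address, value)
   pairs, stepping the CRB address by addr_stride after each read. */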
(uint32_t )tmp; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_addr; tmp___1 = data_ptr; data_ptr = data_ptr + 1; *tmp___1 = r_value; r_addr = r_addr + r_stride; i = i + 1U; ldv_66972: ; if (i < loop_cnt) { goto ldv_66971; } else { } *d_ptr = data_ptr; return; } } static int qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t addr ; uint32_t r_addr ; uint32_t c_addr ; uint32_t t_r_addr ; uint32_t i ; uint32_t k ; uint32_t loop_count ; uint32_t t_value ; uint32_t r_cnt ; uint32_t r_value ; unsigned long p_wait ; unsigned long w_time ; unsigned long p_mask ; uint32_t c_value_w ; uint32_t c_value_r ; struct qla82xx_md_entry_cache *cache_hdr ; int rval ; uint32_t *data_ptr ; int tmp ; int tmp___0 ; uint32_t *tmp___1 ; { ha = vha->hw; rval = 258; data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = (uint32_t )cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = (uint32_t )cache_hdr->addr_ctrl.init_tag_value; r_cnt = (uint32_t )cache_hdr->read_ctrl.read_addr_cnt; p_wait = (unsigned long )cache_hdr->cache_ctrl.poll_wait; p_mask = (unsigned long )cache_hdr->cache_ctrl.poll_mask; i = 0U; goto ldv_67010; ldv_67009: qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); if (c_value_w != 0U) { qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); } else { } if (p_mask != 0UL) { w_time = (unsigned long )jiffies + p_wait; ldv_67005: tmp = qla82xx_md_rw_32(ha, c_addr, 0U, 0); c_value_r = (uint32_t )tmp; if (((unsigned long )c_value_r & p_mask) == 0UL) { goto ldv_66998; } else if ((long )((unsigned long )jiffies - w_time) >= 0L) { ql_dbg(524288U, vha, 45106, "c_value_r: 0x%x, poll_mask: 0x%lx, w_time: 0x%lx\n", c_value_r, p_mask, w_time); return (rval); } else { } goto ldv_67005; ldv_66998: ; } else { } addr = r_addr; k = 0U; goto ldv_67007; ldv_67006: tmp___0 = qla82xx_md_rw_32(ha, addr, 0U, 0); r_value = (uint32_t )tmp___0; tmp___1 = data_ptr; data_ptr = data_ptr + 1; *tmp___1 = r_value; addr = (uint32_t )cache_hdr->read_ctrl.read_addr_stride + addr; k = k + 1U; ldv_67007: ; if (k < r_cnt) { goto ldv_67006; } else { } t_value = (uint32_t )cache_hdr->addr_ctrl.tag_value_stride + t_value; i = i + 1U; ldv_67010: ; if (i < loop_count) { goto ldv_67009; } else { } *d_ptr = data_ptr; return (0); } } static void qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t addr ; uint32_t r_addr ; uint32_t c_addr ; uint32_t t_r_addr ; uint32_t i ; uint32_t k ; uint32_t loop_count ; uint32_t t_value ; uint32_t r_cnt ; uint32_t r_value ; uint32_t c_value_w ; struct qla82xx_md_entry_cache *cache_hdr ; uint32_t *data_ptr ; int tmp ; uint32_t *tmp___0 ; { ha = vha->hw; data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = (uint32_t )cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = (uint32_t )cache_hdr->addr_ctrl.init_tag_value; r_cnt = (uint32_t )cache_hdr->read_ctrl.read_addr_cnt; i = 0U; goto ldv_67035; ldv_67034: qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); addr = r_addr; k = 0U; goto ldv_67032; ldv_67031: tmp = qla82xx_md_rw_32(ha, addr, 0U, 0); r_value = (uint32_t )tmp; tmp___0 = 
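/* Added descriptive comment: L1-cache entry. For each tag value, program the
   tag and control registers, then read read_addr_cnt words at
   read_addr_stride; unlike the L2-tag path above there is no poll on the
   control register. */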
data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; addr = (uint32_t )cache_hdr->read_ctrl.read_addr_stride + addr; k = k + 1U; ldv_67032: ; if (k < r_cnt) { goto ldv_67031; } else { } t_value = (uint32_t )cache_hdr->addr_ctrl.tag_value_stride + t_value; i = i + 1U; ldv_67035: ; if (i < loop_count) { goto ldv_67034; } else { } *d_ptr = data_ptr; return; } } static void qla82xx_minidump_process_queue(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t s_addr ; uint32_t r_addr ; uint32_t r_stride ; uint32_t r_value ; uint32_t r_cnt ; uint32_t qid ; uint32_t i ; uint32_t k ; uint32_t loop_cnt ; struct qla82xx_md_entry_queue *q_hdr ; uint32_t *data_ptr ; int tmp ; uint32_t *tmp___0 ; { ha = vha->hw; qid = 0U; data_ptr = *d_ptr; q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr; s_addr = q_hdr->select_addr; r_cnt = (uint32_t )q_hdr->rd_strd.read_addr_cnt; r_stride = (uint32_t )q_hdr->rd_strd.read_addr_stride; loop_cnt = q_hdr->op_count; i = 0U; goto ldv_67058; ldv_67057: qla82xx_md_rw_32(ha, s_addr, qid, 1); r_addr = q_hdr->read_addr; k = 0U; goto ldv_67055; ldv_67054: tmp = qla82xx_md_rw_32(ha, r_addr, 0U, 0); r_value = (uint32_t )tmp; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; r_addr = r_addr + r_stride; k = k + 1U; ldv_67055: ; if (k < r_cnt) { goto ldv_67054; } else { } qid = (uint32_t )q_hdr->q_strd.queue_id_stride + qid; i = i + 1U; ldv_67058: ; if (i < loop_cnt) { goto ldv_67057; } else { } *d_ptr = data_ptr; return; } } static void qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t r_addr ; uint32_t r_value ; uint32_t i ; uint32_t loop_cnt ; struct qla82xx_md_entry_rdrom *rom_hdr ; uint32_t *data_ptr ; int tmp ; uint32_t *tmp___0 ; { ha = vha->hw; data_ptr = *d_ptr; rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr; r_addr = rom_hdr->read_addr; loop_cnt = rom_hdr->read_data_size / 4U; i = 0U; goto ldv_67073; ldv_67072: qla82xx_md_rw_32(ha, 1108410416U, r_addr & 4294901760U, 1); tmp = qla82xx_md_rw_32(ha, (r_addr & 65535U) + 1108672512U, 0U, 0); r_value = (uint32_t )tmp; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; r_addr = r_addr + 4U; i = i + 1U; ldv_67073: ; if (i < loop_cnt) { goto ldv_67072; } else { } *d_ptr = data_ptr; return; } } static int qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t r_addr ; uint32_t r_value ; uint32_t r_data ; uint32_t i ; uint32_t j ; uint32_t loop_cnt ; struct qla82xx_md_entry_rdmem *m_hdr ; unsigned long flags ; int rval ; uint32_t *data_ptr ; int tmp ; struct ratelimit_state _rs ; int tmp___0 ; int tmp___1 ; uint32_t *tmp___2 ; { ha = vha->hw; rval = 258; data_ptr = *d_ptr; m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr; r_addr = m_hdr->read_addr; loop_cnt = m_hdr->read_data_size / 16U; if ((r_addr & 15U) != 0U) { ql_log(1U, vha, 45107, "Read addr 0x%x not 16 bytes aligned\n", r_addr); return (rval); } else { } if ((m_hdr->read_data_size & 15U) != 0U) { ql_log(1U, vha, 45108, "Read data[0x%x] not multiple of 16 bytes\n", m_hdr->read_data_size); return (rval); } else { } ql_dbg(524288U, vha, 45109, "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", "qla82xx_minidump_process_rdmem", r_addr, m_hdr->read_data_size, loop_cnt); flags = _raw_write_lock_irqsave(& ha->hw_lock); i = 0U; goto ldv_67107; ldv_67106: qla82xx_md_rw_32(ha, 1090519188U, 
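/* Added descriptive comment: RDMEM entry via the MIU test agent. Write the
   16-byte-aligned address, kick off the read (control values 2 then 3), poll
   up to 1000 times for what appears to be the busy bit to clear, then copy
   four 32-bit words from MD_MIU_TEST_AGT_RDDATA. Register roles other than
   MD_MIU_TEST_AGT_RDDATA are inferred from the raw constants. */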
r_addr, 1); r_value = 0U; qla82xx_md_rw_32(ha, 1090519192U, r_value, 1); r_value = 2U; qla82xx_md_rw_32(ha, 1090519184U, r_value, 1); r_value = 3U; qla82xx_md_rw_32(ha, 1090519184U, r_value, 1); j = 0U; goto ldv_67097; ldv_67096: tmp = qla82xx_md_rw_32(ha, 1090519184U, 0U, 0); r_value = (uint32_t )tmp; if ((r_value & 8U) == 0U) { goto ldv_67095; } else { } j = j + 1U; ldv_67097: ; if (j <= 999U) { goto ldv_67096; } else { } ldv_67095: ; if (j > 999U) { _rs.lock.raw_lock.val.counter = 0; _rs.lock.magic = 3735899821U; _rs.lock.owner_cpu = 4294967295U; _rs.lock.owner = (void *)-1; _rs.lock.dep_map.key = 0; _rs.lock.dep_map.class_cache[0] = 0; _rs.lock.dep_map.class_cache[1] = 0; _rs.lock.dep_map.name = "_rs.lock"; _rs.lock.dep_map.cpu = 0; _rs.lock.dep_map.ip = 0UL; _rs.interval = 1250; _rs.burst = 10; _rs.printed = 0; _rs.missed = 0; _rs.begin = 0UL; tmp___0 = ___ratelimit(& _rs, "qla82xx_minidump_process_rdmem"); if (tmp___0 != 0) { printk("\vfailed to read through agent\n"); } else { } _raw_write_unlock_irqrestore(& ha->hw_lock, flags); return (rval); } else { } j = 0U; goto ldv_67104; ldv_67103: tmp___1 = qla82xx_md_rw_32(ha, (uint32_t )MD_MIU_TEST_AGT_RDDATA[j], 0U, 0); r_data = (uint32_t )tmp___1; tmp___2 = data_ptr; data_ptr = data_ptr + 1; *tmp___2 = r_data; j = j + 1U; ldv_67104: ; if (j <= 3U) { goto ldv_67103; } else { } r_addr = r_addr + 16U; i = i + 1U; ldv_67107: ; if (i < loop_cnt) { goto ldv_67106; } else { } _raw_write_unlock_irqrestore(& ha->hw_lock, flags); *d_ptr = data_ptr; return (0); } } int qla82xx_validate_template_chksum(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint64_t chksum ; uint32_t *d_ptr ; int count ; uint32_t *tmp ; int tmp___0 ; { ha = vha->hw; chksum = 0ULL; d_ptr = (uint32_t *)ha->md_tmplt_hdr; count = (int )(ha->md_template_size / 4U); goto ldv_67120; ldv_67119: tmp = d_ptr; d_ptr = d_ptr + 1; chksum = (uint64_t )*tmp + chksum; ldv_67120: tmp___0 = count; count = count - 1; if (tmp___0 > 0) { goto ldv_67119; } else { } goto ldv_67123; ldv_67122: chksum = (chksum & 4294967295ULL) + (chksum >> 32); ldv_67123: ; if (chksum >> 32 != 0ULL) { goto ldv_67122; } else { } return (~ ((int )chksum)); } } static void qla82xx_mark_entry_skipped(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , int index ) { { entry_hdr->d_ctrl.driver_flags = (uint8_t )((unsigned int )entry_hdr->d_ctrl.driver_flags | 128U); ql_dbg(524288U, vha, 45110, "Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", index, entry_hdr->entry_type, (int )entry_hdr->d_ctrl.entry_capture_mask); return; } } int qla82xx_md_collect(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int no_entry_hdr ; qla82xx_md_entry_hdr_t *entry_hdr ; struct qla82xx_md_template_hdr *tmplt_hdr ; uint32_t *data_ptr ; uint32_t total_data_size ; uint32_t f_capture_mask ; uint32_t data_collected ; int i ; int rval ; int tmp ; { ha = vha->hw; no_entry_hdr = 0; total_data_size = 0U; data_collected = 0U; i = 0; rval = 258; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; data_ptr = (uint32_t *)ha->md_dump; if (ha->fw_dumped != 0) { ql_log(1U, vha, 45111, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto md_failed; } else { } ha->fw_dumped = 0; if ((unsigned long )ha->md_tmplt_hdr == (unsigned long )((void *)0) || (unsigned long )ha->md_dump == (unsigned long )((void *)0)) { ql_log(1U, vha, 45112, "Memory not allocated for minidump capture\n"); goto md_failed; } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { ql_log(1U, vha, 45140, "Forced reset from application, 
ignore minidump capture\n"); ha->flags.isp82xx_no_md_cap = 0U; goto md_failed; } else { } tmp = qla82xx_validate_template_chksum(vha); if (tmp != 0) { ql_log(2U, vha, 45113, "Template checksum validation error\n"); goto md_failed; } else { } no_entry_hdr = (int )tmplt_hdr->num_of_entries; ql_dbg(524288U, vha, 45114, "No of entry headers in Template: 0x%x\n", no_entry_hdr); ql_dbg(524288U, vha, 45115, "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); f_capture_mask = tmplt_hdr->capture_debug_level & 255U; if ((f_capture_mask & 3U) != 3U) { ql_log(1U, vha, 45116, "Minimum required capture mask[0x%x] level not set\n", f_capture_mask); goto md_failed; } else { } tmplt_hdr->driver_capture_mask = (uint32_t )ql2xmdcapmask; tmplt_hdr->driver_info[0] = (uint32_t )vha->host_no; tmplt_hdr->driver_info[1] = 134676480U; total_data_size = ha->md_dump_size; ql_dbg(524288U, vha, 45117, "Total minidump data_size 0x%x to be captured\n", total_data_size); if (tmplt_hdr->entry_type != 99U) { ql_log(1U, vha, 45134, "Bad template header entry type: 0x%x obtained\n", tmplt_hdr->entry_type); goto md_failed; } else { } entry_hdr = (qla82xx_md_entry_hdr_t *)ha->md_tmplt_hdr + (unsigned long )tmplt_hdr->first_entry_offset; i = 0; goto ldv_67165; ldv_67164: ; if (data_collected > total_data_size) { ql_log(1U, vha, 45118, "More MiniDump data collected: [0x%x]\n", data_collected); goto md_failed; } else { } if (((int )entry_hdr->d_ctrl.entry_capture_mask & ql2xmdcapmask) == 0) { entry_hdr->d_ctrl.driver_flags = (uint8_t )((unsigned int )entry_hdr->d_ctrl.driver_flags | 128U); ql_dbg(524288U, vha, 45119, "Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", i, entry_hdr->entry_type, (int )entry_hdr->d_ctrl.entry_capture_mask); goto skip_nxt_entry; } else { } ql_dbg(524288U, vha, 45120, "[%s]: data ptr[%d]: %p, entry_hdr: %p\nentry_type: 0x%x, captrue_mask: 0x%x\n", "qla82xx_md_collect", i, data_ptr, entry_hdr, entry_hdr->entry_type, (int )entry_hdr->d_ctrl.entry_capture_mask); ql_dbg(524288U, vha, 45121, "Data collected: [0x%x], Dump size left:[0x%x]\n", data_collected, ha->md_dump_size - data_collected); switch (entry_hdr->entry_type) { case 255U: qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto ldv_67147; case 98U: rval = qla82xx_minidump_process_control(vha, entry_hdr, & data_ptr); if (rval != 0) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } else { } goto ldv_67147; case 1U: qla82xx_minidump_process_rdcrb(vha, entry_hdr, & data_ptr); goto ldv_67147; case 72U: rval = qla82xx_minidump_process_rdmem(vha, entry_hdr, & data_ptr); if (rval != 0) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } else { } goto ldv_67147; case 4U: ; case 71U: qla82xx_minidump_process_rdrom(vha, entry_hdr, & data_ptr); goto ldv_67147; case 21U: ; case 22U: ; case 23U: ; case 24U: rval = qla82xx_minidump_process_l2tag(vha, entry_hdr, & data_ptr); if (rval != 0) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } else { } goto ldv_67147; case 11U: ; case 12U: qla82xx_minidump_process_l1cache(vha, entry_hdr, & data_ptr); goto ldv_67147; case 6U: qla82xx_minidump_process_rdocm(vha, entry_hdr, & data_ptr); goto ldv_67147; case 2U: qla82xx_minidump_process_rdmux(vha, entry_hdr, & data_ptr); goto ldv_67147; case 3U: qla82xx_minidump_process_queue(vha, entry_hdr, & data_ptr); goto ldv_67147; case 0U: ; default: qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto ldv_67147; } ldv_67147: ql_dbg(524288U, vha, 45122, "[%s]: data ptr[%d]: %p\n", "qla82xx_md_collect", i, data_ptr); 
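/* Added descriptive comment: the running total of collected data is the
   pointer distance from the start of ha->md_dump; entry_hdr is then advanced
   by entry_size to reach the next template entry. */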
data_collected = (uint32_t )((long )data_ptr) - (uint32_t )((long )ha->md_dump); skip_nxt_entry: entry_hdr = entry_hdr + (unsigned long )entry_hdr->entry_size; i = i + 1; ldv_67165: ; if (i < no_entry_hdr) { goto ldv_67164; } else { } if (data_collected != total_data_size) { ql_dbg(524288U, vha, 45123, "MiniDump data mismatch: Data collected: [0x%x],total_data_size:[0x%x]\n", data_collected, total_data_size); goto md_failed; } else { } ql_log(2U, vha, 45124, "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, 0U); md_failed: ; return (rval); } } int qla82xx_md_alloc(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int i ; int k ; struct qla82xx_md_template_hdr *tmplt_hdr ; { ha = vha->hw; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; if (ql2xmdcapmask <= 2 || ql2xmdcapmask > 127) { ql2xmdcapmask = (int )tmplt_hdr->capture_debug_level & 255; ql_log(2U, vha, 45125, "Forcing driver capture mask to firmware default capture mask: 0x%x.\n", ql2xmdcapmask); } else { } i = 2; k = 1; goto ldv_67175; ldv_67174: ; if ((i & ql2xmdcapmask) != 0) { ha->md_dump_size = ha->md_dump_size + tmplt_hdr->capture_size_array[k]; } else { } i = i << 1; k = k + 1; ldv_67175: ; if ((i & 255) != 0) { goto ldv_67174; } else { } if ((unsigned long )ha->md_dump != (unsigned long )((void *)0)) { ql_log(1U, vha, 45126, "Firmware dump previously allocated.\n"); return (1); } else { } ha->md_dump = vmalloc((unsigned long )ha->md_dump_size); if ((unsigned long )ha->md_dump == (unsigned long )((void *)0)) { ql_log(1U, vha, 45127, "Unable to allocate memory for Minidump size (0x%x).\n", ha->md_dump_size); return (1); } else { } return (0); } } void qla82xx_md_free(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((unsigned long )ha->md_tmplt_hdr != (unsigned long )((void *)0)) { ql_log(2U, vha, 45128, "Free MiniDump template: %p, size (%d KB)\n", ha->md_tmplt_hdr, ha->md_template_size / 1024U); dma_free_attrs(& (ha->pdev)->dev, (size_t )ha->md_template_size, ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma, (struct dma_attrs *)0); ha->md_tmplt_hdr = (void *)0; } else { } if ((unsigned long )ha->md_dump != (unsigned long )((void *)0)) { ql_log(2U, vha, 45129, "Free MiniDump memory: %p, size (%d KB)\n", ha->md_dump, ha->md_dump_size / 1024U); vfree((void const *)ha->md_dump); ha->md_dump_size = 0U; ha->md_dump = (void *)0; } else { } return; } } void qla82xx_md_prep(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int rval ; { ha = vha->hw; rval = qla82xx_md_get_template_size(vha); if (rval == 0) { ql_log(2U, vha, 45130, "MiniDump Template size obtained (%d KB)\n", ha->md_template_size / 1024U); if ((ha->device_type & 262144U) != 0U) { rval = qla8044_md_get_template(vha); } else { rval = qla82xx_md_get_template(vha); } if (rval == 0) { ql_dbg(524288U, vha, 45131, "MiniDump Template obtained\n"); rval = qla82xx_md_alloc(vha); if (rval == 0) { ql_log(2U, vha, 45132, "MiniDump memory allocated (%d KB)\n", ha->md_dump_size / 1024U); } else { ql_log(2U, vha, 45133, "Free MiniDump template: %p, size: (%d KB)\n", ha->md_tmplt_hdr, ha->md_template_size / 1024U); dma_free_attrs(& (ha->pdev)->dev, (size_t )ha->md_template_size, ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma, (struct dma_attrs *)0); ha->md_tmplt_hdr = (void *)0; } } else { } } else { } return; } } int qla82xx_beacon_on(struct scsi_qla_host *vha ) { int rval ; struct qla_hw_data *ha ; { ha = vha->hw; qla82xx_idc_lock(ha); rval = 
qla82xx_mbx_beacon_ctl(vha, 1); if (rval != 0) { ql_log(1U, vha, 45136, "mbx set led config failed in %s\n", "qla82xx_beacon_on"); goto exit; } else { } ha->beacon_blink_led = 1U; exit: qla82xx_idc_unlock(ha); return (rval); } } int qla82xx_beacon_off(struct scsi_qla_host *vha ) { int rval ; struct qla_hw_data *ha ; { ha = vha->hw; qla82xx_idc_lock(ha); rval = qla82xx_mbx_beacon_ctl(vha, 0); if (rval != 0) { ql_log(1U, vha, 45137, "mbx set led config failed in %s\n", "qla82xx_beacon_off"); goto exit; } else { } ha->beacon_blink_led = 0U; exit: qla82xx_idc_unlock(ha); return (rval); } } void qla82xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { struct qla_hw_data *ha ; { ha = vha->hw; if (ha->allow_cna_fw_dump == 0) { return; } else { } scsi_block_requests(vha->host); ha->flags.isp82xx_no_md_cap = 1U; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); return; } } void choose_timer_26(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_26 = 2; return; } } void activate_pending_timer_26(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_26 == (unsigned long )timer) { if (ldv_timer_state_26 == 2 || pending_flag != 0) { ldv_timer_list_26 = timer; ldv_timer_list_26->data = data; ldv_timer_state_26 = 1; } else { } return; } else { } reg_timer_26(timer); ldv_timer_list_26->data = data; return; } } void disable_suitable_timer_26(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_26) { ldv_timer_state_26 = 0; return; } else { } return; } } int reg_timer_26(struct timer_list *timer ) { { ldv_timer_list_26 = timer; ldv_timer_state_26 = 1; return (0); } } bool ldv_queue_work_on_247(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_248(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_249(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_250(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_251(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_252(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 
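/* Added descriptive comment: LDV environment model. The numbered timer and
   workqueue wrappers above and this scsi_add_host_with_dma wrapper are
   verifier-generated instrumentation that track callback state
   (ldv_timer_state_*, ldv_state_variable_*); they are not part of the
   original driver. */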
ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern unsigned long find_next_bit(unsigned long const * , unsigned long , unsigned long ) ; extern unsigned long find_first_bit(unsigned long const * , unsigned long ) ; extern int strcmp(char const * , char const * ) ; extern char *strim(char * ) ; __inline static unsigned long arch_local_save_flags___0(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static void arch_local_irq_restore___0(unsigned long f ) { unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.restore_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (836), "i" (12UL)); ldv_4870: ; goto ldv_4870; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (44UL), [paravirt_opptr] "i" (& pv_irq_ops.restore_fl.func), [paravirt_clobber] "i" (1), "D" (f): "memory", "cc"); return; } } __inline static void arch_local_irq_disable___0(void) { unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.irq_disable.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (841), "i" (12UL)); ldv_4879: ; goto ldv_4879; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (45UL), [paravirt_opptr] "i" (& pv_irq_ops.irq_disable.func), [paravirt_clobber] "i" (1): "memory", "cc"); return; } } __inline static unsigned long arch_local_irq_save___0(void) { unsigned long f ; { f = 
arch_local_save_flags___0(); arch_local_irq_disable___0(); return (f); } } extern void do_gettimeofday(struct timeval * ) ; bool ldv_queue_work_on_263(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_265(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_264(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_267(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_266(struct workqueue_struct *ldv_func_arg1 ) ; __inline static void memcpy_fromio(void *dst , void const volatile *src , size_t count ) { { memcpy(dst, (void const *)src, count); return; } } __inline static void memcpy_toio(void volatile *dst , void const *src , size_t count ) { { memcpy((void *)dst, src, count); return; } } void choose_timer_27(struct timer_list *timer ) ; void activate_pending_timer_27(struct timer_list *timer , unsigned long data , int pending_flag ) ; void disable_suitable_timer_27(struct timer_list *timer ) ; int reg_timer_27(struct timer_list *timer ) ; int ldv_scsi_add_host_with_dma_268(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; static char const * const port_state_str___4[5U] = { "Unknown", "UNCONFIGURED", "DEAD", "LOST", "ONLINE"}; extern void usleep_range(unsigned long , unsigned long ) ; __inline static void qla2x00_poll___0(struct rsp_que *rsp ) { unsigned long flags ; struct qla_hw_data *ha ; int tmp ; { ha = rsp->hw; flags = arch_local_irq_save___0(); trace_hardirqs_off(); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { qla82xx_poll(0, (void *)rsp); } else { (*((ha->isp_ops)->intr_handler))(0, (void *)rsp); } tmp = arch_irqs_disabled_flags(flags); if (tmp != 0) { arch_local_irq_restore___0(flags); trace_hardirqs_off(); } else { trace_hardirqs_on(); arch_local_irq_restore___0(flags); } return; } } __inline static void host_to_adap(uint8_t *src , uint8_t *dst , uint32_t bsize ) { uint32_t *isrc ; __le32 *odest ; uint32_t iter ; __le32 *tmp ; uint32_t *tmp___0 ; { isrc = (uint32_t *)src; odest = (__le32 *)dst; iter = bsize >> 2; goto ldv_65668; ldv_65667: tmp = odest; odest = odest + 1; tmp___0 = isrc; isrc = isrc + 1; *tmp = *tmp___0; iter = iter - 1U; ldv_65668: ; if (iter != 0U) { goto ldv_65667; } else { } return; } } __inline static void qla2x00_set_fcport_state___3(fc_port_t *fcport , int state ) { int old_state ; { old_state = atomic_read((atomic_t const *)(& fcport->state)); atomic_set(& fcport->state, state); if (old_state != 0 && old_state != state) { ql_dbg(268435456U, fcport->vha, 8317, "FCPort state transitioned from %s to %s - portid=%02x%02x%02x.\n", port_state_str___4[old_state], port_state_str___4[state], (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { } return; } } static int qlafx00_mailbox_command(scsi_qla_host_t *vha , struct mbx_cmd_32 *mcp ) { int rval ; unsigned long flags ; device_reg_t *reg ; uint8_t abort_active ; uint8_t io_lock_on ; uint16_t command ; uint32_t *iptr ; uint32_t *optr ; uint32_t cnt ; uint32_t mboxes ; unsigned long wait_time ; struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; unsigned long tmp___1 ; raw_spinlock_t *tmp___2 ; uint32_t *iptr2 ; int tmp___3 ; int tmp___4 
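/* Added descriptive comment: ISPFx00 mailbox path. Load the outgoing mb
   registers selected by out_mb, write mbx_intr_code at cregbase + 0x20A04
   (presumably the mailbox doorbell), then either sleep on mbx_intr_comp or
   poll the response queue until completion or the command timeout expires;
   results selected by in_mb are copied back. The doorbell interpretation is
   an assumption based on the constants, not taken from the source. */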
; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; { flags = 0UL; command = 0U; ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; if ((ha->pdev)->error_state > 2U) { ql_log(1U, vha, 4444, "error_state is greater than pci_channel_io_frozen, exiting.\n"); return (256); } else { } if ((vha->device_flags & 32U) != 0U) { ql_log(1U, vha, 4447, "Device in failed state, exiting.\n"); return (256); } else { } reg = ha->iobase; io_lock_on = (uint8_t )base_vha->flags.init_done; rval = 0; tmp___0 = constant_test_bit(3L, (unsigned long const volatile *)(& base_vha->dpc_flags)); abort_active = (uint8_t )tmp___0; if (*((unsigned long *)ha + 2UL) != 0UL) { ql_log(1U, vha, 4469, "Perm failure on EEH timeout MBX, exiting.\n"); return (256); } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { mcp->mb[0] = 16395U; ql_log(1U, vha, 4470, "FW hung = %d.\n", (int )ha->flags.isp82xx_fw_hung); rval = 258; goto premature_exit; } else { } tmp___1 = wait_for_completion_timeout(& ha->mbx_cmd_comp, (unsigned long )(mcp->tov * 250U)); if (tmp___1 == 0UL) { ql_log(1U, vha, 4471, "Cmd access timeout, cmd=0x%x, Exiting.\n", mcp->mb[0]); return (256); } else { } ha->flags.mbox_busy = 1U; ha->mcp32 = mcp; ql_dbg(536870912U, vha, 4472, "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); optr = & reg->ispfx00.mailbox0; iptr = (uint32_t *)(& mcp->mb); command = (uint16_t )mcp->mb[0]; mboxes = mcp->out_mb; cnt = 0U; goto ldv_65998; ldv_65997: ; if ((int )mboxes & 1) { writel(*iptr, (void volatile *)optr); } else { } mboxes = mboxes >> 1; optr = optr + 1; iptr = iptr + 1; cnt = cnt + 1U; ldv_65998: ; if ((uint32_t )ha->mbx_count > cnt) { goto ldv_65997; } else { } ha->flags.mbox_int = 0U; clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); ql_dump_buffer(537001984U, vha, 4466, (uint8_t *)(& mcp->mb), 16U); ql_dump_buffer(537001984U, vha, 4467, (uint8_t *)(& mcp->mb) + 16UL, 16U); ql_dump_buffer(537001984U, vha, 4468, (uint8_t *)(& mcp->mb) + 32UL, 8U); ql_dbg(536870912U, vha, 4473, "Going to unlock irq & waiting for interrupts. 
jiffies=%lx.\n", jiffies); if (((unsigned int )abort_active == 0U && (unsigned int )io_lock_on != 0U) || ((ha->device_type & 8192U) != 0U && *((unsigned long *)ha + 2UL) != 0UL)) { set_bit(2L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); writel(ha->mbx_intr_code, (void volatile *)ha->cregbase + 133636U); spin_unlock_irqrestore(& ha->hardware_lock, flags); wait_for_completion_timeout(& ha->mbx_intr_comp, (unsigned long )(mcp->tov * 250U)); } else { ql_dbg(536870912U, vha, 4396, "Cmd=%x Polling Mode.\n", (int )command); writel(ha->mbx_intr_code, (void volatile *)ha->cregbase + 133636U); spin_unlock_irqrestore(& ha->hardware_lock, flags); wait_time = (unsigned long )(mcp->tov * 250U) + (unsigned long )jiffies; goto ldv_66008; ldv_66007: ; if ((long )(wait_time - (unsigned long )jiffies) < 0L) { goto ldv_66006; } else { } qla2x00_poll___0(*(ha->rsp_q_map)); if (*((unsigned long *)ha + 2UL) == 0UL && ((ha->device_type & 2U) == 0U || (unsigned int )command != 11U)) { usleep_range(10000UL, 11000UL); } else { } ldv_66008: ; if (*((unsigned long *)ha + 2UL) == 0UL) { goto ldv_66007; } else { } ldv_66006: ql_dbg(536870912U, vha, 4397, "Waited %d sec.\n", (unsigned int )((((unsigned long )(mcp->tov * 250U) - wait_time) + (unsigned long )jiffies) / 250UL)); } if (*((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(536870912U, vha, 4398, "Cmd=%x completed.\n", (int )command); ha->flags.mbox_int = 0U; clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (ha->mailbox_out32[0] != 16384U) { rval = 258; } else { } iptr2 = (uint32_t *)(& mcp->mb); iptr = (uint32_t *)(& ha->mailbox_out32); mboxes = mcp->in_mb; cnt = 0U; goto ldv_66011; ldv_66010: ; if ((int )mboxes & 1) { *iptr2 = *iptr; } else { } mboxes = mboxes >> 1; iptr2 = iptr2 + 1; iptr = iptr + 1; cnt = cnt + 1U; ldv_66011: ; if ((uint32_t )ha->mbx_count > cnt) { goto ldv_66010; } else { } } else { rval = 256; } ha->flags.mbox_busy = 0U; ha->mcp32 = (struct mbx_cmd_32 *)0; if (((unsigned int )abort_active != 0U || (unsigned int )io_lock_on == 0U) && ((ha->device_type & 8192U) == 0U || *((unsigned long *)ha + 2UL) == 0UL)) { ql_dbg(536870912U, vha, 4410, "checking for additional resp interrupt.\n"); qla2x00_poll___0(*(ha->rsp_q_map)); } else { } if (rval == 256 && mcp->mb[0] != 42U) { if (((unsigned int )io_lock_on == 0U || ((int )mcp->flags & 4) != 0) || *((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(536870912U, vha, 4445, "Timeout, schedule isp_abort_needed.\n"); tmp___3 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___3 == 0) { tmp___4 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___4 == 0) { tmp___5 = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___5 == 0) { ql_log(2U, base_vha, 4446, "Mailbox cmd timeout occurred, cmd=0x%x, mb[0]=0x%x, eeh_busy=0x%x. 
Scheduling ISP abort.\n", (int )command, mcp->mb[0], (int )ha->flags.eeh_busy); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else { } } else { } } else { } } else if ((unsigned int )abort_active == 0U) { ql_dbg(536870912U, vha, 4448, "Timeout, calling abort_isp.\n"); tmp___7 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___7 == 0) { tmp___8 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___8 == 0) { tmp___9 = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___9 == 0) { ql_log(2U, base_vha, 4449, "Mailbox cmd timeout occurred, cmd=0x%x, mb[0]=0x%x. Scheduling ISP abort ", (int )command, mcp->mb[0]); set_bit(3L, (unsigned long volatile *)(& vha->dpc_flags)); clear_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); tmp___6 = (*((ha->isp_ops)->abort_isp))(vha); if (tmp___6 != 0) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } clear_bit(3L, (unsigned long volatile *)(& vha->dpc_flags)); ql_dbg(536870912U, vha, 4450, "Finished abort_isp.\n"); } else { } } else { } } else { } } else { } } else { } premature_exit: complete(& ha->mbx_cmd_comp); if (rval != 0) { ql_log(1U, base_vha, 4451, "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n", mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], (int )command); } else { ql_dbg(536870912U, base_vha, 4452, "Done %s.\n", "qlafx00_mailbox_command"); } return (rval); } } int qlafx00_driver_shutdown(scsi_qla_host_t *vha , int tmo ) { int rval ; struct mbx_cmd_32 mc ; struct mbx_cmd_32 *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4454, "Entered %s.\n", "qlafx00_driver_shutdown"); mcp->mb[0] = 106U; mcp->out_mb = 1U; mcp->in_mb = 1U; if (tmo != 0) { mcp->tov = (uint32_t )tmo; } else { mcp->tov = 30U; } mcp->flags = 0U; rval = qlafx00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4455, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4456, "Done %s.\n", "qlafx00_driver_shutdown"); } return (rval); } } static int qlafx00_get_firmware_state(scsi_qla_host_t *vha , uint32_t *states ) { int rval ; struct mbx_cmd_32 mc ; struct mbx_cmd_32 *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4457, "Entered %s.\n", "qlafx00_get_firmware_state"); mcp->mb[0] = 105U; mcp->out_mb = 1U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qlafx00_mailbox_command(vha, mcp); *states = mcp->mb[1]; if (rval != 0) { ql_dbg(536870912U, vha, 4458, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4459, "Done %s.\n", "qlafx00_get_firmware_state"); } return (rval); } } int qlafx00_init_firmware(scsi_qla_host_t *vha , uint16_t size ) { int rval ; struct mbx_cmd_32 mc ; struct mbx_cmd_32 *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4460, "Entered %s.\n", "qlafx00_init_firmware"); mcp->mb[0] = 96U; mcp->mb[1] = 0U; mcp->mb[2] = (unsigned int )(ha->init_cb_dma >> 32ULL); mcp->mb[3] = (unsigned int )ha->init_cb_dma; mcp->out_mb = 15U; mcp->in_mb = 1U; mcp->buf_size = (long )size; mcp->flags = 2U; mcp->tov = 30U; rval = qlafx00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4461, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4462, "Done %s.\n", "qlafx00_init_firmware"); } return (rval); } } static int qlafx00_mbx_reg_test(scsi_qla_host_t *vha ) { int rval ; struct mbx_cmd_32 mc ; struct mbx_cmd_32 *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4463, "Entered 
%s.\n", "qlafx00_mbx_reg_test"); mcp->mb[0] = 6U; mcp->mb[1] = 43690U; mcp->mb[2] = 21845U; mcp->mb[3] = 43605U; mcp->mb[4] = 21930U; mcp->mb[5] = 42405U; mcp->mb[6] = 23130U; mcp->mb[7] = 9509U; mcp->mb[8] = 48059U; mcp->mb[9] = 26214U; mcp->mb[10] = 47974U; mcp->mb[11] = 26299U; mcp->mb[12] = 46774U; mcp->mb[13] = 27499U; mcp->mb[14] = 13878U; mcp->mb[15] = 52428U; mcp->out_mb = 65535U; mcp->in_mb = 65535U; mcp->buf_size = 0L; mcp->flags = 2U; mcp->tov = 30U; rval = qlafx00_mailbox_command(vha, mcp); if (rval == 0) { if (((mcp->mb[17] != 43690U || mcp->mb[18] != 21845U) || mcp->mb[19] != 43605U) || mcp->mb[20] != 21930U) { rval = 258; } else { } if (((mcp->mb[21] != 42405U || mcp->mb[22] != 23130U) || mcp->mb[23] != 9509U) || mcp->mb[24] != 48059U) { rval = 258; } else { } if (((mcp->mb[25] != 26214U || mcp->mb[26] != 47974U) || mcp->mb[27] != 26299U) || mcp->mb[28] != 46774U) { rval = 258; } else { } if ((mcp->mb[29] != 27499U || mcp->mb[30] != 13878U) || mcp->mb[31] != 52428U) { rval = 258; } else { } } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4464, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4465, "Done %s.\n", "qlafx00_mbx_reg_test"); } return (rval); } } int qlafx00_pci_config(struct scsi_qla_host *vha ) { uint16_t w ; struct qla_hw_data *ha ; bool tmp ; { ha = vha->hw; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & w); w = (uint16_t )((unsigned int )w | 320U); w = (unsigned int )w & 64511U; pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )w); tmp = pci_is_pcie(ha->pdev); if ((int )tmp) { pcie_set_readrq(ha->pdev, 2048); } else { } ha->chip_revision = (uint16_t )(ha->pdev)->revision; return (0); } } __inline static void qlafx00_soc_cpu_reset(scsi_qla_host_t *vha ) { unsigned long flags ; struct qla_hw_data *ha ; int i ; int core ; uint32_t cnt ; uint32_t reg_val ; raw_spinlock_t *tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; { flags = 0UL; ha = vha->hw; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writel(0U, (void volatile *)ha->cregbase + 524292U); writel(0U, (void volatile *)ha->cregbase + 532484U); writel(2U, (void volatile *)ha->cregbase + 395552U); writel(2U, (void volatile *)ha->cregbase + 395556U); writel(2U, (void volatile *)ha->cregbase + 985376U); writel(2U, (void volatile *)ha->cregbase + 985380U); reg_val = readl((void const volatile *)ha->cregbase + 395328U); reg_val = reg_val & 4294963199U; writel(reg_val, (void volatile *)ha->cregbase + 395328U); reg_val = readl((void const volatile *)ha->cregbase + 395332U); reg_val = reg_val & 4294963199U; writel(reg_val, (void volatile *)ha->cregbase + 395332U); reg_val = readl((void const volatile *)ha->cregbase + 395336U); reg_val = reg_val & 4294963199U; writel(reg_val, (void volatile *)ha->cregbase + 395336U); reg_val = readl((void const volatile *)ha->cregbase + 395340U); reg_val = reg_val & 4294963199U; writel(reg_val, (void volatile *)ha->cregbase + 395340U); i = 0; goto ldv_66065; ldv_66064: tmp___0 = readl((void const volatile *)ha->cregbase + 851968U); if ((tmp___0 & 268435456U) == 0U) { tmp___1 = readl((void const volatile *)ha->cregbase + 67072U); if ((tmp___1 & 1U) == 0U) { goto ldv_66063; } else { } } else { } __const_udelay(429500UL); i = i + 1; ldv_66065: ; if (i <= 99999) { goto ldv_66064; } else { } ldv_66063: i = 0; goto ldv_66067; ldv_66066: writel(3841U, (void volatile *)ha->cregbase + (unsigned long )((i + 16640) * 8)); writel(16843009U, (void 
volatile *)ha->cregbase + (unsigned long )(i * 8 + 133124)); i = i + 1; ldv_66067: ; if (i <= 3) { goto ldv_66066; } else { } writel(18809089U, (void volatile *)ha->cregbase + 133184U); writel(1U, (void volatile *)ha->cregbase + 67088U); writel(0U, (void volatile *)ha->cregbase + 67072U); i = 0; goto ldv_66070; ldv_66069: writel(0U, (void volatile *)ha->cregbase + (unsigned long )((i + 24707) * 4)); i = i + 1; ldv_66070: ; if (i <= 4) { goto ldv_66069; } else { } i = 0; goto ldv_66073; ldv_66072: writel(0U, (void volatile *)ha->cregbase + (unsigned long )((i + 33472) * 4)); i = i + 1; ldv_66073: ; if (i <= 114) { goto ldv_66072; } else { } core = 0; goto ldv_66079; ldv_66078: i = 0; goto ldv_66076; ldv_66075: writel(0U, (void volatile *)ha->cregbase + (unsigned long )(((core * 64 + i) + 34324) * 4)); i = i + 1; ldv_66076: ; if (i <= 7) { goto ldv_66075; } else { } core = core + 1; ldv_66079: ; if (core <= 3) { goto ldv_66078; } else { } core = 0; goto ldv_66082; ldv_66081: writel(1023U, (void volatile *)ha->cregbase + (unsigned long )(core * 256 + 137396)); core = core + 1; ldv_66082: ; if (core <= 3) { goto ldv_66081; } else { } writel(2U, (void volatile *)ha->cregbase + 131584U); writel(3U, (void volatile *)ha->cregbase + 131588U); writel(0U, (void volatile *)ha->cregbase + 133184U); writel(3840U, (void volatile *)ha->cregbase + 133120U); spin_unlock_irqrestore(& ha->hardware_lock, flags); cnt = 10U; goto ldv_66085; ldv_66084: msleep(1000U); __asm__ volatile ("": : : "memory"); cnt = cnt - 1U; ldv_66085: ; if (cnt != 0U) { goto ldv_66084; } else { } return; } } void qlafx00_soft_reset(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int tmp ; long tmp___0 ; { ha = vha->hw; tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect((long )(tmp != 0 && *((unsigned long *)ha + 2UL) != 0UL), 0L); if (tmp___0 != 0L) { return; } else { } (*((ha->isp_ops)->disable_intrs))(ha); qlafx00_soc_cpu_reset(vha); return; } } int qlafx00_chip_diag(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; struct req_que *req ; { rval = 0; ha = vha->hw; req = *(ha->req_q_map); ha->fw_transfer_size = (uint32_t )req->length * 64U; rval = qlafx00_mbx_reg_test(vha); if (rval != 0) { ql_log(1U, vha, 4453, "Failed mailbox send register test\n"); } else { rval = 0; } return (rval); } } void qlafx00_config_rings(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct device_reg_fx00 *reg ; { ha = vha->hw; reg = & (ha->iobase)->ispfx00; writel(0U, (void volatile *)(& reg->req_q_in)); writel(0U, (void volatile *)(& reg->req_q_out)); writel(0U, (void volatile *)(& reg->rsp_q_in)); writel(0U, (void volatile *)(& reg->rsp_q_out)); readl((void const volatile *)(& reg->rsp_q_out)); return; } } char *qlafx00_pci_info_str(struct scsi_qla_host *vha , char *str ) { struct qla_hw_data *ha ; bool tmp ; { ha = vha->hw; tmp = pci_is_pcie(ha->pdev); if ((int )tmp) { strcpy(str, "PCIe iSA"); return (str); } else { } return (str); } } char *qlafx00_fw_version_str(struct scsi_qla_host *vha , char *str , size_t size ) { struct qla_hw_data *ha ; { ha = vha->hw; snprintf(str, size, "%s", (uint8_t *)(& ha->mr.fw_version)); return (str); } } void qlafx00_enable_intrs(struct qla_hw_data *ha ) { unsigned long flags ; raw_spinlock_t *tmp ; unsigned int tmp___0 ; { flags = 0UL; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ha->interrupts_on = 1U; tmp___0 = readl((void const volatile *)ha->cregbase + 133896U); writel(tmp___0 | 2147483648U, (void volatile *)ha->cregbase + 133896U); 
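/* Descriptive note (added): in qlafx00_enable_intrs() above, bit 31
 * (0x80000000) has just been OR-ed into the register at cregbase offset
 * 133896 to enable adapter interrupts; the hardware lock is released below. */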
spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } void qlafx00_disable_intrs(struct qla_hw_data *ha ) { unsigned long flags ; raw_spinlock_t *tmp ; unsigned int tmp___0 ; { flags = 0UL; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ha->interrupts_on = 0U; tmp___0 = readl((void const volatile *)ha->cregbase + 133896U); writel(tmp___0 & 2147483647U, (void volatile *)ha->cregbase + 133896U); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } int qlafx00_abort_target(fc_port_t *fcport , uint64_t l , int tag ) { int tmp ; { tmp = qla2x00_async_tm_cmd(fcport, 2U, (uint32_t )l, (uint32_t )tag); return (tmp); } } int qlafx00_lun_reset(fc_port_t *fcport , uint64_t l , int tag ) { int tmp ; { tmp = qla2x00_async_tm_cmd(fcport, 16U, (uint32_t )l, (uint32_t )tag); return (tmp); } } int qlafx00_loop_reset(scsi_qla_host_t *vha ) { int ret ; struct fc_port *fcport ; struct qla_hw_data *ha ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { ha = vha->hw; if (ql2xtargetreset != 0) { __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (struct fc_port *)__mptr; goto ldv_66149; ldv_66148: ; if ((unsigned int )fcport->port_type != 5U) { goto ldv_66147; } else { } ret = (*((ha->isp_ops)->target_reset))(fcport, 0ULL, 0); if (ret != 0) { ql_dbg(4194304U, vha, 32829, "Bus Reset failed: Reset=%d d_id=%x.\n", ret, (int )fcport->d_id.b24); } else { } ldv_66147: __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (struct fc_port *)__mptr___0; ldv_66149: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66148; } else { } } else { } return (0); } } int qlafx00_iospace_config(struct qla_hw_data *ha ) { char const *tmp ; int tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; char const *tmp___4 ; char const *tmp___5 ; void *tmp___6 ; char const *tmp___7 ; uint8_t tmp___8 ; { tmp___0 = pci_request_selected_regions(ha->pdev, ha->bars, "qla2xxx"); if (tmp___0 != 0) { tmp = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 334, "Failed to reserve PIO/MMIO regions (%s), aborting.\n", tmp); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[0].flags & 512UL) == 0UL) { tmp___1 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 335, "Invalid pci I/O region size (%s).\n", tmp___1); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[0].start == 0ULL && (ha->pdev)->resource[0].end == (ha->pdev)->resource[0].start) || ((ha->pdev)->resource[0].end - (ha->pdev)->resource[0].start) + 1ULL <= 1048575ULL) { tmp___2 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 295, "Invalid PCI mem BAR0 region size (%s), aborting\n", tmp___2); goto iospace_error_exit; } else { } ha->cregbase = ioremap_nocache((ha->pdev)->resource[0].start, 1048576UL); if ((unsigned long )ha->cregbase == (unsigned long )((void *)0)) { tmp___3 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 296, "cannot remap MMIO (%s), aborting\n", tmp___3); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[2].flags & 512UL) == 0UL) { tmp___4 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 297, "region #2 not an MMIO resource (%s), aborting\n", tmp___4); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[2].start == 0ULL && (ha->pdev)->resource[2].end == (ha->pdev)->resource[2].start) || ((ha->pdev)->resource[2].end - (ha->pdev)->resource[2].start) + 1ULL <= 
131071ULL) { tmp___5 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 298, "Invalid PCI mem BAR2 region size (%s), aborting\n", tmp___5); goto iospace_error_exit; } else { } tmp___6 = ioremap_nocache((ha->pdev)->resource[2].start, 131072UL); ha->iobase = (device_reg_t *)tmp___6; if ((unsigned long )ha->iobase == (unsigned long )((device_reg_t *)0)) { tmp___7 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 299, "cannot remap MMIO (%s), aborting\n", tmp___7); goto iospace_error_exit; } else { } tmp___8 = 1U; ha->max_rsp_queues = tmp___8; ha->max_req_queues = tmp___8; ql_log_pci(2U, ha->pdev, 300, "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n", ha->bars, ha->cregbase, ha->iobase); return (0); iospace_error_exit: ; return (-12); } } static void qlafx00_save_queue_ptrs(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; { ha = vha->hw; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); req->length_fx00 = req->length; req->ring_fx00 = req->ring; req->dma_fx00 = req->dma; rsp->length_fx00 = rsp->length; rsp->ring_fx00 = rsp->ring; rsp->dma_fx00 = rsp->dma; ql_dbg(1073741824U, vha, 301, "req: %p, ring_fx00: %p, length_fx00: 0x%x,req->dma_fx00: 0x%llx\n", req, req->ring_fx00, (int )req->length_fx00, req->dma_fx00); ql_dbg(1073741824U, vha, 302, "rsp: %p, ring_fx00: %p, length_fx00: 0x%x,rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00, (int )rsp->length_fx00, rsp->dma_fx00); return; } } static int qlafx00_config_queues(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; dma_addr_t bar2_hdl ; { ha = vha->hw; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); bar2_hdl = (ha->pdev)->resource[2].start; req->length = (uint16_t )ha->req_que_len; req->ring = (request_t *)ha->iobase + (unsigned long )ha->req_que_off; req->dma = (dma_addr_t )ha->req_que_off + bar2_hdl; if ((unsigned long )req->ring == (unsigned long )((request_t *)0) || (unsigned int )req->length == 0U) { ql_log_pci(2U, ha->pdev, 303, "Unable to allocate memory for req_ring\n"); return (258); } else { } ql_dbg(1073741824U, vha, 304, "req: %p req_ring pointer %p req len 0x%x req off 0x%x\n, req->dma: 0x%llx", req, req->ring, (int )req->length, ha->req_que_off, req->dma); rsp->length = (uint16_t )ha->rsp_que_len; rsp->ring = (response_t *)ha->iobase + (unsigned long )ha->rsp_que_off; rsp->dma = (dma_addr_t )ha->rsp_que_off + bar2_hdl; if ((unsigned long )rsp->ring == (unsigned long )((response_t *)0) || (unsigned int )rsp->length == 0U) { ql_log_pci(2U, ha->pdev, 305, "Unable to allocate memory for rsp_ring\n"); return (258); } else { } ql_dbg(1073741824U, vha, 306, "rsp: %p rsp_ring pointer %p rsp len 0x%x rsp off 0x%x, rsp->dma: 0x%llx\n", rsp, rsp->ring, (int )rsp->length, ha->rsp_que_off, rsp->dma); return (0); } } static int qlafx00_init_fw_ready(scsi_qla_host_t *vha ) { int rval ; unsigned long wtime ; uint16_t wait_time ; struct qla_hw_data *ha ; struct device_reg_fx00 *reg ; uint32_t aenmbx ; uint32_t aenmbx7 ; uint32_t pseudo_aen ; uint32_t state[5U] ; bool done ; unsigned int tmp ; { rval = 0; ha = vha->hw; reg = & (ha->iobase)->ispfx00; aenmbx7 = 0U; done = 0; wait_time = 30U; pseudo_aen = readl((void const volatile *)(& reg->pseudoaen)); if (pseudo_aen == 1U) { aenmbx7 = readl((void const volatile *)(& reg->initval7)); ha->mbx_intr_code = (uint32_t )((unsigned short )(aenmbx7 >> 16)); ha->rqstq_intr_code = (uint32_t )((unsigned short )aenmbx7); rval = qlafx00_driver_shutdown(vha, 10); if (rval != 0) { 
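/* Descriptive note (added): the driver-shutdown mailbox command issued in the
 * pseudo-AEN path of qlafx00_init_fw_ready() failed (rval != 0), so the code
 * falls back to a soft reset of the SOC before waiting for firmware init. */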
qlafx00_soft_reset(vha); } else { } } else { } wtime = (unsigned long )((int )wait_time * 250) + (unsigned long )jiffies; ldv_66197: aenmbx = readl((void const volatile *)(& reg->aenmailbox0)); __asm__ volatile ("": : : "memory"); ql_dbg(536870912U, vha, 307, "aenmbx: 0x%x\n", aenmbx); switch (aenmbx) { case 32848U: ; case 32849U: ; goto ldv_66183; case 32770U: ; case 32771U: ; case 32772U: ; case 33793U: qlafx00_soft_reset(vha); goto ldv_66183; case 32864U: aenmbx7 = readl((void const volatile *)(& reg->aenmailbox7)); ha->mbx_intr_code = (uint32_t )((unsigned short )(aenmbx7 >> 16)); ha->rqstq_intr_code = (uint32_t )((unsigned short )aenmbx7); ha->req_que_off = readl((void const volatile *)(& reg->aenmailbox1)); ha->rsp_que_off = readl((void const volatile *)(& reg->aenmailbox3)); ha->req_que_len = readl((void const volatile *)(& reg->aenmailbox5)); ha->rsp_que_len = readl((void const volatile *)(& reg->aenmailbox6)); writel(0U, (void volatile *)(& reg->aenmailbox0)); __readl((void const volatile *)(& reg->aenmailbox0)); ql_dbg(1073741824U, vha, 308, "f/w returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x\n", ha->mbx_intr_code, ha->rqstq_intr_code); writel(4294967288U, (void volatile *)ha->cregbase + 138096U); rval = 0; done = 1; goto ldv_66183; default: ; if ((aenmbx & 65280U) == 34048U) { goto ldv_66183; } else { } aenmbx7 = readl((void const volatile *)(& reg->initval7)); ha->mbx_intr_code = (uint32_t )((unsigned short )(aenmbx7 >> 16)); ha->rqstq_intr_code = (uint32_t )((unsigned short )aenmbx7); ha->req_que_off = readl((void const volatile *)(& reg->initval1)); ha->rsp_que_off = readl((void const volatile *)(& reg->initval3)); ha->req_que_len = readl((void const volatile *)(& reg->initval5)); ha->rsp_que_len = readl((void const volatile *)(& reg->initval6)); ql_dbg(1073741824U, vha, 309, "f/w returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x\n", ha->mbx_intr_code, ha->rqstq_intr_code); writel(4294967288U, (void volatile *)ha->cregbase + 138096U); rval = qlafx00_get_firmware_state(vha, (uint32_t *)(& state)); if (rval != 0) { goto ldv_66183; } else { } if (state[0] == 0U) { rval = 0; done = 1; goto ldv_66183; } else { } ql_dbg(1073741824U, vha, 310, "Sending Driver shutdown fw_state 0x%x\n", state[0]); rval = qlafx00_driver_shutdown(vha, 10); if (rval != 0) { rval = 258; goto ldv_66183; } else { } msleep(500U); wtime = (unsigned long )((int )wait_time * 250) + (unsigned long )jiffies; goto ldv_66183; } ldv_66183: ; if (! done) { if ((long )((unsigned long )jiffies - wtime) >= 0L) { tmp = readl((void const volatile *)(& reg->aenmailbox7)); ql_dbg(1073741824U, vha, 311, "Init f/w failed: aen[7]: 0x%x\n", tmp); rval = 258; done = 1; goto ldv_66196; } else { } msleep(500U); } else { } if (! 
done) { goto ldv_66197; } else { } ldv_66196: ; if (rval != 0) { ql_dbg(1073741824U, vha, 312, "%s **** FAILED ****.\n", "qlafx00_init_fw_ready"); } else { ql_dbg(1073741824U, vha, 313, "%s **** SUCCESS ****.\n", "qlafx00_init_fw_ready"); } return (rval); } } int qlafx00_fw_ready(scsi_qla_host_t *vha ) { int rval ; unsigned long wtime ; uint16_t wait_time ; uint32_t state[5U] ; { rval = 0; wait_time = 10U; wtime = (unsigned long )((int )wait_time * 250) + (unsigned long )jiffies; if (*((unsigned long *)vha + 19UL) == 0UL) { ql_dbg(1073741824U, vha, 314, "Waiting for init to complete...\n"); } else { } ldv_66213: rval = qlafx00_get_firmware_state(vha, (uint32_t *)(& state)); if (rval == 0) { if (state[0] == 4096U) { ql_dbg(1073741824U, vha, 315, "fw_state=%x\n", state[0]); rval = 0; goto ldv_66206; } else { } } else { } rval = 258; if ((long )((unsigned long )jiffies - wtime) >= 0L) { goto ldv_66206; } else { } msleep(500U); ql_dbg(1073741824U, vha, 316, "fw_state=%x curr time=%lx.\n", state[0], jiffies); goto ldv_66213; ldv_66206: ; if (rval != 0) { ql_dbg(1073741824U, vha, 317, "Firmware ready **** FAILED ****.\n"); } else { ql_dbg(1073741824U, vha, 318, "Firmware ready **** SUCCESS ****.\n"); } return (rval); } } static int qlafx00_find_all_targets(scsi_qla_host_t *vha , struct list_head *new_fcports ) { int rval ; uint16_t tgt_id ; fc_port_t *fcport ; fc_port_t *new_fcport ; int found ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; unsigned long tmp___3 ; struct list_head const *__mptr ; int tmp___4 ; int tmp___5 ; u64 tmp___6 ; u64 tmp___7 ; int tmp___8 ; u64 tmp___9 ; u64 tmp___10 ; int tmp___11 ; struct list_head const *__mptr___0 ; unsigned long tmp___12 ; { ha = vha->hw; rval = 0; tmp = constant_test_bit(5L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp == 0) { return (258); } else { } tmp___0 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___0 != 0) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); return (258); } else { tmp___1 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); return (258); } else { tmp___2 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___2 != 0) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); return (258); } else { } } } ql_dbg(1342177280U, vha, 8328, "Listing Target bit map...\n"); ql_dump_buffer(1342177280U, vha, 8329, (uint8_t *)ha->gid_list, 32U); new_fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )new_fcport == (unsigned long )((fc_port_t *)0)) { return (259); } else { } tmp___3 = find_first_bit((unsigned long const *)ha->gid_list, 128UL); tgt_id = (uint16_t )tmp___3; goto ldv_66235; ldv_66234: new_fcport->tgt_id = tgt_id; rval = qlafx00_fx_disc(vha, new_fcport, 128); if (rval != 0) { ql_log(1U, vha, 8330, "Target info scan failed -- assuming zero-entry result...\n"); goto ldv_66224; } else { } found = 0; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_66233; ldv_66232: tmp___4 = memcmp((void const *)(& new_fcport->port_name), (void const *)(& fcport->port_name), 8UL); if (tmp___4 != 0) { goto ldv_66229; } else { } found = found + 1; if ((int )fcport->tgt_id == (int )new_fcport->tgt_id) { tmp___5 = atomic_read((atomic_t const *)(& fcport->state)); if 
(tmp___5 == 4) { goto ldv_66230; } else { } } else { } tmp___6 = wwn_to_u64((u8 *)(& fcport->port_name)); tmp___7 = wwn_to_u64((u8 *)(& fcport->node_name)); tmp___8 = atomic_read((atomic_t const *)(& fcport->state)); ql_dbg(1342177280U, vha, 8331, "TGT-ID Change(%s): Present tgt id: 0x%x state: 0x%x wwnn = %llx wwpn = %llx.\n", "qlafx00_find_all_targets", (int )fcport->tgt_id, tmp___8, tmp___7, tmp___6); tmp___9 = wwn_to_u64((u8 *)(& new_fcport->port_name)); tmp___10 = wwn_to_u64((u8 *)(& new_fcport->node_name)); ql_log(2U, vha, 8332, "TGT-ID Announce(%s): Discovered tgt id 0x%x wwnn = %llx wwpn = %llx.\n", "qlafx00_find_all_targets", (int )new_fcport->tgt_id, tmp___10, tmp___9); tmp___11 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___11 != 4) { fcport->old_tgt_id = fcport->tgt_id; fcport->tgt_id = new_fcport->tgt_id; ql_log(2U, vha, 8333, "TGT-ID: New fcport Added: %p\n", fcport); qla2x00_update_fcport(vha, fcport); } else { ql_log(2U, vha, 8334, " Existing TGT-ID %x did not get offline event from firmware.\n", (int )fcport->old_tgt_id); qla2x00_mark_device_lost(vha, fcport, 0, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); kfree((void const *)new_fcport); return (rval); } goto ldv_66230; ldv_66229: __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_66233: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66232; } else { } ldv_66230: ; if (found != 0) { goto ldv_66224; } else { } list_add_tail(& new_fcport->list, new_fcports); new_fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )new_fcport == (unsigned long )((fc_port_t *)0)) { return (259); } else { } ldv_66224: tmp___12 = find_next_bit((unsigned long const *)ha->gid_list, 128UL, (unsigned long )((int )tgt_id + 1)); tgt_id = (uint16_t )tmp___12; ldv_66235: ; if ((unsigned int )tgt_id <= 127U) { goto ldv_66234; } else { } kfree((void const *)new_fcport); return (rval); } } static int qlafx00_configure_all_targets(scsi_qla_host_t *vha ) { int rval ; fc_port_t *fcport ; fc_port_t *rmptemp ; struct list_head new_fcports ; struct list_head const *__mptr ; int tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; int tmp___1 ; u64 tmp___2 ; u64 tmp___3 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; struct list_head const *__mptr___5 ; struct list_head const *__mptr___6 ; { new_fcports.next = & new_fcports; new_fcports.prev = & new_fcports; rval = qlafx00_fx_disc(vha, & (vha->hw)->mr.fcport, 129); if (rval != 0) { set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); return (rval); } else { } rval = qlafx00_find_all_targets(vha, & new_fcports); if (rval != 0) { set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); return (rval); } else { } __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_66250; ldv_66249: tmp = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp != 0) { goto ldv_66248; } else { } tmp___0 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___0 == 3) { if ((unsigned int )fcport->port_type != 4U) { qla2x00_mark_device_lost(vha, fcport, 0, 0); } else { } } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_66250: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66249; } else { } ldv_66248: __mptr___1 = (struct 
list_head const *)new_fcports.next; fcport = (fc_port_t *)__mptr___1; __mptr___2 = (struct list_head const *)fcport->list.next; rmptemp = (fc_port_t *)__mptr___2; goto ldv_66259; ldv_66258: tmp___1 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { goto ldv_66257; } else { } qla2x00_update_fcport(vha, fcport); list_move_tail(& fcport->list, & vha->vp_fcports); tmp___2 = wwn_to_u64((u8 *)(& fcport->port_name)); tmp___3 = wwn_to_u64((u8 *)(& fcport->node_name)); ql_log(2U, vha, 8335, "Attach new target id 0x%x wwnn = %llx wwpn = %llx.\n", (int )fcport->tgt_id, tmp___3, tmp___2); fcport = rmptemp; __mptr___3 = (struct list_head const *)rmptemp->list.next; rmptemp = (fc_port_t *)__mptr___3; ldv_66259: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& new_fcports)) { goto ldv_66258; } else { } ldv_66257: __mptr___4 = (struct list_head const *)new_fcports.next; fcport = (fc_port_t *)__mptr___4; __mptr___5 = (struct list_head const *)fcport->list.next; rmptemp = (fc_port_t *)__mptr___5; goto ldv_66267; ldv_66266: list_del(& fcport->list); kfree((void const *)fcport); fcport = rmptemp; __mptr___6 = (struct list_head const *)rmptemp->list.next; rmptemp = (fc_port_t *)__mptr___6; ldv_66267: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& new_fcports)) { goto ldv_66266; } else { } return (rval); } } int qlafx00_configure_devices(scsi_qla_host_t *vha ) { int rval ; unsigned long flags ; unsigned long save_flags ; int tmp ; { rval = 0; flags = vha->dpc_flags; save_flags = flags; ql_dbg(268435456U, vha, 8336, "Configure devices -- dpc flags =0x%lx\n", flags); rval = qlafx00_configure_all_targets(vha); if (rval == 0) { tmp = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp != 0) { rval = 258; } else { atomic_set(& vha->loop_state, 5); ql_log(2U, vha, 8337, "Device Ready\n"); } } else { } if (rval != 0) { ql_dbg(268435456U, vha, 8338, "%s *** FAILED ***.\n", "qlafx00_configure_devices"); } else { ql_dbg(268435456U, vha, 8339, "%s: exiting normally.\n", "qlafx00_configure_devices"); } return (rval); } } static void qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha , bool critemp ) { struct qla_hw_data *ha ; fc_port_t *fcport ; int tmp ; int tmp___0 ; struct list_head const *__mptr ; int tmp___1 ; struct list_head const *__mptr___0 ; { ha = vha->hw; vha->flags.online = 0U; ha->mr.fw_hbt_en = 0U; if (! 
critemp) { ha->flags.chip_reset_done = 0U; clear_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); vha->qla_stats.total_isp_aborts = vha->qla_stats.total_isp_aborts + 1U; ql_log(2U, vha, 319, "Performing ISP error recovery - ha = %p.\n", ha); (*((ha->isp_ops)->reset_chip))(vha); } else { } tmp___0 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___0 != 2) { atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 615); } else { tmp = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp == 0) { atomic_set(& vha->loop_down_timer, 615); } else { } } __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_66287; ldv_66286: fcport->flags = 0U; tmp___1 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___1 == 4) { qla2x00_set_fcport_state___3(fcport, 3); } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_66287: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66286; } else { } if (*((unsigned long *)ha + 2UL) == 0UL) { if ((int )critemp) { qla2x00_abort_all_cmds(vha, 65536); } else { qla2x00_abort_all_cmds(vha, 524288); } } else { } qla2x00_free_irqs(vha); if ((int )critemp) { set_bit(25L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(23L, (unsigned long volatile *)(& vha->dpc_flags)); } writel(4294967288U, (void volatile *)ha->cregbase + 138096U); ql_log(2U, vha, 320, "%s Done done - ha=%p.\n", "qlafx00_abort_isp_cleanup", ha); return; } } void qlafx00_init_response_q_entries(struct rsp_que *rsp ) { uint16_t cnt ; response_t *pkt ; { rsp->ring_ptr = rsp->ring; rsp->ring_index = 0U; rsp->status_srb = (srb_t *)0; pkt = rsp->ring_ptr; cnt = 0U; goto ldv_66296; ldv_66295: pkt->signature = 3735936685U; writel(3735936685U, (void volatile *)(& pkt->signature)); pkt = pkt + 1; cnt = (uint16_t )((int )cnt + 1); ldv_66296: ; if ((int )rsp->length > (int )cnt) { goto ldv_66295; } else { } return; } } int qlafx00_rescan_isp(scsi_qla_host_t *vha ) { uint32_t status ; struct qla_hw_data *ha ; struct device_reg_fx00 *reg ; uint32_t aenmbx7 ; int tmp ; int tmp___0 ; { status = 258U; ha = vha->hw; reg = & (ha->iobase)->ispfx00; qla2x00_request_irqs(ha, *(ha->rsp_q_map)); aenmbx7 = readl((void const volatile *)(& reg->aenmailbox7)); ha->mbx_intr_code = (uint32_t )((unsigned short )(aenmbx7 >> 16)); ha->rqstq_intr_code = (uint32_t )((unsigned short )aenmbx7); ha->req_que_off = readl((void const volatile *)(& reg->aenmailbox1)); ha->rsp_que_off = readl((void const volatile *)(& reg->aenmailbox3)); ha->req_que_len = readl((void const volatile *)(& reg->aenmailbox5)); ha->rsp_que_len = readl((void const volatile *)(& reg->aenmailbox6)); ql_dbg(268435456U, vha, 8340, "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x Req que offset 0x%x Rsp que offset 0x%x\n", ha->mbx_intr_code, ha->rqstq_intr_code, ha->req_que_off, ha->rsp_que_len); writel(4294967288U, (void volatile *)ha->cregbase + 138096U); tmp = qla2x00_init_rings(vha); status = (uint32_t )tmp; if (status == 0U) { vha->flags.online = 1U; if ((vha->device_flags & 2U) != 0U) { status = 0U; } else { } tmp___0 = qlafx00_fx_disc(vha, & (vha->hw)->mr.fcport, 153); if (tmp___0 != 0) { ql_dbg(268435456U, vha, 8341, "failed to register host info\n"); } else { } } else { } scsi_unblock_requests(vha->host); return ((int )status); } } void qlafx00_timer_routine(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t fw_heart_beat ; uint32_t aenmbx0 ; struct 
device_reg_fx00 *reg ; uint32_t tempc ; int tmp ; int tmp___0 ; uint32_t data0 ; uint32_t data1 ; int tmp___1 ; unsigned int tmp___2 ; int tmp___3 ; { ha = vha->hw; reg = & (ha->iobase)->ispfx00; if ((unsigned int )ha->mr.fw_hbt_cnt != 0U) { ha->mr.fw_hbt_cnt = (uint8_t )((int )ha->mr.fw_hbt_cnt - 1); } else { if (*((unsigned long *)ha + 2UL) == 0UL) { tmp = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp == 0) { tmp___0 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 == 0) { if ((unsigned int )ha->mr.fw_hbt_en != 0U) { fw_heart_beat = readl((void const volatile *)(& reg->fwheartbeat)); if (ha->mr.old_fw_hbt_cnt != fw_heart_beat) { ha->mr.old_fw_hbt_cnt = fw_heart_beat; ha->mr.fw_hbt_miss_cnt = 0U; } else { ha->mr.fw_hbt_miss_cnt = (uint8_t )((int )ha->mr.fw_hbt_miss_cnt + 1); if ((unsigned int )ha->mr.fw_hbt_miss_cnt == 3U) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); ha->mr.fw_hbt_miss_cnt = 0U; } else { } } } else { } } else { } } else { } } else { } ha->mr.fw_hbt_cnt = 6U; } tmp___1 = constant_test_bit(23L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { aenmbx0 = readl((void const volatile *)(& reg->aenmailbox0)); if ((unsigned int )ha->mr.fw_reset_timer_exp != 0U) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); ha->mr.fw_reset_timer_exp = 0U; } else if (aenmbx0 == 32864U) { set_bit(24L, (unsigned long volatile *)(& vha->dpc_flags)); clear_bit(23L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); ha->mr.fw_reset_timer_tick = 120U; } else if (aenmbx0 == 32849U && (unsigned int )ha->mr.fw_hbt_en == 0U) { ha->mr.fw_hbt_en = 1U; } else if ((unsigned int )ha->mr.fw_reset_timer_tick == 0U) { if (ha->mr.old_aenmbx0_state == aenmbx0) { ha->mr.fw_reset_timer_exp = 1U; } else { } ha->mr.fw_reset_timer_tick = 120U; } else if (aenmbx0 == 4294967295U) { data0 = readl((void const volatile *)ha->cregbase + 262168U); data1 = readl((void const volatile *)ha->cregbase + 268324U); data0 = data0 & 4294901760U; data1 = data1 & 65535U; writel(data0 | data1, (void volatile *)ha->cregbase + 268324U); } else if ((aenmbx0 & 65280U) == 34304U) { ha->mr.fw_reset_timer_tick = 600U; } else if (aenmbx0 == 34050U) { ha->mr.fw_reset_timer_tick = 600U; } else { } if (ha->mr.old_aenmbx0_state != aenmbx0) { ha->mr.old_aenmbx0_state = aenmbx0; ha->mr.fw_reset_timer_tick = 120U; } else { } ha->mr.fw_reset_timer_tick = (uint16_t )((int )ha->mr.fw_reset_timer_tick - 1); } else { } tmp___3 = constant_test_bit(25L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___3 != 0) { if ((unsigned int )ha->mr.fw_critemp_timer_tick == 0U) { tmp___2 = readl((void const volatile *)ha->cregbase + 99524U); tempc = (3153000U - ((tmp___2 & 1022U) >> 1) * 10000U) / 13825U; ql_dbg(16777216U, vha, 24594, "ISPFx00(%s): Critical temp timer, current SOC temperature: %d\n", "qlafx00_timer_routine", tempc); if (ha->mr.critical_temperature > tempc) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); clear_bit(25L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else { } ha->mr.fw_critemp_timer_tick = 60U; } else { ha->mr.fw_critemp_timer_tick = (uint16_t )((int )ha->mr.fw_critemp_timer_tick - 1); } } else { } if ((int )ha->mr.host_info_resend) { if ((unsigned int )ha->mr.hinfo_resend_timer_tick == 0U) { ha->mr.host_info_resend = 0; set_bit(26L, (unsigned long volatile *)(& vha->dpc_flags)); 
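/* Descriptive note (added): the host-info resend interval in
 * qlafx00_timer_routine() has expired, so dpc_flags bit 26 (host-info
 * resend, per ha->mr.host_info_resend above) was set; the 60-tick resend
 * timer is re-armed and the DPC thread is woken below. */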
ha->mr.hinfo_resend_timer_tick = 60U; qla2xxx_wake_dpc(vha); } else { ha->mr.hinfo_resend_timer_tick = (uint8_t )((int )ha->mr.hinfo_resend_timer_tick - 1); } } else { } return; } } int qlafx00_reset_initialize(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((vha->device_flags & 32U) != 0U) { ql_dbg(1073741824U, vha, 322, "Device in failed state\n"); return (0); } else { } ha->flags.mr_reset_hdlr_active = 1U; if (*((unsigned long *)vha + 19UL) != 0UL) { scsi_block_requests(vha->host); qlafx00_abort_isp_cleanup(vha, 0); } else { } ql_log(2U, vha, 323, "(%s): succeeded.\n", "qlafx00_reset_initialize"); ha->flags.mr_reset_hdlr_active = 0U; return (0); } } int qlafx00_abort_isp(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int tmp ; long tmp___0 ; { ha = vha->hw; if (*((unsigned long *)vha + 19UL) != 0UL) { tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect((long )(tmp != 0 && *((unsigned long *)ha + 2UL) != 0UL), 0L); if (tmp___0 != 0L) { clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); return (0); } else { } scsi_block_requests(vha->host); qlafx00_abort_isp_cleanup(vha, 0); } else { scsi_block_requests(vha->host); clear_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); vha->qla_stats.total_isp_aborts = vha->qla_stats.total_isp_aborts + 1U; (*((ha->isp_ops)->reset_chip))(vha); set_bit(23L, (unsigned long volatile *)(& vha->dpc_flags)); writel(4294967288U, (void volatile *)ha->cregbase + 138096U); } ql_log(2U, vha, 325, "(%s): succeeded.\n", "qlafx00_abort_isp"); return (0); } } __inline static fc_port_t *qlafx00_get_fcport(struct scsi_qla_host *vha , int tgt_id ) { fc_port_t *fcport ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_66336; ldv_66335: ; if ((int )fcport->tgt_id == tgt_id) { ql_dbg(33554432U, vha, 20594, "Matching fcport(%p) found with TGT-ID: 0x%x and Remote TGT_ID: 0x%x\n", fcport, (int )fcport->tgt_id, tgt_id); return (fcport); } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_66336: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_66335; } else { } return ((fc_port_t *)0); } } static void qlafx00_tgt_detach(struct scsi_qla_host *vha , int tgt_id ) { fc_port_t *fcport ; { ql_log(2U, vha, 20595, "Detach TGT-ID: 0x%x\n", tgt_id); fcport = qlafx00_get_fcport(vha, tgt_id); if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { return; } else { } qla2x00_mark_device_lost(vha, fcport, 0, 0); return; } } int qlafx00_process_aen(struct scsi_qla_host *vha , struct qla_work_evt *evt ) { int rval ; uint32_t aen_code ; uint32_t aen_data ; u32 tmp ; { rval = 0; aen_code = 65535U; aen_data = evt->u.aenfx.evtcode; switch (evt->u.aenfx.evtcode) { case 32788U: ; if (evt->u.aenfx.mbx[1] == 0U) { if (evt->u.aenfx.mbx[2] == 1U) { if (*((unsigned long *)vha + 19UL) == 0UL) { vha->flags.fw_tgt_reported = 1U; } else { } atomic_set(& vha->loop_down_timer, 0); atomic_set(& vha->loop_state, 3); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else if (evt->u.aenfx.mbx[2] == 2U) { qlafx00_tgt_detach(vha, (int )evt->u.aenfx.mbx[3]); } else { } } else if (evt->u.aenfx.mbx[1] == 65535U) { if (evt->u.aenfx.mbx[2] == 1U) { if (*((unsigned long *)vha + 19UL) == 0UL) { vha->flags.fw_tgt_reported = 1U; } else { } set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); } else if 
(evt->u.aenfx.mbx[2] == 2U) { vha->device_flags = vha->device_flags | 2U; qla2x00_mark_all_devices_lost(vha, 1); } else { } } else { } goto ldv_66351; case 32785U: aen_code = 2U; aen_data = 0U; goto ldv_66351; case 32786U: aen_code = 3U; aen_data = 0U; goto ldv_66351; case 32775U: ql_log(2U, vha, 20610, "Process critical temperature event aenmb[0]: %x\n", evt->u.aenfx.evtcode); scsi_block_requests(vha->host); qlafx00_abort_isp_cleanup(vha, 1); scsi_unblock_requests(vha->host); goto ldv_66351; } ldv_66351: tmp = fc_get_event_number(); fc_host_post_event(vha->host, tmp, (enum fc_host_event_code )aen_code, aen_data); return (rval); } } static void qlafx00_update_host_attr(scsi_qla_host_t *vha , struct port_info_data *pinfo ) { u64 port_name ; u64 node_name ; { port_name = 0ULL; node_name = 0ULL; port_name = wwn_to_u64((u8 *)(& pinfo->port_name)); node_name = wwn_to_u64((u8 *)(& pinfo->node_name)); ((struct fc_host_attrs *)(vha->host)->shost_data)->node_name = node_name; ((struct fc_host_attrs *)(vha->host)->shost_data)->port_name = port_name; if ((unsigned int )pinfo->port_type == 0U) { (vha->hw)->current_topology = 8U; } else { } if ((unsigned int )pinfo->link_status == 17U) { atomic_set(& vha->loop_state, 5); } else if ((unsigned int )pinfo->link_status == 16U) { atomic_set(& vha->loop_state, 2); } else { } (vha->hw)->link_data_rate = (unsigned short )pinfo->link_config; return; } } static void qla2x00_fxdisc_iocb_timeout(void *data ) { srb_t *sp ; struct srb_iocb *lio ; { sp = (srb_t *)data; lio = & sp->u.iocb_cmd; complete(& lio->u.fxiocb.fxiocb_comp); return; } } static void qla2x00_fxdisc_sp_done(void *data , void *ptr , int res ) { srb_t *sp ; struct srb_iocb *lio ; { sp = (srb_t *)ptr; lio = & sp->u.iocb_cmd; complete(& lio->u.fxiocb.fxiocb_comp); return; } } int qlafx00_fx_disc(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t fx_type ) { srb_t *sp ; struct srb_iocb *fdisc ; int rval ; struct qla_hw_data *ha ; struct host_system_info *phost_info ; struct register_host_info *preg_hsi ; struct new_utsname *p_sysid ; struct timeval tv ; int tmp ; struct config_info_data *pinfo ; struct port_info_data *pinfo___0 ; struct qlafx00_tgt_node_info *pinfo___1 ; struct qlafx00_tgt_node_info *pinfo___2 ; { rval = 258; ha = vha->hw; p_sysid = (struct new_utsname *)0; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } fdisc = & sp->u.iocb_cmd; switch ((int )fx_type) { case 1: fdisc->u.fxiocb.flags = 2U; fdisc->u.fxiocb.rsp_len = 500U; goto ldv_66388; case 2: fdisc->u.fxiocb.flags = 6U; fdisc->u.fxiocb.rsp_len = 304U; fdisc->u.fxiocb.req_data = (unsigned int )fcport->port_id; goto ldv_66388; case 128: fdisc->u.fxiocb.flags = 6U; fdisc->u.fxiocb.rsp_len = 212U; fdisc->u.fxiocb.req_data = (unsigned int )fcport->tgt_id; goto ldv_66388; case 129: fdisc->u.fxiocb.flags = 6U; fdisc->u.fxiocb.rsp_len = 128U; goto ldv_66388; case 153: fdisc->u.fxiocb.flags = 1U; fdisc->u.fxiocb.req_len = 1036U; p_sysid = utsname(); if ((unsigned long )p_sysid == (unsigned long )((struct new_utsname *)0)) { ql_log(1U, vha, 12348, "Not able to get the system information\n"); goto done_free_sp; } else { } goto ldv_66388; case 255: ; default: ; goto ldv_66388; } ldv_66388: ; if ((int )fdisc->u.fxiocb.flags & 1) { fdisc->u.fxiocb.req_addr = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )fdisc->u.fxiocb.req_len, & fdisc->u.fxiocb.req_dma_handle, 208U, (struct dma_attrs *)0); if ((unsigned long )fdisc->u.fxiocb.req_addr == (unsigned long )((void *)0)) { goto 
done_free_sp; } else { } if ((unsigned int )fx_type == 153U) { preg_hsi = (struct register_host_info *)fdisc->u.fxiocb.req_addr; phost_info = & preg_hsi->hsi; memset((void *)preg_hsi, 0, 1036UL); phost_info->os_type = 2U; strncpy((char *)(& phost_info->sysname), (char const *)(& p_sysid->sysname), 128UL); strncpy((char *)(& phost_info->nodename), (char const *)(& p_sysid->nodename), 64UL); tmp = strcmp((char const *)(& phost_info->nodename), "(none)"); if (tmp == 0) { ha->mr.host_info_resend = 1; } else { } strncpy((char *)(& phost_info->release), (char const *)(& p_sysid->release), 64UL); strncpy((char *)(& phost_info->version), (char const *)(& p_sysid->version), 64UL); strncpy((char *)(& phost_info->machine), (char const *)(& p_sysid->machine), 64UL); strncpy((char *)(& phost_info->domainname), (char const *)(& p_sysid->domainname), 64UL); strncpy((char *)(& phost_info->hostdriver), "8.07.00.18-k", 64UL); do_gettimeofday(& tv); preg_hsi->utc = (unsigned long long )tv.tv_sec; ql_dbg(1073741824U, vha, 329, "ISP%04X: Host registration with firmware\n", (int )(ha->pdev)->device); ql_dbg(1073741824U, vha, 330, "os_type = \'%d\', sysname = \'%s\', nodname = \'%s\'\n", phost_info->os_type, (char *)(& phost_info->sysname), (char *)(& phost_info->nodename)); ql_dbg(1073741824U, vha, 331, "release = \'%s\', version = \'%s\'\n", (char *)(& phost_info->release), (char *)(& phost_info->version)); ql_dbg(1073741824U, vha, 332, "machine = \'%s\' domainname = \'%s\', hostdriver = \'%s\'\n", (char *)(& phost_info->machine), (char *)(& phost_info->domainname), (char *)(& phost_info->hostdriver)); ql_dump_buffer(1342177280U, vha, 333, (uint8_t *)phost_info, 772U); } else { } } else { } if (((int )fdisc->u.fxiocb.flags & 2) != 0) { fdisc->u.fxiocb.rsp_addr = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )fdisc->u.fxiocb.rsp_len, & fdisc->u.fxiocb.rsp_dma_handle, 208U, (struct dma_attrs *)0); if ((unsigned long )fdisc->u.fxiocb.rsp_addr == (unsigned long )((void *)0)) { goto done_unmap_req; } else { } } else { } sp->type = 10U; sp->name = (char *)"fxdisc"; qla2x00_init_timer(sp, 20UL); fdisc->timeout = & qla2x00_fxdisc_iocb_timeout; fdisc->u.fxiocb.req_func_type = fx_type; sp->done = & qla2x00_fxdisc_sp_done; rval = qla2x00_start_sp(sp); if (rval != 0) { goto done_unmap_dma; } else { } wait_for_completion(& fdisc->u.fxiocb.fxiocb_comp); if ((unsigned int )fx_type == 1U) { pinfo = (struct config_info_data *)fdisc->u.fxiocb.rsp_addr; strcpy((char *)(& (vha->hw)->model_number), (char const *)(& pinfo->model_num)); strcpy((char *)(& (vha->hw)->model_desc), (char const *)(& pinfo->model_description)); memcpy((void *)(& (vha->hw)->mr.symbolic_name), (void const *)(& pinfo->symbolic_name), 64UL); memcpy((void *)(& (vha->hw)->mr.serial_num), (void const *)(& pinfo->serial_num), 32UL); memcpy((void *)(& (vha->hw)->mr.hw_version), (void const *)(& pinfo->hw_version), 16UL); memcpy((void *)(& (vha->hw)->mr.fw_version), (void const *)(& pinfo->fw_version), 16UL); strim((char *)(& (vha->hw)->mr.fw_version)); memcpy((void *)(& (vha->hw)->mr.uboot_version), (void const *)(& pinfo->uboot_version), 16UL); memcpy((void *)(& (vha->hw)->mr.fru_serial_num), (void const *)(& pinfo->fru_serial_num), 32UL); (vha->hw)->mr.critical_temperature = pinfo->nominal_temp_value != 0U ? 
pinfo->nominal_temp_value : 80U; ha->mr.extended_io_enabled = (pinfo->enabled_capabilities & 32U) != 0U; } else if ((unsigned int )fx_type == 2U) { pinfo___0 = (struct port_info_data *)fdisc->u.fxiocb.rsp_addr; memcpy((void *)(& vha->node_name), (void const *)(& pinfo___0->node_name), 8UL); memcpy((void *)(& vha->port_name), (void const *)(& pinfo___0->port_name), 8UL); vha->d_id.b.domain = pinfo___0->port_id[0]; vha->d_id.b.area = pinfo___0->port_id[1]; vha->d_id.b.al_pa = pinfo___0->port_id[2]; qlafx00_update_host_attr(vha, pinfo___0); ql_dump_buffer(1073872896U, vha, 321, (uint8_t *)pinfo___0, 16U); } else if ((unsigned int )fx_type == 128U) { pinfo___1 = (struct qlafx00_tgt_node_info *)fdisc->u.fxiocb.rsp_addr; memcpy((void *)(& fcport->node_name), (void const *)(& pinfo___1->tgt_node_wwnn), 8UL); memcpy((void *)(& fcport->port_name), (void const *)(& pinfo___1->tgt_node_wwpn), 8UL); fcport->port_type = 5; ql_dump_buffer(1073872896U, vha, 324, (uint8_t *)pinfo___1, 16U); } else if ((unsigned int )fx_type == 129U) { pinfo___2 = (struct qlafx00_tgt_node_info *)fdisc->u.fxiocb.rsp_addr; ql_dump_buffer(1073872896U, vha, 326, (uint8_t *)pinfo___2, 16U); memcpy((void *)(vha->hw)->gid_list, (void const *)pinfo___2, 128UL); } else if ((unsigned int )fx_type == 255U) { fdisc->u.fxiocb.result = fdisc->u.fxiocb.result == 104U ? 0U : 258U; } else { } rval = (int )fdisc->u.fxiocb.result; done_unmap_dma: ; if ((unsigned long )fdisc->u.fxiocb.rsp_addr != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )fdisc->u.fxiocb.rsp_len, fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle, (struct dma_attrs *)0); } else { } done_unmap_req: ; if ((unsigned long )fdisc->u.fxiocb.req_addr != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )fdisc->u.fxiocb.req_len, fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle, (struct dma_attrs *)0); } else { } done_free_sp: (*(sp->free))((void *)vha, (void *)sp); done: ; return (rval); } } int qlafx00_initialize_adapter(struct scsi_qla_host *vha ) { int rval ; struct qla_hw_data *ha ; uint32_t tempc ; unsigned int tmp ; { ha = vha->hw; vha->flags.online = 0U; ha->flags.chip_reset_done = 0U; vha->flags.reset_active = 0U; ha->flags.pci_channel_io_perm_failure = 0U; ha->flags.eeh_busy = 0U; atomic_set(& vha->loop_down_timer, 255); atomic_set(& vha->loop_state, 2); vha->device_flags = 2U; vha->dpc_flags = 0UL; vha->flags.management_server_logged_in = 0U; ha->isp_abort_cnt = 0U; ha->beacon_blink_led = 0U; set_bit(0L, (unsigned long volatile *)(& ha->req_qid_map)); set_bit(0L, (unsigned long volatile *)(& ha->rsp_qid_map)); ql_dbg(1073741824U, vha, 327, "Configuring PCI space...\n"); rval = (*((ha->isp_ops)->pci_config))(vha); if (rval != 0) { ql_log(1U, vha, 328, "Unable to configure PCI space.\n"); return (rval); } else { } rval = qlafx00_init_fw_ready(vha); if (rval != 0) { return (rval); } else { } qlafx00_save_queue_ptrs(vha); rval = qlafx00_config_queues(vha); if (rval != 0) { return (rval); } else { } rval = qla2x00_alloc_outstanding_cmds(ha, vha->req); if (rval != 0) { return (rval); } else { } rval = qla2x00_init_rings(vha); ha->flags.chip_reset_done = 1U; tmp = readl((void const volatile *)ha->cregbase + 99524U); tempc = (3153000U - ((tmp & 1022U) >> 1) * 10000U) / 13825U; ql_dbg(1073741824U, vha, 338, "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n", "qlafx00_initialize_adapter", tempc); return (rval); } } uint32_t qlafx00_fw_state_show(struct device *dev , struct device_attribute *attr , 
char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int rval ; uint32_t state[1U] ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); vha = (scsi_qla_host_t *)tmp; rval = 258; tmp___0 = qla2x00_reset_active(vha); if (tmp___0 != 0) { ql_log(1U, vha, 28878, "ISP reset active.\n"); } else if (*((unsigned long *)vha->hw + 2UL) == 0UL) { rval = qlafx00_get_firmware_state(vha, (uint32_t *)(& state)); } else { } if (rval != 0) { memset((void *)(& state), -1, 4UL); } else { } return (state[0]); } } void qlafx00_get_host_speed(struct Scsi_Host *shost ) { struct qla_hw_data *ha ; void *tmp ; u32 speed ; { tmp = shost_priv(shost); ha = ((struct scsi_qla_host *)tmp)->hw; speed = 0U; switch ((int )ha->link_data_rate) { case 2: speed = 2U; goto ldv_66425; case 4: speed = 8U; goto ldv_66425; case 8: speed = 16U; goto ldv_66425; case 10: speed = 4U; goto ldv_66425; } ldv_66425: ((struct fc_host_attrs *)shost->shost_data)->speed = speed; return; } } __inline static void qlafx00_handle_sense(srb_t *sp , uint8_t *sense_data , uint32_t par_sense_len , uint32_t sense_len , struct rsp_que *rsp , int res ) { struct scsi_qla_host *vha ; struct scsi_cmnd *cp ; uint32_t track_sense_len ; { vha = (sp->fcport)->vha; cp = sp->u.scmd.cmd; sp->u.scmd.fw_sense_length = sense_len; if (sense_len > 95U) { sense_len = 96U; } else { } sp->u.scmd.request_sense_length = sense_len; sp->u.scmd.request_sense_ptr = cp->sense_buffer; track_sense_len = sense_len; if (sense_len > par_sense_len) { sense_len = par_sense_len; } else { } memcpy((void *)cp->sense_buffer, (void const *)sense_data, (size_t )sense_len); sp->u.scmd.fw_sense_length = sp->u.scmd.fw_sense_length - sense_len; sp->u.scmd.request_sense_ptr = cp->sense_buffer + (unsigned long )sense_len; track_sense_len = track_sense_len - sense_len; sp->u.scmd.request_sense_length = track_sense_len; ql_dbg(134217728U, vha, 12365, "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n", sense_len, par_sense_len, track_sense_len); if (sp->u.scmd.fw_sense_length != 0U) { rsp->status_srb = sp; cp->result = res; } else { } if (sense_len != 0U) { ql_dbg(134348800U, vha, 12345, "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", ((sp->fcport)->vha)->host_no, (cp->device)->id, (cp->device)->lun, cp); ql_dump_buffer(134348800U, vha, 12361, cp->sense_buffer, sense_len); } else { } return; } } static void qlafx00_tm_iocb_entry(scsi_qla_host_t *vha , struct req_que *req , struct tsk_mgmt_entry_fx00 *pkt , srb_t *sp , __le16 sstatus , __le16 cpstatus ) { struct srb_iocb *tmf ; { tmf = & sp->u.iocb_cmd; if ((unsigned int )cpstatus != 0U || ((int )sstatus & 256) != 0) { cpstatus = 1U; } else { } tmf->u.tmf.comp_status = cpstatus; (*(sp->done))((void *)vha, (void *)sp, 0); return; } } static void qlafx00_abort_iocb_entry(scsi_qla_host_t *vha , struct req_que *req , struct abort_iocb_entry_fx00 *pkt ) { char func[9U] ; srb_t *sp ; struct srb_iocb *abt ; { func[0] = 'A'; func[1] = 'B'; func[2] = 'T'; func[3] = '_'; func[4] = 'I'; func[5] = 'O'; func[6] = 'C'; func[7] = 'B'; func[8] = '\000'; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)pkt); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } abt = & sp->u.iocb_cmd; abt->u.abt.comp_status = pkt->tgt_id_sts; (*(sp->done))((void *)vha, (void *)sp, 0); return; } } static void qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha , struct req_que *req , struct ioctl_iocb_entry_fx00 *pkt ) { char 
func[10U] ; srb_t *sp ; struct fc_bsg_job *bsg_job ; struct srb_iocb *iocb_job ; int res ; struct qla_mt_iocb_rsp_fx00 fstatus ; uint8_t *fw_sts_ptr ; { func[0] = 'I'; func[1] = 'O'; func[2] = 'S'; func[3] = 'B'; func[4] = '_'; func[5] = 'I'; func[6] = 'O'; func[7] = 'C'; func[8] = 'B'; func[9] = '\000'; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)pkt); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } if ((unsigned int )sp->type == 10U) { iocb_job = & sp->u.iocb_cmd; iocb_job->u.fxiocb.seq_number = pkt->seq_no; iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags; iocb_job->u.fxiocb.result = pkt->status; if (((int )iocb_job->u.fxiocb.flags & 8) != 0) { iocb_job->u.fxiocb.req_data = pkt->dataword_r; } else { } } else { bsg_job = sp->u.bsg_job; memset((void *)(& fstatus), 0, 56UL); fstatus.reserved_1 = pkt->reserved_0; fstatus.func_type = pkt->comp_func_num; fstatus.ioctl_flags = pkt->fw_iotcl_flags; fstatus.ioctl_data = pkt->dataword_r; fstatus.adapid = pkt->adapid; fstatus.reserved_2 = pkt->dataword_r_extra; fstatus.res_count = (int32_t )pkt->residuallen; fstatus.status = pkt->status; fstatus.seq_number = pkt->seq_no; memcpy((void *)(& fstatus.reserved_3), (void const *)(& pkt->reserved_2), 20UL); fw_sts_ptr = (uint8_t *)(bsg_job->req)->sense + 16UL; memcpy((void *)fw_sts_ptr, (void const *)(& fstatus), 56UL); bsg_job->reply_len = 73U; ql_dump_buffer(8421376U, (sp->fcport)->vha, 20608, (uint8_t *)pkt, 60U); ql_dump_buffer(8421376U, (sp->fcport)->vha, 20596, fw_sts_ptr, 56U); (bsg_job->reply)->result = 0U; res = 0; (bsg_job->reply)->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; } (*(sp->done))((void *)vha, (void *)sp, res); return; } } static void qlafx00_status_entry(scsi_qla_host_t *vha , struct rsp_que *rsp , void *pkt ) { srb_t *sp ; fc_port_t *fcport ; struct scsi_cmnd *cp ; struct sts_entry_fx00 *sts ; __le16 comp_status ; __le16 scsi_status ; uint16_t ox_id ; __le16 lscsi_status ; int32_t resid ; uint32_t sense_len ; uint32_t par_sense_len ; uint32_t rsp_info_len ; uint32_t resid_len ; uint32_t fw_resid_len ; uint8_t *rsp_info ; uint8_t *sense_data ; struct qla_hw_data *ha ; uint32_t hindex ; uint32_t handle ; uint16_t que ; struct req_que *req ; int logit ; int res ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; unsigned int tmp___2 ; unsigned int tmp___3 ; unsigned int tmp___4 ; int tmp___5 ; int tmp___6 ; unsigned int tmp___7 ; { rsp_info = (uint8_t *)0U; sense_data = (uint8_t *)0U; ha = vha->hw; logit = 1; res = 0; sts = (struct sts_entry_fx00 *)pkt; comp_status = sts->comp_status; scsi_status = (unsigned int )sts->scsi_status & 4095U; hindex = sts->handle; handle = (uint32_t )((unsigned short )hindex); que = (unsigned short )(hindex >> 16); req = *(ha->req_q_map + (unsigned long )que); if ((uint32_t )req->num_outstanding_cmds > handle) { sp = *(req->outstanding_cmds + (unsigned long )handle); } else { sp = (srb_t *)0; } if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_dbg(134217728U, vha, 12340, "Invalid status handle (0x%x).\n", handle); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); return; } else { } if ((unsigned int )sp->type == 7U) { *(req->outstanding_cmds + (unsigned long )handle) = (srb_t *)0; qlafx00_tm_iocb_entry(vha, req, (struct tsk_mgmt_entry_fx00 *)pkt, sp, (int )scsi_status, (int )comp_status); return; } else { } if ((unsigned int )comp_status == 0U && (unsigned int )scsi_status == 0U) { qla2x00_process_completed_request(vha, req, 
handle); return; } else { } *(req->outstanding_cmds + (unsigned long )handle) = (srb_t *)0; cp = sp->u.scmd.cmd; if ((unsigned long )cp == (unsigned long )((struct scsi_cmnd *)0)) { ql_dbg(134217728U, vha, 12360, "Command already returned (0x%x/%p).\n", handle, sp); return; } else { } lscsi_status = (unsigned int )scsi_status & 254U; fcport = sp->fcport; ox_id = 0U; fw_resid_len = 0U; resid_len = fw_resid_len; rsp_info_len = resid_len; par_sense_len = rsp_info_len; sense_len = par_sense_len; if (((int )scsi_status & 512) != 0) { sense_len = sts->sense_len; } else { } if (((int )scsi_status & 3072) != 0) { resid_len = sts->residual_len; } else { } if ((unsigned int )comp_status == 21U) { fw_resid_len = sts->residual_len; } else { } sense_data = (uint8_t *)(& sts->data); rsp_info = sense_data; par_sense_len = 32U; if ((unsigned int )comp_status == 0U && ((int )scsi_status & 1024) != 0) { comp_status = 7U; } else { } switch ((int )comp_status) { case 0: ; case 28: ; if ((unsigned int )scsi_status == 0U) { res = 0; goto ldv_66499; } else { } if (((int )scsi_status & 3072) != 0) { resid = (int32_t )resid_len; scsi_set_resid(cp, resid); if ((unsigned int )lscsi_status == 0U) { tmp___0 = scsi_bufflen(cp); if (tmp___0 - (unsigned int )resid < cp->underflow) { tmp = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12368, "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", resid, tmp); res = 458752; goto ldv_66499; } else { } } else { } } else { } res = (int )lscsi_status; if ((unsigned int )lscsi_status == 40U) { ql_dbg(134217728U, fcport->vha, 12369, "QUEUE FULL detected.\n"); goto ldv_66499; } else { } logit = 0; if ((unsigned int )lscsi_status != 2U) { goto ldv_66499; } else { } memset((void *)cp->sense_buffer, 0, 96UL); if (((int )scsi_status & 512) == 0) { goto ldv_66499; } else { } qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len, rsp, res); goto ldv_66499; case 21: ; if ((ha->device_type & 134217728U) != 0U || (ha->device_type & 131072U) != 0U) { resid = (int32_t )fw_resid_len; } else { resid = (int32_t )resid_len; } scsi_set_resid(cp, resid); if (((int )scsi_status & 2048) != 0) { if (((ha->device_type & 134217728U) != 0U || (ha->device_type & 131072U) != 0U) && fw_resid_len != resid_len) { tmp___1 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12370, "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, tmp___1); res = (int )lscsi_status | 458752; goto check_scsi_status; } else { } if ((unsigned int )lscsi_status == 0U) { tmp___3 = scsi_bufflen(cp); if (tmp___3 - (unsigned int )resid < cp->underflow) { tmp___2 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12371, "Mid-layer underflow detected (0x%x of 0x%x bytes, cp->underflow: 0x%x).\n", resid, tmp___2, cp->underflow); res = 458752; goto ldv_66499; } else { } } else { } } else if ((unsigned int )lscsi_status != 40U && (unsigned int )lscsi_status != 8U) { tmp___4 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12372, "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, tmp___4); res = (int )lscsi_status | 458752; goto check_scsi_status; } else { ql_dbg(134217728U, fcport->vha, 12373, "scsi_status: 0x%x, lscsi_status: 0x%x\n", (int )scsi_status, (int )lscsi_status); } res = (int )lscsi_status; logit = 0; check_scsi_status: ; if ((unsigned int )lscsi_status != 0U) { if ((unsigned int )lscsi_status == 40U) { ql_dbg(134217728U, fcport->vha, 12374, "QUEUE FULL detected.\n"); logit = 1; goto ldv_66499; } else { } if ((unsigned int )lscsi_status != 2U) { goto ldv_66499; } else { } memset((void 
*)cp->sense_buffer, 0, 96UL); if (((int )scsi_status & 512) == 0) { goto ldv_66499; } else { } qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len, rsp, res); } else { } goto ldv_66499; case 41: ; case 42: ; case 43: ; case 1: ; case 40: ; case 6: ; case 4: res = 917504; tmp___5 = atomic_read((atomic_t const *)(& fcport->state)); ql_dbg(134217728U, fcport->vha, 12375, "Port down status: port-state=0x%x.\n", tmp___5); tmp___6 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___6 == 4) { qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); } else { } goto ldv_66499; case 5: res = 524288; goto ldv_66499; default: res = 458752; goto ldv_66499; } ldv_66499: ; if (logit != 0) { tmp___7 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12376, "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n", (int )comp_status, (int )scsi_status, res, vha->host_no, (cp->device)->id, (cp->device)->lun, (int )fcport->tgt_id, (int )lscsi_status, cp->cmnd, tmp___7, rsp_info_len, resid_len, fw_resid_len, sense_len, par_sense_len, rsp_info_len); } else { } if ((unsigned long )rsp->status_srb == (unsigned long )((srb_t *)0)) { (*(sp->done))((void *)ha, (void *)sp, res); } else { } return; } } static void qlafx00_status_cont_entry(struct rsp_que *rsp , sts_cont_entry_t *pkt ) { uint8_t sense_sz ; struct qla_hw_data *ha ; struct scsi_qla_host *vha ; void *tmp ; srb_t *sp ; struct scsi_cmnd *cp ; uint32_t sense_len ; uint8_t *sense_ptr ; { sense_sz = 0U; ha = rsp->hw; tmp = pci_get_drvdata(ha->pdev); vha = (struct scsi_qla_host *)tmp; sp = rsp->status_srb; if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_dbg(134217728U, vha, 12343, "no SP, sp = %p\n", sp); return; } else { } if (sp->u.scmd.fw_sense_length == 0U) { ql_dbg(134217728U, vha, 12363, "no fw sense data, sp = %p\n", sp); return; } else { } cp = sp->u.scmd.cmd; if ((unsigned long )cp == (unsigned long )((struct scsi_cmnd *)0)) { ql_log(1U, vha, 12347, "cmd is NULL: already returned to OS (sp=%p).\n", sp); rsp->status_srb = (srb_t *)0; return; } else { } if (sp->u.scmd.request_sense_length == 0U) { ql_dbg(134217728U, vha, 12364, "no sense data, sp = %p\n", sp); } else { sense_len = sp->u.scmd.request_sense_length; sense_ptr = sp->u.scmd.request_sense_ptr; ql_dbg(134217728U, vha, 12367, "sp=%p sense_len=0x%x sense_ptr=%p.\n", sp, sense_len, sense_ptr); if (sense_len > 60U) { sense_sz = 60U; } else { sense_sz = (uint8_t )sense_len; } ql_dump_buffer(134348800U, vha, 12366, (uint8_t *)pkt, 64U); memcpy((void *)sense_ptr, (void const *)(& pkt->data), (size_t )sense_sz); ql_dump_buffer(134348800U, vha, 12362, sense_ptr, (uint32_t )sense_sz); sense_len = sense_len - (uint32_t )sense_sz; sense_ptr = sense_ptr + (unsigned long )sense_sz; sp->u.scmd.request_sense_ptr = sense_ptr; sp->u.scmd.request_sense_length = sense_len; } sense_len = sp->u.scmd.fw_sense_length; sense_len = sense_len > 60U ? 
sense_len - 60U : 0U; sp->u.scmd.fw_sense_length = sense_len; if (sense_len == 0U) { rsp->status_srb = (srb_t *)0; (*(sp->done))((void *)ha, (void *)sp, cp->result); } else { } return; } } static void qlafx00_multistatus_entry(struct scsi_qla_host *vha , struct rsp_que *rsp , void *pkt ) { srb_t *sp ; struct multi_sts_entry_fx00 *stsmfx ; struct qla_hw_data *ha ; uint32_t handle ; uint32_t hindex ; uint32_t handle_count ; uint32_t i ; uint16_t que ; struct req_que *req ; __le32 *handle_ptr ; { ha = vha->hw; stsmfx = (struct multi_sts_entry_fx00 *)pkt; handle_count = (uint32_t )stsmfx->handle_count; if (handle_count > 15U) { ql_dbg(134217728U, vha, 12341, "Invalid handle count (0x%x).\n", handle_count); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); return; } else { } handle_ptr = (__le32 *)(& stsmfx->handles); i = 0U; goto ldv_66538; ldv_66537: hindex = *handle_ptr; handle = (uint32_t )((unsigned short )hindex); que = (unsigned short )(hindex >> 16); req = *(ha->req_q_map + (unsigned long )que); if ((uint32_t )req->num_outstanding_cmds > handle) { sp = *(req->outstanding_cmds + (unsigned long )handle); } else { sp = (srb_t *)0; } if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_dbg(134217728U, vha, 12356, "Invalid status handle (0x%x).\n", handle); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); return; } else { } qla2x00_process_completed_request(vha, req, handle); handle_ptr = handle_ptr + 1; i = i + 1U; ldv_66538: ; if (i < handle_count) { goto ldv_66537; } else { } return; } } static void qlafx00_error_entry(scsi_qla_host_t *vha , struct rsp_que *rsp , struct sts_entry_fx00 *pkt , uint8_t estatus , uint8_t etype ) { srb_t *sp ; struct qla_hw_data *ha ; char func[11U] ; uint16_t que ; struct req_que *req ; int res ; { ha = vha->hw; func[0] = 'E'; func[1] = 'R'; func[2] = 'R'; func[3] = 'O'; func[4] = 'R'; func[5] = '-'; func[6] = 'I'; func[7] = 'O'; func[8] = 'C'; func[9] = 'B'; func[10] = '\000'; que = 0U; req = (struct req_que *)0; res = 458752; ql_dbg(33554432U, vha, 20607, "type of error status in response: 0x%x\n", (int )estatus); req = *(ha->req_q_map + (unsigned long )que); sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)pkt); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { (*(sp->done))((void *)ha, (void *)sp, res); return; } else { } set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); return; } } static void qlafx00_process_response_queue(struct scsi_qla_host *vha , struct rsp_que *rsp ) { struct sts_entry_fx00 *pkt ; response_t *lptr ; uint16_t lreq_q_in ; uint16_t lreq_q_out ; unsigned int tmp ; { lreq_q_in = 0U; lreq_q_out = 0U; tmp = readl((void const volatile *)rsp->rsp_q_in); lreq_q_in = (uint16_t )tmp; lreq_q_out = rsp->ring_index; goto ldv_66561; ldv_66569: lptr = rsp->ring_ptr; memcpy_fromio((void *)(& rsp->rsp_pkt), (void const volatile *)lptr, 64UL); pkt = (struct sts_entry_fx00 *)(& rsp->rsp_pkt); rsp->ring_index = (uint16_t )((int )rsp->ring_index + 1); lreq_q_out = (uint16_t )((int )lreq_q_out + 1); if ((int )rsp->ring_index == (int )rsp->length) { lreq_q_out = 0U; rsp->ring_index = 0U; rsp->ring_ptr = rsp->ring; } else { rsp->ring_ptr = rsp->ring_ptr + 1; } if ((unsigned int )pkt->entry_status != 0U && (unsigned int )pkt->entry_type != 12U) { qlafx00_error_entry(vha, rsp, pkt, (int )pkt->entry_status, (int )pkt->entry_type); goto ldv_66561; } else { } switch ((int )pkt->entry_type) { case 1: qlafx00_status_entry(vha, 
rsp, (void *)pkt); goto ldv_66563; case 4: qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); goto ldv_66563; case 13: qlafx00_multistatus_entry(vha, rsp, (void *)pkt); goto ldv_66563; case 8: qlafx00_abort_iocb_entry(vha, rsp->req, (struct abort_iocb_entry_fx00 *)pkt); goto ldv_66563; case 12: qlafx00_ioctl_iosb_entry(vha, rsp->req, (struct ioctl_iocb_entry_fx00 *)pkt); goto ldv_66563; default: ql_dbg(33554432U, vha, 20609, "Received unknown response pkt type %x entry status=%x.\n", (int )pkt->entry_type, (int )pkt->entry_status); goto ldv_66563; } ldv_66563: ; ldv_66561: ; if ((int )lreq_q_in != (int )lreq_q_out) { goto ldv_66569; } else { } writel((unsigned int )rsp->ring_index, (void volatile *)rsp->rsp_q_out); return; } } static void qlafx00_async_event(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct device_reg_fx00 *reg ; int data_size ; unsigned short tmp ; unsigned short tmp___0 ; unsigned short tmp___1 ; unsigned short tmp___2 ; unsigned short tmp___3 ; unsigned short tmp___4 ; unsigned short tmp___5 ; { ha = vha->hw; data_size = 1; reg = & (ha->iobase)->ispfx00; switch (ha->aenmb[0]) { case 32770U: ql_log(1U, vha, 20601, "ISP System Error - mbx1=%x\n", ha->aenmb[0]); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_66578; case 32866U: ql_dbg(33554432U, vha, 20598, "Asynchronous FW shutdown requested.\n"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); goto ldv_66578; case 32788U: ha->aenmb[1] = readl((void const volatile *)(& reg->aenmailbox1)); ha->aenmb[2] = readl((void const volatile *)(& reg->aenmailbox2)); ha->aenmb[3] = readl((void const volatile *)(& reg->aenmailbox3)); ql_dbg(33554432U, vha, 20599, "Asynchronous port Update received aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n", ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]); data_size = 4; goto ldv_66578; case 32773U: ql_log(2U, vha, 20613, "Asynchronous over temperature event received aenmb[0]: %x\n", ha->aenmb[0]); goto ldv_66578; case 32774U: ql_log(2U, vha, 20614, "Asynchronous normal temperature event received aenmb[0]: %x\n", ha->aenmb[0]); goto ldv_66578; case 32775U: ql_log(2U, vha, 20611, "Asynchronous critical temperature event received aenmb[0]: %x\n", ha->aenmb[0]); goto ldv_66578; default: tmp = readw((void const volatile *)(& reg->aenmailbox1)); ha->aenmb[1] = (uint32_t )tmp; tmp___0 = readw((void const volatile *)(& reg->aenmailbox2)); ha->aenmb[2] = (uint32_t )tmp___0; tmp___1 = readw((void const volatile *)(& reg->aenmailbox3)); ha->aenmb[3] = (uint32_t )tmp___1; tmp___2 = readw((void const volatile *)(& reg->aenmailbox4)); ha->aenmb[4] = (uint32_t )tmp___2; tmp___3 = readw((void const volatile *)(& reg->aenmailbox5)); ha->aenmb[5] = (uint32_t )tmp___3; tmp___4 = readw((void const volatile *)(& reg->aenmailbox6)); ha->aenmb[6] = (uint32_t )tmp___4; tmp___5 = readw((void const volatile *)(& reg->aenmailbox7)); ha->aenmb[7] = (uint32_t )tmp___5; ql_dbg(33554432U, vha, 20600, "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n", ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3], ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]); goto ldv_66578; } ldv_66578: qlafx00_post_aenfx_work(vha, ha->aenmb[0], (uint32_t *)(& ha->aenmb), data_size); return; } } static void qlafx00_mbx_completion(scsi_qla_host_t *vha , uint32_t mb0 ) { uint16_t cnt ; uint32_t *wptr ; struct qla_hw_data *ha ; struct device_reg_fx00 *reg ; { ha = vha->hw; reg = & (ha->iobase)->ispfx00; if ((unsigned long )ha->mcp32 == (unsigned long )((struct 
mbx_cmd_32 *)0)) { ql_dbg(33554432U, vha, 20606, "MBX pointer OLD_ERROR.\n"); } else { } ha->flags.mbox_int = 1U; ha->mailbox_out32[0] = mb0; wptr = & reg->mailbox17; cnt = 1U; goto ldv_66594; ldv_66593: ha->mailbox_out32[(int )cnt] = readl((void const volatile *)wptr); wptr = wptr + 1; cnt = (uint16_t )((int )cnt + 1); ldv_66594: ; if ((int )((unsigned short )ha->mbx_count) > (int )cnt) { goto ldv_66593; } else { } return; } } irqreturn_t qlafx00_intr_handler(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct device_reg_fx00 *reg ; int status ; unsigned long iter ; uint32_t stat ; uint32_t mb[8U] ; struct rsp_que *rsp ; unsigned long flags ; uint32_t clr_intr ; uint32_t intr_stat ; int tmp ; long tmp___0 ; raw_spinlock_t *tmp___1 ; void *tmp___2 ; bool tmp___3 ; unsigned short tmp___4 ; unsigned short tmp___5 ; unsigned long tmp___6 ; { clr_intr = 0U; intr_stat = 0U; rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20605, "%s: NULL response queue pointer.\n", "qlafx00_intr_handler"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->ispfx00; status = 0; tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (1); } else { } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); tmp___2 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___2; iter = 50UL; goto ldv_66617; ldv_66616: stat = readl((void const volatile *)ha->cregbase + 138096U); tmp___3 = qla2x00_check_reg32_for_disconnect(vha, stat); if ((int )tmp___3) { goto ldv_66615; } else { } intr_stat = stat & 7U; if (intr_stat == 0U) { goto ldv_66615; } else { } if ((int )stat & 1) { tmp___4 = readw((void const volatile *)(& reg->mailbox16)); mb[0] = (uint32_t )tmp___4; qlafx00_mbx_completion(vha, mb[0]); status = status | 1; clr_intr = clr_intr | 1U; } else { } if ((intr_stat & 4U) != 0U) { tmp___5 = readw((void const volatile *)(& reg->aenmailbox0)); ha->aenmb[0] = (uint32_t )tmp___5; qlafx00_async_event(vha); clr_intr = clr_intr | 4U; } else { } if ((intr_stat & 2U) != 0U) { qlafx00_process_response_queue(vha, rsp); clr_intr = clr_intr | 2U; } else { } writel(~ clr_intr, (void volatile *)ha->cregbase + 138096U); readl((void const volatile *)ha->cregbase + 138096U); clr_intr = 0U; ldv_66617: tmp___6 = iter; iter = iter - 1UL; if (tmp___6 != 0UL) { goto ldv_66616; } else { } ldv_66615: qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } __inline static cont_a64_entry_t *qlafx00_prep_cont_type1_iocb(struct req_que *req , cont_a64_entry_t *lcont_pkt ) { cont_a64_entry_t *cont_pkt ; { req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } cont_pkt = (cont_a64_entry_t *)req->ring_ptr; lcont_pkt->entry_type = 3U; return (cont_pkt); } } __inline static void qlafx00_build_scsi_iocbs(srb_t *sp , struct cmd_type_7_fx00 *cmd_pkt , uint16_t tot_dsds , struct cmd_type_7_fx00 *lcmd_pkt ) { uint16_t avail_dsds ; __le32 *cur_dsd ; scsi_qla_host_t *vha ; struct scsi_cmnd *cmd ; struct scatterlist *sg ; int i ; int cont ; struct req_que *req ; cont_a64_entry_t lcont_pkt ; cont_a64_entry_t *cont_pkt ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; dma_addr_t sle_dma ; __le32 *tmp___2 ; __le32 *tmp___3 ; __le32 *tmp___4 
; { vha = (sp->fcport)->vha; req = vha->req; cmd = sp->u.scmd.cmd; cont = 0; cont_pkt = (cont_a64_entry_t *)0; lcmd_pkt->entry_type = 7U; tmp = scsi_bufflen(cmd); if (tmp == 0U || (unsigned int )cmd->sc_data_direction == 3U) { lcmd_pkt->byte_count = 0U; return; } else { } if ((unsigned int )cmd->sc_data_direction == 1U) { lcmd_pkt->cntrl_flags = 1U; tmp___0 = scsi_bufflen(cmd); vha->qla_stats.output_bytes = vha->qla_stats.output_bytes + (uint64_t )tmp___0; } else if ((unsigned int )cmd->sc_data_direction == 2U) { lcmd_pkt->cntrl_flags = 2U; tmp___1 = scsi_bufflen(cmd); vha->qla_stats.input_bytes = vha->qla_stats.input_bytes + (uint64_t )tmp___1; } else { } avail_dsds = 1U; cur_dsd = (__le32 *)(& lcmd_pkt->dseg_0_address); i = 0; sg = scsi_sglist(cmd); goto ldv_66641; ldv_66640: ; if ((unsigned int )avail_dsds == 0U) { memset((void *)(& lcont_pkt), 0, 64UL); cont_pkt = qlafx00_prep_cont_type1_iocb(req, & lcont_pkt); cur_dsd = (__le32 *)(& lcont_pkt.dseg_0_address); avail_dsds = 5U; cont = 1; } else { } sle_dma = sg->dma_address; tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = (unsigned int )sle_dma; tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = (unsigned int )(sle_dma >> 32ULL); tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = sg->dma_length; avail_dsds = (uint16_t )((int )avail_dsds - 1); if ((unsigned int )avail_dsds == 0U && cont == 1) { cont = 0; memcpy_toio((void volatile *)cont_pkt, (void const *)(& lcont_pkt), 64UL); } else { } i = i + 1; sg = sg_next(sg); ldv_66641: ; if ((int )tot_dsds > i) { goto ldv_66640; } else { } if ((unsigned int )avail_dsds != 0U && cont == 1) { memcpy_toio((void volatile *)cont_pkt, (void const *)(& lcont_pkt), 64UL); } else { } return; } } int qlafx00_start_scsi(srb_t *sp ) { int ret ; int nseg ; unsigned long flags ; uint32_t index ; uint32_t handle ; uint16_t cnt ; uint16_t req_cnt ; uint16_t tot_dsds ; struct req_que *req ; struct rsp_que *rsp ; struct scsi_cmnd *cmd ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct cmd_type_7_fx00 *cmd_pkt ; struct cmd_type_7_fx00 lcmd_pkt ; struct scsi_lun llun ; raw_spinlock_t *tmp ; unsigned int tmp___0 ; struct scatterlist *tmp___1 ; long tmp___2 ; unsigned int tmp___3 ; unsigned int tmp___4 ; { req = (struct req_que *)0; rsp = (struct rsp_que *)0; cmd = sp->u.scmd.cmd; vha = (sp->fcport)->vha; ha = vha->hw; ret = 0; rsp = *(ha->rsp_q_map); req = vha->req; tot_dsds = 0U; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); handle = req->current_outstanding_cmd; index = 1U; goto ldv_66667; ldv_66666: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_66665; } else { } index = index + 1U; ldv_66667: ; if ((uint32_t )req->num_outstanding_cmds > index) { goto ldv_66666; } else { } ldv_66665: ; if ((uint32_t )req->num_outstanding_cmds == index) { goto queuing_error; } else { } tmp___3 = scsi_sg_count(cmd); if (tmp___3 != 0U) { tmp___0 = scsi_sg_count(cmd); tmp___1 = scsi_sglist(cmd); nseg = dma_map_sg_attrs(& (ha->pdev)->dev, tmp___1, (int )tmp___0, cmd->sc_data_direction, (struct dma_attrs *)0); tmp___2 = ldv__builtin_expect(nseg == 0, 0L); if (tmp___2 != 0L) { goto queuing_error; } else { } } else { nseg = 0; } tot_dsds = (uint16_t )nseg; req_cnt = qla24xx_calc_iocbs(vha, (int )tot_dsds); if ((int )req->cnt < (int )req_cnt + 2) { tmp___4 = __readl((void const volatile *)req->req_q_out); cnt = (uint16_t 
)tmp___4; if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } if ((int )req->cnt < (int )req_cnt + 2) { goto queuing_error; } else { } } else { } req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)((unsigned long )handle); req->cnt = (int )req->cnt - (int )req_cnt; cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr; memset((void *)(& lcmd_pkt), 0, 64UL); lcmd_pkt.handle = ((unsigned int )req->id << 16) | sp->handle; lcmd_pkt.reserved_0 = 0U; lcmd_pkt.port_path_ctrl = 0U; lcmd_pkt.reserved_1 = 0U; lcmd_pkt.dseg_count = tot_dsds; lcmd_pkt.tgt_idx = (sp->fcport)->tgt_id; int_to_scsilun((cmd->device)->lun, & llun); host_to_adap((uint8_t *)(& llun), (uint8_t *)(& lcmd_pkt.lun), 8U); host_to_adap(cmd->cmnd, (uint8_t *)(& lcmd_pkt.fcp_cdb), 16U); lcmd_pkt.byte_count = scsi_bufflen(cmd); qlafx00_build_scsi_iocbs(sp, cmd_pkt, (int )tot_dsds, & lcmd_pkt); lcmd_pkt.entry_count = (unsigned char )req_cnt; lcmd_pkt.entry_status = (unsigned char )rsp->id; ql_dump_buffer(134348800U, vha, 12334, cmd->cmnd, (uint32_t )cmd->cmd_len); ql_dump_buffer(134348800U, vha, 12338, (uint8_t *)(& lcmd_pkt), 64U); memcpy_toio((void volatile *)cmd_pkt, (void const *)(& lcmd_pkt), 64UL); __asm__ volatile ("sfence": : : "memory"); req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } sp->flags = (uint16_t )((unsigned int )sp->flags | 1U); writel((unsigned int )req->ring_index, (void volatile *)req->req_q_in); writel(ha->rqstq_intr_code, (void volatile *)ha->cregbase + 133636U); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); queuing_error: ; if ((unsigned int )tot_dsds != 0U) { scsi_dma_unmap(cmd); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (258); } } void qlafx00_tm_iocb(srb_t *sp , struct tsk_mgmt_entry_fx00 *ptm_iocb ) { struct srb_iocb *fxio ; scsi_qla_host_t *vha ; struct req_que *req ; struct tsk_mgmt_entry_fx00 tm_iocb ; struct scsi_lun llun ; { fxio = & sp->u.iocb_cmd; vha = (sp->fcport)->vha; req = vha->req; memset((void *)(& tm_iocb), 0, 64UL); tm_iocb.entry_type = 5U; tm_iocb.entry_count = 1U; tm_iocb.handle = ((unsigned int )req->id << 16) | sp->handle; tm_iocb.reserved_0 = 0U; tm_iocb.tgt_id = (sp->fcport)->tgt_id; tm_iocb.control_flags = fxio->u.tmf.flags; if (tm_iocb.control_flags == 16U) { int_to_scsilun(fxio->u.tmf.lun, & llun); host_to_adap((uint8_t *)(& llun), (uint8_t *)(& tm_iocb.lun), 8U); } else { } memcpy((void *)ptm_iocb, (void const *)(& tm_iocb), 64UL); __asm__ volatile ("sfence": : : "memory"); return; } } void qlafx00_abort_iocb(srb_t *sp , struct abort_iocb_entry_fx00 *pabt_iocb ) { struct srb_iocb *fxio ; scsi_qla_host_t *vha ; struct req_que *req ; struct abort_iocb_entry_fx00 abt_iocb ; { fxio = & sp->u.iocb_cmd; vha = (sp->fcport)->vha; req = vha->req; memset((void *)(& abt_iocb), 0, 64UL); abt_iocb.entry_type = 8U; abt_iocb.entry_count = 1U; abt_iocb.handle = ((unsigned int )req->id << 16) | sp->handle; abt_iocb.abort_handle = ((unsigned int )req->id << 16) | fxio->u.abt.cmd_hndl; abt_iocb.tgt_id_sts = (sp->fcport)->tgt_id; abt_iocb.req_que_no = req->id; memcpy((void *)pabt_iocb, (void const *)(& abt_iocb), 64UL); __asm__ volatile ("sfence": : : "memory"); return; } } void 
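/* qlafx00_fxdisc_iocb (below): builds a 64-byte fxdisc IOCB (entry type 11).
   For a driver-internal iocb_cmd request (sp->type == 10U) the request and
   response DMA handles, lengths and data word are placed inline in the IOCB;
   for a BSG pass-through job the request and reply scatter-gather lists are
   walked, chaining continuation type-1 IOCBs (5 data segments each) and
   incrementing entry_count as needed. The finished IOCB is copied to the ring
   with memcpy_toio() followed by an sfence write barrier. */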
qlafx00_fxdisc_iocb(srb_t *sp , struct fxdisc_entry_fx00 *pfxiocb ) { struct srb_iocb *fxio ; struct qla_mt_iocb_rqst_fx00 *piocb_rqst ; struct fc_bsg_job *bsg_job ; struct fxdisc_entry_fx00 fx_iocb ; uint8_t entry_cnt ; struct scatterlist *sg ; int avail_dsds ; int tot_dsds ; cont_a64_entry_t lcont_pkt ; cont_a64_entry_t *cont_pkt ; __le32 *cur_dsd ; int index ; int cont ; dma_addr_t sle_dma ; __le32 *tmp ; __le32 *tmp___0 ; __le32 *tmp___1 ; int avail_dsds___0 ; int tot_dsds___0 ; cont_a64_entry_t lcont_pkt___0 ; cont_a64_entry_t *cont_pkt___0 ; __le32 *cur_dsd___0 ; int index___0 ; int cont___0 ; dma_addr_t sle_dma___0 ; __le32 *tmp___2 ; __le32 *tmp___3 ; __le32 *tmp___4 ; { fxio = & sp->u.iocb_cmd; entry_cnt = 1U; memset((void *)(& fx_iocb), 0, 64UL); fx_iocb.entry_type = 11U; fx_iocb.handle = sp->handle; fx_iocb.entry_count = entry_cnt; if ((unsigned int )sp->type == 10U) { fx_iocb.func_num = sp->u.iocb_cmd.u.fxiocb.req_func_type; fx_iocb.adapid = fxio->u.fxiocb.adapter_id; fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi; fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0; fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1; fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra; if ((int )fxio->u.fxiocb.flags & 1) { fx_iocb.req_dsdcnt = 1U; fx_iocb.req_xfrcnt = (unsigned short )fxio->u.fxiocb.req_len; fx_iocb.dseg_rq_address[0] = (unsigned int )fxio->u.fxiocb.req_dma_handle; fx_iocb.dseg_rq_address[1] = (unsigned int )(fxio->u.fxiocb.req_dma_handle >> 32ULL); fx_iocb.dseg_rq_len = fxio->u.fxiocb.req_len; } else { } if (((int )fxio->u.fxiocb.flags & 2) != 0) { fx_iocb.rsp_dsdcnt = 1U; fx_iocb.rsp_xfrcnt = (unsigned short )fxio->u.fxiocb.rsp_len; fx_iocb.dseg_rsp_address[0] = (unsigned int )fxio->u.fxiocb.rsp_dma_handle; fx_iocb.dseg_rsp_address[1] = (unsigned int )(fxio->u.fxiocb.rsp_dma_handle >> 32ULL); fx_iocb.dseg_rsp_len = fxio->u.fxiocb.rsp_len; } else { } if (((int )fxio->u.fxiocb.flags & 4) != 0) { fx_iocb.dataword = fxio->u.fxiocb.req_data; } else { } fx_iocb.flags = fxio->u.fxiocb.flags; } else { bsg_job = sp->u.bsg_job; piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)(& (bsg_job->request)->rqst_data.h_vendor.vendor_cmd) + 1U; fx_iocb.func_num = piocb_rqst->func_type; fx_iocb.adapid = piocb_rqst->adapid; fx_iocb.adapid_hi = piocb_rqst->adapid_hi; fx_iocb.reserved_0 = piocb_rqst->reserved_0; fx_iocb.reserved_1 = piocb_rqst->reserved_1; fx_iocb.dataword_extra = piocb_rqst->dataword_extra; fx_iocb.dataword = piocb_rqst->dataword; fx_iocb.req_xfrcnt = piocb_rqst->req_len; fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len; if ((int )piocb_rqst->flags & 1) { cont_pkt = (cont_a64_entry_t *)0; index = 0; cont = 0; fx_iocb.req_dsdcnt = (unsigned short )bsg_job->request_payload.sg_cnt; tot_dsds = bsg_job->request_payload.sg_cnt; cur_dsd = (__le32 *)(& fx_iocb.dseg_rq_address); avail_dsds = 1; index = 0; sg = bsg_job->request_payload.sg_list; goto ldv_66705; ldv_66704: ; if (avail_dsds == 0) { memset((void *)(& lcont_pkt), 0, 64UL); cont_pkt = qlafx00_prep_cont_type1_iocb(((sp->fcport)->vha)->req, & lcont_pkt); cur_dsd = (__le32 *)(& lcont_pkt.dseg_0_address); avail_dsds = 5; cont = 1; entry_cnt = (uint8_t )((int )entry_cnt + 1); } else { } sle_dma = sg->dma_address; tmp = cur_dsd; cur_dsd = cur_dsd + 1; *tmp = (unsigned int )sle_dma; tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )(sle_dma >> 32ULL); tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = sg->dma_length; avail_dsds = avail_dsds - 1; if (avail_dsds == 0 && cont == 1) { cont = 0; memcpy_toio((void volatile *)cont_pkt, 
(void const *)(& lcont_pkt), 64UL); ql_dump_buffer(8421376U, (sp->fcport)->vha, 12354, (uint8_t *)(& lcont_pkt), 64U); } else { } index = index + 1; sg = sg_next(sg); ldv_66705: ; if (index < tot_dsds) { goto ldv_66704; } else { } if (avail_dsds != 0 && cont == 1) { memcpy_toio((void volatile *)cont_pkt, (void const *)(& lcont_pkt), 64UL); ql_dump_buffer(8421376U, (sp->fcport)->vha, 12355, (uint8_t *)(& lcont_pkt), 64U); } else { } } else { } if (((int )piocb_rqst->flags & 2) != 0) { cont_pkt___0 = (cont_a64_entry_t *)0; index___0 = 0; cont___0 = 0; fx_iocb.rsp_dsdcnt = (unsigned short )bsg_job->reply_payload.sg_cnt; tot_dsds___0 = bsg_job->reply_payload.sg_cnt; cur_dsd___0 = (__le32 *)(& fx_iocb.dseg_rsp_address); avail_dsds___0 = 1; index___0 = 0; sg = bsg_job->reply_payload.sg_list; goto ldv_66716; ldv_66715: ; if (avail_dsds___0 == 0) { memset((void *)(& lcont_pkt___0), 0, 64UL); cont_pkt___0 = qlafx00_prep_cont_type1_iocb(((sp->fcport)->vha)->req, & lcont_pkt___0); cur_dsd___0 = (__le32 *)(& lcont_pkt___0.dseg_0_address); avail_dsds___0 = 5; cont___0 = 1; entry_cnt = (uint8_t )((int )entry_cnt + 1); } else { } sle_dma___0 = sg->dma_address; tmp___2 = cur_dsd___0; cur_dsd___0 = cur_dsd___0 + 1; *tmp___2 = (unsigned int )sle_dma___0; tmp___3 = cur_dsd___0; cur_dsd___0 = cur_dsd___0 + 1; *tmp___3 = (unsigned int )(sle_dma___0 >> 32ULL); tmp___4 = cur_dsd___0; cur_dsd___0 = cur_dsd___0 + 1; *tmp___4 = sg->dma_length; avail_dsds___0 = avail_dsds___0 - 1; if (avail_dsds___0 == 0 && cont___0 == 1) { cont___0 = 0; memcpy_toio((void volatile *)cont_pkt___0, (void const *)(& lcont_pkt___0), 64UL); ql_dump_buffer(8421376U, (sp->fcport)->vha, 12357, (uint8_t *)(& lcont_pkt___0), 64U); } else { } index___0 = index___0 + 1; sg = sg_next(sg); ldv_66716: ; if (index___0 < tot_dsds___0) { goto ldv_66715; } else { } if (avail_dsds___0 != 0 && cont___0 == 1) { memcpy_toio((void volatile *)cont_pkt___0, (void const *)(& lcont_pkt___0), 64UL); ql_dump_buffer(8421376U, (sp->fcport)->vha, 12358, (uint8_t *)(& lcont_pkt___0), 64U); } else { } } else { } if (((int )piocb_rqst->flags & 4) != 0) { fx_iocb.dataword = piocb_rqst->dataword; } else { } fx_iocb.flags = piocb_rqst->flags; fx_iocb.entry_count = entry_cnt; } ql_dump_buffer(8421376U, (sp->fcport)->vha, 12359, (uint8_t *)(& fx_iocb), 64U); memcpy_toio((void volatile *)pfxiocb, (void const *)(& fx_iocb), 64UL); __asm__ volatile ("sfence": : : "memory"); return; } } void choose_timer_27(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_27 = 2; return; } } void activate_pending_timer_27(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_27 == (unsigned long )timer) { if (ldv_timer_state_27 == 2 || pending_flag != 0) { ldv_timer_list_27 = timer; ldv_timer_list_27->data = data; ldv_timer_state_27 = 1; } else { } return; } else { } reg_timer_27(timer); ldv_timer_list_27->data = data; return; } } void disable_suitable_timer_27(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_27) { ldv_timer_state_27 = 0; return; } else { } return; } } int reg_timer_27(struct timer_list *timer ) { { ldv_timer_list_27 = timer; ldv_timer_state_27 = 1; return (0); } } bool ldv_queue_work_on_263(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); 
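/* LDV environment model wrapper: the result of the real queue_work_on() call
   is recorded below, and activate_work_7() marks the work item as pending so
   the verifier's environment model may later invoke its handler. */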
ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_264(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_265(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_266(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_267(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_268(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern unsigned long __msecs_to_jiffies(unsigned int const ) ; __inline static unsigned long msecs_to_jiffies(unsigned int const m ) { unsigned long tmp___0 ; { tmp___0 = __msecs_to_jiffies(m); return (tmp___0); } } bool ldv_queue_work_on_279(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_281(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_280(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_283(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_282(struct workqueue_struct *ldv_func_arg1 ) ; void disable_suitable_timer_28(struct timer_list *timer ) ; int reg_timer_28(struct timer_list *timer ) ; void choose_timer_28(struct timer_list *timer ) ; void activate_pending_timer_28(struct timer_list *timer , unsigned long data , int pending_flag ) ; extern void __udelay(unsigned long ) ; int ldv_scsi_add_host_with_dma_284(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; static int const MD_MIU_TEST_AGT_RDDATA___0[4U] = { 1090519208, 1090519212, 1090519224, 1090519228}; static uint32_t const qla8044_reg_tbl[14U] = { 13480U, 13484U, 13488U, 14216U, 14212U, 14220U, 13640U, 14304U, 14208U, 13648U, 13652U, 13656U, 13904U, 14260U}; void qla8044_get_minidump(struct scsi_qla_host *vha ) ; int qla8044_collect_md_data(struct scsi_qla_host *vha ) ; uint32_t qla8044_rd_reg(struct qla_hw_data *ha , ulong addr ) { unsigned int tmp ; { tmp = readl((void const volatile *)(ha->nx_pcibase + 
addr)); return (tmp); } } void qla8044_wr_reg(struct qla_hw_data *ha , ulong addr , uint32_t val ) { { writel(val, (void volatile *)(ha->nx_pcibase + addr)); return; } } int qla8044_rd_direct(struct scsi_qla_host *vha , uint32_t const crb_reg ) { struct qla_hw_data *ha ; uint32_t tmp ; { ha = vha->hw; if ((unsigned int )crb_reg <= 13U) { tmp = qla8044_rd_reg(ha, (ulong )qla8044_reg_tbl[crb_reg]); return ((int )tmp); } else { return (258); } } } void qla8044_wr_direct(struct scsi_qla_host *vha , uint32_t const crb_reg , uint32_t const value ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((unsigned int )crb_reg <= 13U) { qla8044_wr_reg(ha, (ulong )qla8044_reg_tbl[crb_reg], value); } else { } return; } } static int qla8044_set_win_base(scsi_qla_host_t *vha , uint32_t addr ) { uint32_t val ; int ret_val ; struct qla_hw_data *ha ; { ret_val = 0; ha = vha->hw; qla8044_wr_reg(ha, (ulong )(((int )ha->portnum + 3584) * 4), addr); val = qla8044_rd_reg(ha, (ulong )(((int )ha->portnum + 3584) * 4)); if (val != addr) { ql_log(1U, vha, 45191, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n", "qla8044_set_win_base", addr, val); ret_val = 258; } else { } return (ret_val); } } static int qla8044_rd_reg_indirect(scsi_qla_host_t *vha , uint32_t addr , uint32_t *data ) { int ret_val ; struct qla_hw_data *ha ; { ret_val = 0; ha = vha->hw; ret_val = qla8044_set_win_base(vha, addr); if (ret_val == 0) { *data = qla8044_rd_reg(ha, 14576UL); } else { ql_log(1U, vha, 45192, "%s: failed read of addr 0x%x!\n", "qla8044_rd_reg_indirect", addr); } return (ret_val); } } static int qla8044_wr_reg_indirect(scsi_qla_host_t *vha , uint32_t addr , uint32_t data ) { int ret_val ; struct qla_hw_data *ha ; { ret_val = 0; ha = vha->hw; ret_val = qla8044_set_win_base(vha, addr); if (ret_val == 0) { qla8044_wr_reg(ha, 14576UL, data); } else { ql_log(1U, vha, 45193, "%s: failed wrt to addr 0x%x, data 0x%x\n", "qla8044_wr_reg_indirect", addr, data); } return (ret_val); } } static void qla8044_read_write_crb_reg(struct scsi_qla_host *vha , uint32_t raddr , uint32_t waddr ) { uint32_t value ; { qla8044_rd_reg_indirect(vha, raddr, & value); qla8044_wr_reg_indirect(vha, waddr, value); return; } } static int qla8044_poll_wait_for_ready(struct scsi_qla_host *vha , uint32_t addr1 , uint32_t mask ) { unsigned long timeout ; uint32_t temp ; unsigned long tmp ; { tmp = msecs_to_jiffies(100U); timeout = tmp + (unsigned long )jiffies; ldv_65968: qla8044_rd_reg_indirect(vha, addr1, & temp); if ((temp & mask) != 0U) { goto ldv_65961; } else { } if ((long )((unsigned long )jiffies - timeout) >= 0L) { ql_log(1U, vha, 45393, "Error in processing rdmdio entry\n"); return (-1); } else { } goto ldv_65968; ldv_65961: ; return (0); } } static uint32_t qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha , uint32_t addr1 , uint32_t addr3 , uint32_t mask , uint32_t addr ) { uint32_t temp ; int ret ; { ret = 0; ret = qla8044_poll_wait_for_ready(vha, addr1, mask); if (ret == -1) { return (4294967295U); } else { } temp = addr | 1073741824U; qla8044_wr_reg_indirect(vha, addr1, temp); ret = qla8044_poll_wait_for_ready(vha, addr1, mask); if (ret == -1) { return (0U); } else { } qla8044_rd_reg_indirect(vha, addr3, (uint32_t *)(& ret)); return ((uint32_t )ret); } } static int qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha , uint32_t addr1 , uint32_t addr2 , uint32_t addr3 , uint32_t mask ) { unsigned long timeout ; uint32_t temp ; unsigned long tmp ; { tmp = msecs_to_jiffies(100U); timeout = tmp + (unsigned long )jiffies; ldv_65994: 
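/* Poll loop: re-read the MDIO status word via qla8044_ipmdio_rd_reg() until
   the busy bit (bit 0) clears; give up with -1 once the 100 ms jiffies
   deadline computed above has passed. */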
temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2); if ((temp & 1U) == 0U) { goto ldv_65987; } else { } if ((long )((unsigned long )jiffies - timeout) >= 0L) { ql_log(1U, vha, 45394, "Error in processing mdiobus idle\n"); return (-1); } else { } goto ldv_65994; ldv_65987: ; return (0); } } static int qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha , uint32_t addr1 , uint32_t addr3 , uint32_t mask , uint32_t addr , uint32_t value ) { int ret ; { ret = 0; ret = qla8044_poll_wait_for_ready(vha, addr1, mask); if (ret == -1) { return (-1); } else { } qla8044_wr_reg_indirect(vha, addr3, value); qla8044_wr_reg_indirect(vha, addr1, addr); ret = qla8044_poll_wait_for_ready(vha, addr1, mask); if (ret == -1) { return (-1); } else { } return (0); } } static void qla8044_rmw_crb_reg(struct scsi_qla_host *vha , uint32_t raddr , uint32_t waddr , struct qla8044_rmw *p_rmw_hdr ) { uint32_t value ; { if ((unsigned int )p_rmw_hdr->index_a != 0U) { value = vha->reset_tmplt.array[(int )p_rmw_hdr->index_a]; } else { qla8044_rd_reg_indirect(vha, raddr, & value); } value = p_rmw_hdr->test_mask & value; value = value << (int )p_rmw_hdr->shl; value = value >> (int )p_rmw_hdr->shr; value = p_rmw_hdr->or_value | value; value = p_rmw_hdr->xor_value ^ value; qla8044_wr_reg_indirect(vha, waddr, value); return; } } __inline static void qla8044_set_qsnt_ready(struct scsi_qla_host *vha ) { uint32_t qsnt_state ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla8044_rd_direct(vha, 5U); qsnt_state = (uint32_t )tmp; qsnt_state = (uint32_t )(1 << (int )ha->portnum) | qsnt_state; qla8044_wr_direct(vha, 5U, qsnt_state); ql_log(2U, vha, 45198, "%s(%ld): qsnt_state: 0x%08x\n", "qla8044_set_qsnt_ready", vha->host_no, qsnt_state); return; } } void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha ) { uint32_t qsnt_state ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla8044_rd_direct(vha, 5U); qsnt_state = (uint32_t )tmp; qsnt_state = (uint32_t )(~ (1 << (int )ha->portnum)) & qsnt_state; qla8044_wr_direct(vha, 5U, qsnt_state); ql_log(2U, vha, 45199, "%s(%ld): qsnt_state: 0x%08x\n", "qla8044_clear_qsnt_ready", vha->host_no, qsnt_state); return; } } static int qla8044_lock_recovery(struct scsi_qla_host *vha ) { uint32_t lock ; uint32_t lockid ; struct qla_hw_data *ha ; { lock = 0U; ha = vha->hw; lockid = qla8044_rd_reg(ha, 14236UL); if ((lockid & 3U) != 0U) { return (258); } else { } qla8044_wr_reg(ha, 14236UL, (uint32_t )(((int )ha->portnum << 2) | 1)); msleep(200U); lockid = qla8044_rd_reg(ha, 14236UL); if ((lockid & 60U) != (uint32_t )((int )ha->portnum << 2)) { return (258); } else { } ql_dbg(524288U, vha, 45195, "%s:%d: IDC Lock recovery initiated\n", "qla8044_lock_recovery", (int )ha->portnum); qla8044_wr_reg(ha, 14236UL, (uint32_t )(((int )ha->portnum << 2) | 2)); qla8044_wr_reg(ha, 13572UL, 255U); qla8044_rd_reg(ha, 14444UL); qla8044_wr_reg(ha, 14236UL, 0U); lock = qla8044_rd_reg(ha, 14440UL); if (lock != 0U) { lockid = qla8044_rd_reg(ha, 13572UL); lockid = ((lockid + 256U) & 4294967040U) | (uint32_t )ha->portnum; qla8044_wr_reg(ha, 13572UL, lockid); return (0); } else { return (258); } } } int qla8044_idc_lock(struct qla_hw_data *ha ) { uint32_t ret_val ; uint32_t timeout ; uint32_t status ; uint32_t lock_id ; uint32_t lock_cnt ; uint32_t func_num ; uint32_t tmo_owner ; uint32_t first_owner ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { ret_val = 0U; timeout = 0U; status = 0U; tmo_owner = 0U; first_owner = 0U; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; goto ldv_66045; 
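/* IDC lock retry loop: each pass reads the lock register; a non-zero value
   means the lock was granted, and the lock-ID register is then stamped with an
   incremented lock count (upper bits) plus this port number (low byte). After
   more than 9 failed attempts (roughly 2 s at 200 ms per retry) the current
   and first observed owners are compared and qla8044_lock_recovery() is
   attempted before retrying. */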
ldv_66044: status = qla8044_rd_reg(ha, 14440UL); if (status != 0U) { lock_id = qla8044_rd_reg(ha, 13572UL); lock_id = ((lock_id + 256U) & 4294967040U) | (uint32_t )ha->portnum; qla8044_wr_reg(ha, 13572UL, lock_id); goto ldv_66042; } else { } if (timeout == 0U) { first_owner = qla8044_rd_reg(ha, 13572UL); } else { } timeout = timeout + 1U; if (timeout > 9U) { tmo_owner = qla8044_rd_reg(ha, 13572UL); func_num = tmo_owner & 255U; lock_cnt = tmo_owner >> 8; ql_log(1U, vha, 45332, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n", "qla8044_idc_lock", (int )ha->portnum, func_num, lock_cnt, first_owner & 255U); if (first_owner != tmo_owner) { ql_dbg(524288U, vha, 45333, "%s: %d: IDC lock failed\n", "qla8044_idc_lock", (int )ha->portnum); timeout = 0U; } else { tmp___0 = qla8044_lock_recovery(vha); if (tmp___0 == 0) { ret_val = 0U; ql_dbg(524288U, vha, 45334, "%s:IDC lock Recovery by %dsuccessful...\n", "qla8044_idc_lock", (int )ha->portnum); } else { } ql_dbg(524288U, vha, 45194, "%s: IDC lock Recovery by %d failed, Retrying timeout\n", "qla8044_idc_lock", (int )ha->portnum); timeout = 0U; } } else { } msleep(200U); ldv_66045: ; if (status == 0U) { goto ldv_66044; } else { } ldv_66042: ; return ((int )ret_val); } } void qla8044_idc_unlock(struct qla_hw_data *ha ) { int id ; scsi_qla_host_t *vha ; void *tmp ; uint32_t tmp___0 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = qla8044_rd_reg(ha, 13572UL); id = (int )tmp___0; if ((id & 255) != (int )ha->portnum) { ql_log(1U, vha, 45336, "%s: IDC Unlock by %d failed, lock owner is %d!\n", "qla8044_idc_unlock", (int )ha->portnum, id & 255); return; } else { } qla8044_wr_reg(ha, 13572UL, (uint32_t )(id | 255)); qla8044_rd_reg(ha, 14444UL); return; } } static int qla8044_flash_lock(scsi_qla_host_t *vha ) { int lock_owner ; int timeout ; uint32_t lock_status ; int ret_val ; struct qla_hw_data *ha ; uint32_t tmp ; { timeout = 0; lock_status = 0U; ret_val = 0; ha = vha->hw; goto ldv_66063; ldv_66062: lock_status = qla8044_rd_reg(ha, 14416UL); if (lock_status != 0U) { goto ldv_66060; } else { } timeout = timeout + 1; if (timeout > 499) { tmp = qla8044_rd_reg(ha, 13568UL); lock_owner = (int )tmp; ql_log(1U, vha, 45331, "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", "qla8044_flash_lock", (int )ha->portnum, lock_owner); ret_val = 258; goto ldv_66060; } else { } msleep(20U); ldv_66063: ; if (lock_status == 0U) { goto ldv_66062; } else { } ldv_66060: qla8044_wr_reg(ha, 13568UL, (uint32_t )ha->portnum); return (ret_val); } } static void qla8044_flash_unlock(scsi_qla_host_t *vha ) { int ret_val ; struct qla_hw_data *ha ; uint32_t tmp ; { ha = vha->hw; qla8044_wr_reg(ha, 13568UL, 255U); tmp = qla8044_rd_reg(ha, 14420UL); ret_val = (int )tmp; return; } } static void qla8044_flash_lock_recovery(struct scsi_qla_host *vha ) { int tmp ; { tmp = qla8044_flash_lock(vha); if (tmp != 0) { ql_log(1U, vha, 45344, "Resetting flash_lock\n"); } else { } qla8044_flash_unlock(vha); return; } } static int qla8044_read_flash_data(scsi_qla_host_t *vha , uint8_t *p_data , uint32_t flash_addr , int u32_word_count ) { int i ; int ret_val ; uint32_t u32_word ; int tmp ; int tmp___0 ; { ret_val = 0; tmp = qla8044_flash_lock(vha); if (tmp != 0) { ret_val = 258; goto exit_lock_error; } else { } if ((flash_addr & 3U) != 0U) { ql_log(1U, vha, 45335, "%s: Illegal addr = 0x%x\n", "qla8044_read_flash_data", flash_addr); ret_val = 258; goto exit_flash_read; } else { } i = 0; goto 
ldv_66085; ldv_66084: tmp___0 = qla8044_wr_reg_indirect(vha, 1108410416U, flash_addr & 4294901760U); if (tmp___0 != 0) { ql_log(1U, vha, 45337, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW\n! ", "qla8044_read_flash_data", flash_addr); ret_val = 258; goto exit_flash_read; } else { } ret_val = qla8044_rd_reg_indirect(vha, (flash_addr & 65535U) | 1108672512U, & u32_word); if (ret_val != 0) { ql_log(1U, vha, 45196, "%s: failed to read addr 0x%x!\n", "qla8044_read_flash_data", flash_addr); goto exit_flash_read; } else { } *((uint32_t *)p_data) = u32_word; p_data = p_data + 4UL; flash_addr = flash_addr + 4U; i = i + 1; ldv_66085: ; if (i < u32_word_count) { goto ldv_66084; } else { } exit_flash_read: qla8044_flash_unlock(vha); exit_lock_error: ; return (ret_val); } } uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { int tmp ; { scsi_block_requests(vha->host); tmp = qla8044_read_flash_data(vha, buf, offset, (int )(length / 4U)); if (tmp != 0) { ql_log(1U, vha, 45197, "%s: Failed to read from flash\n", "qla8044_read_optrom_data"); } else { } scsi_unblock_requests(vha->host); return (buf); } } __inline int qla8044_need_reset(struct scsi_qla_host *vha ) { uint32_t drv_state ; uint32_t drv_active ; int rval ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { ha = vha->hw; tmp = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp; tmp___0 = qla8044_rd_direct(vha, 5U); drv_state = (uint32_t )tmp___0; rval = (int )((uint32_t )(1 << (int )ha->portnum) & drv_state); if (*((unsigned long *)ha + 2UL) != 0UL && drv_active != 0U) { rval = 1; } else { } return (rval); } } static void qla8044_write_list(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { struct qla8044_entry *p_entry ; uint32_t i ; { p_entry = (struct qla8044_entry *)p_hdr + 8U; i = 0U; goto ldv_66108; ldv_66107: qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2); if ((unsigned int )p_hdr->delay != 0U) { __udelay((unsigned long )p_hdr->delay); } else { } i = i + 1U; p_entry = p_entry + 1; ldv_66108: ; if ((uint32_t )p_hdr->count > i) { goto ldv_66107; } else { } return; } } static void qla8044_read_write_list(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { struct qla8044_entry *p_entry ; uint32_t i ; { p_entry = (struct qla8044_entry *)p_hdr + 8U; i = 0U; goto ldv_66117; ldv_66116: qla8044_read_write_crb_reg(vha, p_entry->arg1, p_entry->arg2); if ((unsigned int )p_hdr->delay != 0U) { __udelay((unsigned long )p_hdr->delay); } else { } i = i + 1U; p_entry = p_entry + 1; ldv_66117: ; if ((uint32_t )p_hdr->count > i) { goto ldv_66116; } else { } return; } } static int qla8044_poll_reg(struct scsi_qla_host *vha , uint32_t addr , int duration , uint32_t test_mask , uint32_t test_result ) { uint32_t value ; int timeout_error ; uint8_t retries ; int ret_val ; uint8_t tmp ; { ret_val = 0; ret_val = qla8044_rd_reg_indirect(vha, addr, & value); if (ret_val == 258) { timeout_error = 1; goto exit_poll_reg; } else { } retries = (uint8_t )(duration / 10); ldv_66132: ; if ((value & test_mask) != test_result) { timeout_error = 1; msleep((unsigned int )(duration / 10)); ret_val = qla8044_rd_reg_indirect(vha, addr, & value); if (ret_val == 258) { timeout_error = 1; goto exit_poll_reg; } else { } } else { timeout_error = 0; goto ldv_66131; } tmp = retries; retries = (uint8_t )((int )retries - 1); if ((unsigned int )tmp != 0U) { goto ldv_66132; } else { } ldv_66131: ; exit_poll_reg: ; if (timeout_error != 0) { vha->reset_tmplt.seq_error = 
vha->reset_tmplt.seq_error + 1; ql_log(0U, vha, 45200, "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n", "qla8044_poll_reg", value, test_mask, test_result); } else { } return (timeout_error); } } static void qla8044_poll_list(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { long delay ; struct qla8044_entry *p_entry ; struct qla8044_poll *p_poll ; uint32_t i ; uint32_t value ; int tmp ; { p_poll = (struct qla8044_poll *)p_hdr + 8U; p_entry = (struct qla8044_entry *)p_poll + 8U; delay = (long )p_hdr->delay; if (delay == 0L) { i = 0U; goto ldv_66144; ldv_66143: qla8044_poll_reg(vha, p_entry->arg1, (int )delay, p_poll->test_mask, p_poll->test_value); i = i + 1U; p_entry = p_entry + 1; ldv_66144: ; if ((uint32_t )p_hdr->count > i) { goto ldv_66143; } else { } } else { i = 0U; goto ldv_66147; ldv_66146: ; if (delay != 0L) { tmp = qla8044_poll_reg(vha, p_entry->arg1, (int )delay, p_poll->test_mask, p_poll->test_value); if (tmp != 0) { qla8044_rd_reg_indirect(vha, p_entry->arg1, & value); qla8044_rd_reg_indirect(vha, p_entry->arg2, & value); } else { } } else { } i = i + 1U; p_entry = p_entry + 1; ldv_66147: ; if ((uint32_t )p_hdr->count > i) { goto ldv_66146; } else { } } return; } } static void qla8044_poll_write_list(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { long delay ; struct qla8044_quad_entry *p_entry ; struct qla8044_poll *p_poll ; uint32_t i ; int tmp ; { p_poll = (struct qla8044_poll *)p_hdr + 8U; p_entry = (struct qla8044_quad_entry *)p_poll + 8U; delay = (long )p_hdr->delay; i = 0U; goto ldv_66159; ldv_66158: qla8044_wr_reg_indirect(vha, p_entry->dr_addr, p_entry->dr_value); qla8044_wr_reg_indirect(vha, p_entry->ar_addr, p_entry->ar_value); if (delay != 0L) { tmp = qla8044_poll_reg(vha, p_entry->ar_addr, (int )delay, p_poll->test_mask, p_poll->test_value); if (tmp != 0) { ql_dbg(524288U, vha, 45201, "%s: Timeout Error: poll list, ", "qla8044_poll_write_list"); ql_dbg(524288U, vha, 45202, "item_num %d, entry_num %d\n", i, vha->reset_tmplt.seq_index); } else { } } else { } i = i + 1U; p_entry = p_entry + 1; ldv_66159: ; if ((uint32_t )p_hdr->count > i) { goto ldv_66158; } else { } return; } } static void qla8044_read_modify_write(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { struct qla8044_entry *p_entry ; struct qla8044_rmw *p_rmw_hdr ; uint32_t i ; { p_rmw_hdr = (struct qla8044_rmw *)p_hdr + 8U; p_entry = (struct qla8044_entry *)p_rmw_hdr + 16U; i = 0U; goto ldv_66169; ldv_66168: qla8044_rmw_crb_reg(vha, p_entry->arg1, p_entry->arg2, p_rmw_hdr); if ((unsigned int )p_hdr->delay != 0U) { __udelay((unsigned long )p_hdr->delay); } else { } i = i + 1U; p_entry = p_entry + 1; ldv_66169: ; if ((uint32_t )p_hdr->count > i) { goto ldv_66168; } else { } return; } } static void qla8044_pause(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { unsigned long __ms ; unsigned long tmp ; { if ((unsigned int )p_hdr->delay != 0U) { __ms = (unsigned long )p_hdr->delay; goto ldv_66177; ldv_66176: __const_udelay(4295000UL); ldv_66177: tmp = __ms; __ms = __ms - 1UL; if (tmp != 0UL) { goto ldv_66176; } else { } } else { } return; } } static void qla8044_template_end(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { { vha->reset_tmplt.template_end = 1U; if (vha->reset_tmplt.seq_error == 0) { ql_dbg(524288U, vha, 45203, "%s: Reset sequence completed SUCCESSFULLY.\n", "qla8044_template_end"); } else { ql_log(0U, vha, 45204, "%s: Reset sequence completed with some timeout errors.\n", "qla8044_template_end"); 
} return; } } static void qla8044_poll_read_list(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { long delay ; int index ; struct qla8044_quad_entry *p_entry ; struct qla8044_poll *p_poll ; uint32_t i ; uint32_t value ; int tmp ; int tmp___0 ; { p_poll = (struct qla8044_poll *)p_hdr + 8U; p_entry = (struct qla8044_quad_entry *)p_poll + 8U; delay = (long )p_hdr->delay; i = 0U; goto ldv_66196; ldv_66195: qla8044_wr_reg_indirect(vha, p_entry->ar_addr, p_entry->ar_value); if (delay != 0L) { tmp___0 = qla8044_poll_reg(vha, p_entry->ar_addr, (int )delay, p_poll->test_mask, p_poll->test_value); if (tmp___0 != 0) { ql_dbg(524288U, vha, 45205, "%s: Timeout Error: poll list, ", "qla8044_poll_read_list"); ql_dbg(524288U, vha, 45206, "Item_num %d, entry_num %d\n", i, vha->reset_tmplt.seq_index); } else { index = vha->reset_tmplt.array_index; qla8044_rd_reg_indirect(vha, p_entry->dr_addr, & value); tmp = index; index = index + 1; vha->reset_tmplt.array[tmp] = value; if (index == 16) { vha->reset_tmplt.array_index = 1; } else { } } } else { } i = i + 1U; p_entry = p_entry + 1; ldv_66196: ; if ((uint32_t )p_hdr->count > i) { goto ldv_66195; } else { } return; } } static void qla8044_process_reset_template(struct scsi_qla_host *vha , char *p_buff ) { int index ; int entries ; struct qla8044_reset_entry_hdr *p_hdr ; char *p_entry ; { p_entry = p_buff; vha->reset_tmplt.seq_end = 0U; vha->reset_tmplt.template_end = 0U; entries = (int )(vha->reset_tmplt.hdr)->entries; index = vha->reset_tmplt.seq_index; goto ldv_66220; ldv_66219: p_hdr = (struct qla8044_reset_entry_hdr *)p_entry; switch ((int )p_hdr->cmd) { case 0: ; goto ldv_66207; case 1: qla8044_write_list(vha, p_hdr); goto ldv_66207; case 2: qla8044_read_write_list(vha, p_hdr); goto ldv_66207; case 4: qla8044_poll_list(vha, p_hdr); goto ldv_66207; case 8: qla8044_poll_write_list(vha, p_hdr); goto ldv_66207; case 16: qla8044_read_modify_write(vha, p_hdr); goto ldv_66207; case 32: qla8044_pause(vha, p_hdr); goto ldv_66207; case 64: vha->reset_tmplt.seq_end = 1U; goto ldv_66207; case 128: qla8044_template_end(vha, p_hdr); goto ldv_66207; case 256: qla8044_poll_read_list(vha, p_hdr); goto ldv_66207; default: ql_log(0U, vha, 45207, "%s: Unknown command ==> 0x%04x on entry = %d\n", "qla8044_process_reset_template", (int )p_hdr->cmd, index); goto ldv_66207; } ldv_66207: p_entry = p_entry + (unsigned long )p_hdr->size; index = index + 1; ldv_66220: ; if ((unsigned int )vha->reset_tmplt.seq_end == 0U && index < entries) { goto ldv_66219; } else { } vha->reset_tmplt.seq_index = index; return; } } static void qla8044_process_init_seq(struct scsi_qla_host *vha ) { { qla8044_process_reset_template(vha, (char *)vha->reset_tmplt.init_offset); if ((unsigned int )vha->reset_tmplt.seq_end != 1U) { ql_log(0U, vha, 45208, "%s: Abrupt INIT Sub-Sequence end.\n", "qla8044_process_init_seq"); } else { } return; } } static void qla8044_process_stop_seq(struct scsi_qla_host *vha ) { { vha->reset_tmplt.seq_index = 0; qla8044_process_reset_template(vha, (char *)vha->reset_tmplt.stop_offset); if ((unsigned int )vha->reset_tmplt.seq_end != 1U) { ql_log(0U, vha, 45209, "%s: Abrupt STOP Sub-Sequence end.\n", "qla8044_process_stop_seq"); } else { } return; } } static void qla8044_process_start_seq(struct scsi_qla_host *vha ) { { qla8044_process_reset_template(vha, (char *)vha->reset_tmplt.start_offset); if ((unsigned int )vha->reset_tmplt.template_end != 1U) { ql_log(0U, vha, 45210, "%s: Abrupt START Sub-Sequence end.\n", "qla8044_process_start_seq"); } else { } 
return; } } static int qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha , uint32_t flash_addr , uint8_t *p_data , int u32_word_count ) { uint32_t i ; uint32_t u32_word ; uint32_t flash_offset ; uint32_t addr ; int ret_val ; { addr = flash_addr; ret_val = 0; flash_offset = addr & 65535U; if ((addr & 3U) != 0U) { ql_log(0U, vha, 45211, "%s: Illegal addr = 0x%x\n", "qla8044_lockless_flash_read_u32", addr); ret_val = 258; goto exit_lockless_read; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410416U, addr); if (ret_val != 0) { ql_log(0U, vha, 45212, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", "qla8044_lockless_flash_read_u32", addr); goto exit_lockless_read; } else { } if ((unsigned long )flash_offset + (unsigned long )u32_word_count * 4UL > 65535UL) { i = 0U; goto ldv_66248; ldv_66247: ret_val = qla8044_rd_reg_indirect(vha, (addr & 65535U) | 1108672512U, & u32_word); if (ret_val != 0) { ql_log(0U, vha, 45213, "%s: failed to read addr 0x%x!\n", "qla8044_lockless_flash_read_u32", addr); goto exit_lockless_read; } else { } *((uint32_t *)p_data) = u32_word; p_data = p_data + 4UL; addr = addr + 4U; flash_offset = flash_offset + 4U; if (flash_offset > 65535U) { ret_val = qla8044_wr_reg_indirect(vha, 1108410416U, addr); if (ret_val != 0) { ql_log(0U, vha, 45215, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", "qla8044_lockless_flash_read_u32", addr); goto exit_lockless_read; } else { } flash_offset = 0U; } else { } i = i + 1U; ldv_66248: ; if ((uint32_t )u32_word_count > i) { goto ldv_66247; } else { } } else { i = 0U; goto ldv_66251; ldv_66250: ret_val = qla8044_rd_reg_indirect(vha, (addr & 65535U) | 1108672512U, & u32_word); if (ret_val != 0) { ql_log(0U, vha, 45216, "%s: failed to read addr 0x%x!\n", "qla8044_lockless_flash_read_u32", addr); goto exit_lockless_read; } else { } *((uint32_t *)p_data) = u32_word; p_data = p_data + 4UL; addr = addr + 4U; i = i + 1U; ldv_66251: ; if ((uint32_t )u32_word_count > i) { goto ldv_66250; } else { } } exit_lockless_read: ; return (ret_val); } } static int qla8044_ms_mem_write_128b(struct scsi_qla_host *vha , uint64_t addr , uint32_t *data , uint32_t count ) { int i ; int j ; int ret_val ; uint32_t agt_ctrl ; unsigned long flags ; struct qla_hw_data *ha ; uint32_t *tmp ; int tmp___0 ; uint32_t *tmp___1 ; int tmp___2 ; uint32_t *tmp___3 ; int tmp___4 ; uint32_t *tmp___5 ; int tmp___6 ; int tmp___7 ; { ret_val = 0; ha = vha->hw; if ((addr & 15ULL) != 0ULL) { ret_val = 258; goto exit_ms_mem_write; } else { } flags = _raw_write_lock_irqsave(& ha->hw_lock); ret_val = qla8044_wr_reg_indirect(vha, 1090519192U, 0U); if (ret_val == 258) { ql_log(0U, vha, 45217, "%s: write to AGT_ADDR_HI failed!\n", "qla8044_ms_mem_write_128b"); goto exit_ms_mem_write_unlock; } else { } i = 0; goto ldv_66275; ldv_66274: ; if ((addr > 13019119615ULL || addr <= 12884901887ULL) && addr > 268435455ULL) { ret_val = 258; goto exit_ms_mem_write_unlock; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1090519188U, (uint32_t )addr); tmp = data; data = data + 1; tmp___0 = qla8044_wr_reg_indirect(vha, 1090519200U, *tmp); ret_val = tmp___0 + ret_val; tmp___1 = data; data = data + 1; tmp___2 = qla8044_wr_reg_indirect(vha, 1090519204U, *tmp___1); ret_val = tmp___2 + ret_val; tmp___3 = data; data = data + 1; tmp___4 = qla8044_wr_reg_indirect(vha, 1090519216U, *tmp___3); ret_val = tmp___4 + ret_val; tmp___5 = data; data = data + 1; tmp___6 = qla8044_wr_reg_indirect(vha, 1090519220U, *tmp___5); ret_val = tmp___6 + ret_val; if (ret_val == 258) { ql_log(0U, 
vha, 45218, "%s: write to AGT_WRDATA failed!\n", "qla8044_ms_mem_write_128b"); goto exit_ms_mem_write_unlock; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1090519184U, 6U); tmp___7 = qla8044_wr_reg_indirect(vha, 1090519184U, 7U); ret_val = tmp___7 + ret_val; if (ret_val == 258) { ql_log(0U, vha, 45219, "%s: write to AGT_CTRL failed!\n", "qla8044_ms_mem_write_128b"); goto exit_ms_mem_write_unlock; } else { } j = 0; goto ldv_66273; ldv_66272: ret_val = qla8044_rd_reg_indirect(vha, 1090519184U, & agt_ctrl); if (ret_val == 258) { ql_log(0U, vha, 45220, "%s: failed to read MD_MIU_TEST_AGT_CTRL!\n", "qla8044_ms_mem_write_128b"); goto exit_ms_mem_write_unlock; } else { } if ((agt_ctrl & 8U) == 0U) { goto ldv_66271; } else { } j = j + 1; ldv_66273: ; if (j <= 999) { goto ldv_66272; } else { } ldv_66271: ; if (j > 999) { ql_log(0U, vha, 45221, "%s: MS memory write failed!\n", "qla8044_ms_mem_write_128b"); ret_val = 258; goto exit_ms_mem_write_unlock; } else { } i = i + 1; addr = addr + 16ULL; ldv_66275: ; if ((uint32_t )i < count) { goto ldv_66274; } else { } exit_ms_mem_write_unlock: _raw_write_unlock_irqrestore(& ha->hw_lock, flags); exit_ms_mem_write: ; return (ret_val); } } static int qla8044_copy_bootloader(struct scsi_qla_host *vha ) { uint8_t *p_cache ; uint32_t src ; uint32_t count ; uint32_t size ; uint64_t dest ; int ret_val ; struct qla_hw_data *ha ; uint32_t tmp ; void *tmp___0 ; { ret_val = 0; ha = vha->hw; src = 65536U; tmp = qla8044_rd_reg(ha, 13660UL); dest = (uint64_t )tmp; size = qla8044_rd_reg(ha, 13664UL); if ((size & 15U) != 0U) { size = (size + 16U) & 4294967280U; } else { } count = size / 16U; tmp___0 = vmalloc((unsigned long )size); p_cache = (uint8_t *)tmp___0; if ((unsigned long )p_cache == (unsigned long )((uint8_t *)0U)) { ql_log(0U, vha, 45222, "%s: Failed to allocate memory for boot loader cache\n", "qla8044_copy_bootloader"); ret_val = 258; goto exit_copy_bootloader; } else { } ret_val = qla8044_lockless_flash_read_u32(vha, src, p_cache, (int )(size / 4U)); if (ret_val == 258) { ql_log(0U, vha, 45223, "%s: Error reading F/W from flash!!!\n", "qla8044_copy_bootloader"); goto exit_copy_error; } else { } ql_dbg(524288U, vha, 45224, "%s: Read F/W from flash!\n", "qla8044_copy_bootloader"); ret_val = qla8044_ms_mem_write_128b(vha, dest, (uint32_t *)p_cache, count); if (ret_val == 258) { ql_log(0U, vha, 45225, "%s: Error writing F/W to MS !!!\n", "qla8044_copy_bootloader"); goto exit_copy_error; } else { } ql_dbg(524288U, vha, 45226, "%s: Wrote F/W (size %d) to MS !!!\n", "qla8044_copy_bootloader", size); exit_copy_error: vfree((void const *)p_cache); exit_copy_bootloader: ; return (ret_val); } } static int qla8044_restart(struct scsi_qla_host *vha ) { int ret_val ; struct qla_hw_data *ha ; int tmp ; { ret_val = 0; ha = vha->hw; qla8044_process_stop_seq(vha); if (ql2xmdenable != 0) { qla8044_get_minidump(vha); } else { ql_log(0U, vha, 45388, "Minidump disabled.\n"); } qla8044_process_init_seq(vha); tmp = qla8044_copy_bootloader(vha); if (tmp != 0) { ql_log(0U, vha, 45227, "%s: Copy bootloader, firmware restart failed!\n", "qla8044_restart"); ret_val = 258; goto exit_restart; } else { } qla8044_wr_reg(ha, 13820UL, 0U); qla8044_process_start_seq(vha); exit_restart: ; return (ret_val); } } static int qla8044_check_cmd_peg_status(struct scsi_qla_host *vha ) { uint32_t val ; uint32_t ret_val ; int retries ; struct qla_hw_data *ha ; { ret_val = 258U; retries = 60; ha = vha->hw; ldv_66309: val = qla8044_rd_reg(ha, 13904UL); if (val == 65281U) { ql_dbg(524288U, vha, 45228, 
"%s: Command Peg initialization complete! state=0x%x\n", "qla8044_check_cmd_peg_status", val); ret_val = 0U; goto ldv_66308; } else { } msleep(500U); retries = retries - 1; if (retries != 0) { goto ldv_66309; } else { } ldv_66308: ; return ((int )ret_val); } } static int qla8044_start_firmware(struct scsi_qla_host *vha ) { int ret_val ; int tmp ; { ret_val = 0; tmp = qla8044_restart(vha); if (tmp != 0) { ql_log(0U, vha, 45229, "%s: Restart Error!!!, Need Reset!!!\n", "qla8044_start_firmware"); ret_val = 258; goto exit_start_fw; } else { ql_dbg(524288U, vha, 45231, "%s: Restart done!\n", "qla8044_start_firmware"); } ret_val = qla8044_check_cmd_peg_status(vha); if (ret_val != 0) { ql_log(0U, vha, 45232, "%s: Peg not initialized!\n", "qla8044_start_firmware"); ret_val = 258; } else { } exit_start_fw: ; return (ret_val); } } void qla8044_clear_drv_active(struct qla_hw_data *ha ) { uint32_t drv_active ; struct scsi_qla_host *vha ; void *tmp ; int tmp___0 ; { tmp = pci_get_drvdata(ha->pdev); vha = (struct scsi_qla_host *)tmp; tmp___0 = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp___0; drv_active = (uint32_t )(~ (1 << (int )ha->portnum)) & drv_active; ql_log(2U, vha, 45233, "%s(%ld): drv_active: 0x%08x\n", "qla8044_clear_drv_active", vha->host_no, drv_active); qla8044_wr_direct(vha, 3U, drv_active); return; } } static int qla8044_device_bootstrap(struct scsi_qla_host *vha ) { int rval ; int i ; uint32_t old_count ; uint32_t count ; int need_reset ; uint32_t idc_ctrl ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { rval = 258; old_count = 0U; count = 0U; need_reset = 0; ha = vha->hw; need_reset = qla8044_need_reset(vha); if (need_reset == 0) { tmp = qla8044_rd_direct(vha, 2U); old_count = (uint32_t )tmp; i = 0; goto ldv_66334; ldv_66333: msleep(200U); tmp___0 = qla8044_rd_direct(vha, 2U); count = (uint32_t )tmp___0; if (count != old_count) { rval = 0; goto dev_ready; } else { } i = i + 1; ldv_66334: ; if (i <= 9) { goto ldv_66333; } else { } qla8044_flash_lock_recovery(vha); } else if (*((unsigned long *)ha + 2UL) != 0UL) { qla8044_flash_lock_recovery(vha); } else { } ql_log(2U, vha, 45234, "%s: HW State: INITIALIZING\n", "qla8044_device_bootstrap"); qla8044_wr_direct(vha, 4U, 2U); qla8044_idc_unlock(ha); rval = qla8044_start_firmware(vha); qla8044_idc_lock(ha); if (rval != 0) { ql_log(2U, vha, 45235, "%s: HW State: FAILED\n", "qla8044_device_bootstrap"); qla8044_clear_drv_active(ha); qla8044_wr_direct(vha, 4U, 6U); return (rval); } else { } idc_ctrl = qla8044_rd_reg(ha, 14224UL); if ((idc_ctrl & 2U) != 0U) { qla8044_wr_reg(ha, 14224UL, idc_ctrl & 4294967293U); ha->fw_dumped = 0; } else { } dev_ready: ql_log(2U, vha, 45236, "%s: HW State: READY\n", "qla8044_device_bootstrap"); qla8044_wr_direct(vha, 4U, 3U); return (rval); } } static void qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha ) { u8 *phdr ; { if ((unsigned long )vha->reset_tmplt.buff == (unsigned long )((uint8_t *)0U)) { ql_log(0U, vha, 45237, "%s: Error Invalid reset_seq_template\n", "qla8044_dump_reset_seq_hdr"); return; } else { } phdr = vha->reset_tmplt.buff; ql_dbg(524288U, vha, 45238, "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n", (int )*phdr, (int )*(phdr + 1UL), (int )*(phdr + 2UL), (int )*(phdr + 3UL), (int )*(phdr + 4UL), (int )*(phdr + 5UL), (int )*(phdr + 6UL), (int )*(phdr + 7UL), (int )*(phdr + 8UL), (int )*(phdr + 9UL), (int )*(phdr + 10UL), (int )*(phdr + 11UL), (int )*(phdr + 12UL), (int )*(phdr + 13UL), (int )*(phdr + 14UL), (int )*(phdr + 
15UL)); return; } } static int qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha ) { uint32_t sum ; uint16_t *buff ; int u16_count ; uint16_t *tmp ; int tmp___0 ; { sum = 0U; buff = (uint16_t *)vha->reset_tmplt.buff; u16_count = (int )((unsigned int )(vha->reset_tmplt.hdr)->size / 2U); goto ldv_66349; ldv_66348: tmp = buff; buff = buff + 1; sum = (uint32_t )*tmp + sum; ldv_66349: tmp___0 = u16_count; u16_count = u16_count - 1; if (tmp___0 > 0) { goto ldv_66348; } else { } goto ldv_66352; ldv_66351: sum = (sum & 65535U) + (sum >> 16); ldv_66352: ; if (sum >> 16 != 0U) { goto ldv_66351; } else { } if (sum != 4294967295U) { return (0); } else { ql_log(0U, vha, 45239, "%s: Reset seq checksum failed\n", "qla8044_reset_seq_checksum_test"); return (258); } } } void qla8044_read_reset_template(struct scsi_qla_host *vha ) { uint8_t *p_buff ; uint32_t addr ; uint32_t tmplt_hdr_def_size ; uint32_t tmplt_hdr_size ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { vha->reset_tmplt.seq_error = 0; tmp = vmalloc(8192UL); vha->reset_tmplt.buff = (uint8_t *)tmp; if ((unsigned long )vha->reset_tmplt.buff == (unsigned long )((uint8_t *)0U)) { ql_log(0U, vha, 45240, "%s: Failed to allocate reset template resources\n", "qla8044_read_reset_template"); goto exit_read_reset_template; } else { } p_buff = vha->reset_tmplt.buff; addr = 5177344U; tmplt_hdr_def_size = 4U; ql_dbg(524288U, vha, 45241, "%s: Read template hdr size %d from Flash\n", "qla8044_read_reset_template", tmplt_hdr_def_size); tmp___0 = qla8044_read_flash_data(vha, p_buff, addr, (int )tmplt_hdr_def_size); if (tmp___0 != 0) { ql_log(0U, vha, 45242, "%s: Failed to read reset template\n", "qla8044_read_reset_template"); goto exit_read_template_error; } else { } vha->reset_tmplt.hdr = (struct qla8044_reset_template_hdr *)vha->reset_tmplt.buff; tmplt_hdr_size = (unsigned int )(vha->reset_tmplt.hdr)->hdr_size / 4U; if (tmplt_hdr_size != tmplt_hdr_def_size || (unsigned int )(vha->reset_tmplt.hdr)->signature != 51966U) { ql_log(0U, vha, 45243, "%s: Template Header size invalid %d tmplt_hdr_def_size %d!!!\n", "qla8044_read_reset_template", tmplt_hdr_size, tmplt_hdr_def_size); goto exit_read_template_error; } else { } addr = (uint32_t )((int )(vha->reset_tmplt.hdr)->hdr_size + 5177344); p_buff = vha->reset_tmplt.buff + (unsigned long )(vha->reset_tmplt.hdr)->hdr_size; tmplt_hdr_def_size = (uint32_t )((unsigned long )((int )(vha->reset_tmplt.hdr)->size - (int )(vha->reset_tmplt.hdr)->hdr_size) / 4UL); ql_dbg(524288U, vha, 45244, "%s: Read rest of the template size %d\n", "qla8044_read_reset_template", (int )(vha->reset_tmplt.hdr)->size); tmp___1 = qla8044_read_flash_data(vha, p_buff, addr, (int )tmplt_hdr_def_size); if (tmp___1 != 0) { ql_log(0U, vha, 45245, "%s: Failed to read reset tempelate\n", "qla8044_read_reset_template"); goto exit_read_template_error; } else { } tmp___2 = qla8044_reset_seq_checksum_test(vha); if (tmp___2 != 0) { ql_log(0U, vha, 45246, "%s: Reset Seq checksum failed!\n", "qla8044_read_reset_template"); goto exit_read_template_error; } else { } ql_dbg(524288U, vha, 45247, "%s: Reset Seq checksum passed! 
Get stop, start and init seq offsets\n", "qla8044_read_reset_template"); vha->reset_tmplt.init_offset = vha->reset_tmplt.buff + (unsigned long )(vha->reset_tmplt.hdr)->init_seq_offset; vha->reset_tmplt.start_offset = vha->reset_tmplt.buff + (unsigned long )(vha->reset_tmplt.hdr)->start_seq_offset; vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff + (unsigned long )(vha->reset_tmplt.hdr)->hdr_size; qla8044_dump_reset_seq_hdr(vha); goto exit_read_reset_template; exit_read_template_error: vfree((void const *)vha->reset_tmplt.buff); exit_read_reset_template: ; return; } } void qla8044_set_idc_dontreset(struct scsi_qla_host *vha ) { uint32_t idc_ctrl ; struct qla_hw_data *ha ; { ha = vha->hw; idc_ctrl = qla8044_rd_reg(ha, 14224UL); idc_ctrl = idc_ctrl | 1U; ql_dbg(524288U, vha, 45248, "%s: idc_ctrl = %d\n", "qla8044_set_idc_dontreset", idc_ctrl); qla8044_wr_reg(ha, 14224UL, idc_ctrl); return; } } __inline void qla8044_set_rst_ready(struct scsi_qla_host *vha ) { uint32_t drv_state ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla8044_rd_direct(vha, 5U); drv_state = (uint32_t )tmp; drv_state = (uint32_t )(1 << (int )ha->portnum) | drv_state; ql_log(2U, vha, 45249, "%s(%ld): drv_state: 0x%08x\n", "qla8044_set_rst_ready", vha->host_no, drv_state); qla8044_wr_direct(vha, 5U, drv_state); return; } } static void qla8044_need_reset_handler(struct scsi_qla_host *vha ) { uint32_t dev_state ; uint32_t drv_state ; uint32_t drv_active ; unsigned long reset_timeout ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { dev_state = 0U; ha = vha->hw; ql_log(0U, vha, 45250, "%s: Performing ISP error recovery\n", "qla8044_need_reset_handler"); if (*((unsigned long *)vha + 19UL) != 0UL) { qla8044_idc_unlock(ha); qla2x00_abort_isp_cleanup(vha); (*((ha->isp_ops)->get_flash_version))(vha, (void *)(vha->req)->ring); (*((ha->isp_ops)->nvram_config))(vha); qla8044_idc_lock(ha); } else { } tmp = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp; tmp___0 = qla8044_rd_direct(vha, 5U); drv_state = (uint32_t )tmp___0; tmp___1 = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp___1; ql_log(2U, vha, 45253, "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n", "qla8044_need_reset_handler", vha->host_no, drv_state, drv_active, dev_state); qla8044_set_rst_ready(vha); reset_timeout = (unsigned long )(ha->fcoe_reset_timeout * 250U) + (unsigned long )jiffies; ldv_66393: ; if ((long )((unsigned long )jiffies - reset_timeout) >= 0L) { ql_log(2U, vha, 45252, "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n", "qla8044_need_reset_handler", (int )ha->portnum, drv_state, drv_active); goto ldv_66392; } else { } qla8044_idc_unlock(ha); msleep(1000U); qla8044_idc_lock(ha); tmp___2 = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp___2; tmp___3 = qla8044_rd_direct(vha, 5U); drv_state = (uint32_t )tmp___3; tmp___4 = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp___4; if ((drv_state & drv_active) != drv_active && dev_state == 4U) { goto ldv_66393; } else { } ldv_66392: ; if (drv_state != drv_active) { ql_log(2U, vha, 45255, "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n", "qla8044_need_reset_handler", vha->host_no, (int )ha->portnum, drv_active ^ drv_state); drv_active = drv_active & drv_state; qla8044_wr_direct(vha, 3U, drv_active); } else if (*((unsigned long *)ha + 2UL) != 0UL && dev_state == 4U) { ha->flags.nic_core_reset_owner = 0U; qla8044_device_bootstrap(vha); return; } 
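/* Remaining cases once the ack wait finishes: a function that is no longer present in drv_active just drops reset ownership, otherwise the reset owner (or the last acking function per the drv_state/drv_active test below) clears nic_core_reset_owner and performs the device bootstrap. */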
else { } if (((uint32_t )(1 << (int )ha->portnum) & drv_active) == 0U) { ha->flags.nic_core_reset_owner = 0U; return; } else { } if (*((unsigned long *)ha + 2UL) != 0UL || (drv_state & drv_active) == 128U) { ha->flags.nic_core_reset_owner = 0U; qla8044_device_bootstrap(vha); } else { } return; } } static void qla8044_set_drv_active(struct scsi_qla_host *vha ) { uint32_t drv_active ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp; drv_active = (uint32_t )(1 << (int )ha->portnum) | drv_active; ql_log(2U, vha, 45256, "%s(%ld): drv_active: 0x%08x\n", "qla8044_set_drv_active", vha->host_no, drv_active); qla8044_wr_direct(vha, 3U, drv_active); return; } } static int qla8044_check_drv_active(struct scsi_qla_host *vha ) { uint32_t drv_active ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp; if (((uint32_t )(1 << (int )ha->portnum) & drv_active) != 0U) { return (0); } else { return (3); } } } static void qla8044_clear_idc_dontreset(struct scsi_qla_host *vha ) { uint32_t idc_ctrl ; struct qla_hw_data *ha ; { ha = vha->hw; idc_ctrl = qla8044_rd_reg(ha, 14224UL); idc_ctrl = idc_ctrl & 4294967294U; ql_log(2U, vha, 45257, "%s: idc_ctrl = %d\n", "qla8044_clear_idc_dontreset", idc_ctrl); qla8044_wr_reg(ha, 14224UL, idc_ctrl); return; } } static int qla8044_set_idc_ver(struct scsi_qla_host *vha ) { int idc_ver ; uint32_t drv_active ; int rval ; struct qla_hw_data *ha ; int tmp ; uint32_t tmp___0 ; { rval = 0; ha = vha->hw; tmp = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp; if ((uint32_t )(1 << (int )ha->portnum) == drv_active) { idc_ver = qla8044_rd_direct(vha, 8U); idc_ver = idc_ver & -256; idc_ver = idc_ver | 1; qla8044_wr_direct(vha, 8U, (uint32_t const )idc_ver); ql_log(2U, vha, 45258, "%s: IDC version updated to %d\n", "qla8044_set_idc_ver", idc_ver); } else { idc_ver = qla8044_rd_direct(vha, 8U); idc_ver = idc_ver & 255; if (idc_ver != 1) { ql_log(2U, vha, 45259, "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n", "qla8044_set_idc_ver", 1, idc_ver); rval = 258; goto exit_set_idc_ver; } else { } } tmp___0 = qla8044_rd_reg(ha, 14232UL); idc_ver = (int )tmp___0; idc_ver = ~ (3 << (int )ha->portnum * 2) & idc_ver; idc_ver = idc_ver; qla8044_wr_reg(ha, 14232UL, (uint32_t )idc_ver); exit_set_idc_ver: ; return (rval); } } static int qla8044_update_idc_reg(struct scsi_qla_host *vha ) { uint32_t drv_active ; int rval ; struct qla_hw_data *ha ; int tmp ; { rval = 0; ha = vha->hw; if (*((unsigned long *)vha + 19UL) != 0UL) { goto exit_update_idc_reg; } else { } qla8044_idc_lock(ha); qla8044_set_drv_active(vha); tmp = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp; if ((uint32_t )(1 << (int )ha->portnum) == drv_active && ql2xdontresethba == 0) { qla8044_clear_idc_dontreset(vha); } else { } rval = qla8044_set_idc_ver(vha); if (rval == 258) { qla8044_clear_drv_active(ha); } else { } qla8044_idc_unlock(ha); exit_update_idc_reg: ; return (rval); } } static void qla8044_need_qsnt_handler(struct scsi_qla_host *vha ) { unsigned long qsnt_timeout ; uint32_t drv_state ; uint32_t drv_active ; uint32_t dev_state ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { ha = vha->hw; if (*((unsigned long *)vha + 19UL) != 0UL) { qla2x00_quiesce_io(vha); } else { return; } qla8044_set_qsnt_ready(vha); qsnt_timeout = (unsigned long )jiffies + 7500UL; tmp = qla8044_rd_direct(vha, 5U); drv_state = 
(uint32_t )tmp; tmp___0 = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp___0; drv_active = drv_active << 1; goto ldv_66442; ldv_66441: ; if ((long )((unsigned long )jiffies - qsnt_timeout) >= 0L) { clear_bit(20L, (unsigned long volatile *)(& vha->dpc_flags)); qla8044_wr_direct(vha, 4U, 3U); qla8044_clear_qsnt_ready(vha); ql_log(2U, vha, 45260, "Timeout waiting for quiescent ack!!!\n"); return; } else { } qla8044_idc_unlock(ha); msleep(1000U); qla8044_idc_lock(ha); tmp___1 = qla8044_rd_direct(vha, 5U); drv_state = (uint32_t )tmp___1; tmp___2 = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp___2; drv_active = drv_active << 1; ldv_66442: ; if (drv_state != drv_active) { goto ldv_66441; } else { } tmp___3 = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp___3; if (dev_state == 5U) { qla8044_wr_direct(vha, 4U, 7U); ql_log(2U, vha, 45261, "%s: HW State: QUIESCENT\n", "qla8044_need_qsnt_handler"); } else { } return; } } int qla8044_device_state_handler(struct scsi_qla_host *vha ) { uint32_t dev_state ; int rval ; unsigned long dev_init_timeout ; struct qla_hw_data *ha ; int tmp ; char *tmp___0 ; char *tmp___1 ; char *tmp___2 ; char *tmp___3 ; int tmp___4 ; int tmp___5 ; char *tmp___6 ; char *tmp___7 ; { rval = 0; ha = vha->hw; rval = qla8044_update_idc_reg(vha); if (rval == 258) { goto exit_error; } else { } tmp = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp; if (dev_state <= 7U) { tmp___0 = qdev_state(dev_state); tmp___1 = tmp___0; } else { tmp___1 = (char *)"Unknown"; } ql_dbg(524288U, vha, 45262, "Device state is 0x%x = %s\n", dev_state, tmp___1); dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; qla8044_idc_lock(ha); ldv_66469: ; if ((long )((unsigned long )jiffies - dev_init_timeout) >= 0L) { tmp___4 = qla8044_check_drv_active(vha); if (tmp___4 == 0) { if (dev_state <= 7U) { tmp___2 = qdev_state(dev_state); tmp___3 = tmp___2; } else { tmp___3 = (char *)"Unknown"; } ql_log(1U, vha, 45263, "%s: Device Init Failed 0x%x = %s\n", (char *)"qla2xxx", dev_state, tmp___3); qla8044_wr_direct(vha, 4U, 6U); } else { } } else { } tmp___5 = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp___5; if (dev_state <= 7U) { tmp___6 = qdev_state(dev_state); tmp___7 = tmp___6; } else { tmp___7 = (char *)"Unknown"; } ql_log(2U, vha, 45264, "Device state is 0x%x = %s\n", dev_state, tmp___7); switch (dev_state) { case 3U: ha->flags.nic_core_reset_owner = 0U; goto exit; case 1U: rval = qla8044_device_bootstrap(vha); goto ldv_66462; case 2U: qla8044_idc_unlock(ha); msleep(1000U); qla8044_idc_lock(ha); goto ldv_66462; case 4U: qla8044_need_reset_handler(vha); goto ldv_66462; case 5U: qla8044_need_qsnt_handler(vha); dev_init_timeout = (unsigned long )(ha->fcoe_reset_timeout * 250U) + (unsigned long )jiffies; goto ldv_66462; case 7U: ql_log(2U, vha, 45265, "HW State: QUIESCENT\n"); qla8044_idc_unlock(ha); msleep(1000U); qla8044_idc_lock(ha); dev_init_timeout = (unsigned long )(ha->fcoe_reset_timeout * 250U) + (unsigned long )jiffies; goto ldv_66462; case 6U: ha->flags.nic_core_reset_owner = 0U; qla8044_idc_unlock(ha); qla8xxx_dev_failed_handler(vha); rval = 258; qla8044_idc_lock(ha); goto exit; default: qla8044_idc_unlock(ha); qla8xxx_dev_failed_handler(vha); rval = 258; qla8044_idc_lock(ha); goto exit; } ldv_66462: ; goto ldv_66469; exit: qla8044_idc_unlock(ha); exit_error: ; return (rval); } } static int qla8044_check_temp(struct scsi_qla_host *vha ) { uint32_t temp ; uint32_t temp_state ; uint32_t temp_val ; int status ; int tmp ; { 
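/* Temperature word layout, as decoded below: bits 15:0 hold the state, bits 31:16 the temperature in degrees C. State 3 (shutdown/panic) fails the check with 258 (0x102, QLA_FUNCTION_FAILED in the driver headers); state 2 only warns. */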
status = 0; tmp = qla8044_rd_direct(vha, 13U); temp = (uint32_t )tmp; temp_state = temp & 65535U; temp_val = temp >> 16; if (temp_state == 3U) { ql_log(1U, vha, 45266, "Device temperature %d degrees C exceeds maximum allowed. Hardware has been shut down\n", temp_val); status = 258; return (status); } else if (temp_state == 2U) { ql_log(1U, vha, 45267, "Device temperature %d degrees C exceeds operating range. Immediate action needed.\n", temp_val); } else { } return (0); } } int qla8044_read_temperature(scsi_qla_host_t *vha ) { uint32_t temp ; int tmp ; { tmp = qla8044_rd_direct(vha, 13U); temp = (uint32_t )tmp; return ((int )(temp >> 16)); } } int qla8044_check_fw_alive(struct scsi_qla_host *vha ) { uint32_t fw_heartbeat_counter ; uint32_t halt_status1 ; uint32_t halt_status2 ; int status ; int tmp ; int tmp___0 ; int tmp___1 ; { status = 0; tmp = qla8044_rd_direct(vha, 2U); fw_heartbeat_counter = (uint32_t )tmp; if (fw_heartbeat_counter == 4294967295U) { ql_dbg(524288U, vha, 45268, "scsi%ld: %s: Device in frozen state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", vha->host_no, "qla8044_check_fw_alive"); return (status); } else { } if ((uint32_t )vha->fw_heartbeat_counter == fw_heartbeat_counter) { vha->seconds_since_last_heartbeat = vha->seconds_since_last_heartbeat + 1; if (vha->seconds_since_last_heartbeat == 2) { vha->seconds_since_last_heartbeat = 0; tmp___0 = qla8044_rd_direct(vha, 0U); halt_status1 = (uint32_t )tmp___0; tmp___1 = qla8044_rd_direct(vha, 1U); halt_status2 = (uint32_t )tmp___1; ql_log(2U, vha, 45269, "scsi(%ld): %s, ISP8044 Dumping hw/fw registers:\n PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n", vha->host_no, "qla8044_check_fw_alive", halt_status1, halt_status2); status = 258; } else { } } else { vha->seconds_since_last_heartbeat = 0; } vha->fw_heartbeat_counter = (int )fw_heartbeat_counter; return (status); } } void qla8044_watchdog(struct scsi_qla_host *vha ) { uint32_t dev_state ; uint32_t halt_status ; int halt_status_unrecoverable ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; { halt_status_unrecoverable = 0; ha = vha->hw; tmp___5 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___5 == 0) { tmp___6 = constant_test_bit(18L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___6 == 0) { tmp = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp; tmp___0 = qla8044_check_fw_alive(vha); if (tmp___0 != 0) { ha->flags.isp82xx_fw_hung = 1U; ql_log(1U, vha, 45322, "Firmware hung.\n"); qla82xx_clear_pending_mbx(vha); } else { } tmp___4 = qla8044_check_temp(vha); if (tmp___4 != 0) { set_bit(17L, (unsigned long volatile *)(& vha->dpc_flags)); ha->flags.isp82xx_fw_hung = 1U; qla2xxx_wake_dpc(vha); } else if (dev_state == 4U) { tmp___3 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___3 == 0) { ql_log(2U, vha, 45270, "%s: HW State: NEED RESET!\n", "qla8044_watchdog"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else { goto _L___0; } } else _L___0: /* CIL Label */ if (dev_state == 5U) { tmp___2 = constant_test_bit(20L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___2 == 0) { ql_log(2U, vha, 45271, "%s: HW State: NEED QUIES detected!\n", "qla8044_watchdog"); set_bit(20L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else { goto _L; } } else _L: /* CIL Label */ if (*((unsigned long *)ha + 2UL) != 0UL) { tmp___1 = 
qla8044_rd_direct(vha, 0U); halt_status = (uint32_t )tmp___1; if ((halt_status & 1073741824U) != 0U) { ql_log(0U, vha, 45272, "%s: Firmware error detected device is being reset\n", "qla8044_watchdog"); } else if ((int )halt_status < 0) { halt_status_unrecoverable = 1; } else { } if (halt_status_unrecoverable != 0) { set_bit(17L, (unsigned long volatile *)(& vha->dpc_flags)); } else if (dev_state == 7U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); ql_log(2U, vha, 45273, "%s: FW CONTEXT Reset needed!\n", "qla8044_watchdog"); } else { ql_log(2U, vha, 45274, "%s: detect abort needed\n", "qla8044_watchdog"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } qla2xxx_wake_dpc(vha); } else { } } else { } } else { } return; } } static int qla8044_minidump_process_control(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr ) { struct qla8044_minidump_entry_crb *crb_entry ; uint32_t read_value ; uint32_t opcode ; uint32_t poll_time ; uint32_t addr ; uint32_t index ; uint32_t crb_addr ; uint32_t rval ; unsigned long wtime ; struct qla8044_minidump_template_hdr *tmplt_hdr ; int i ; struct qla_hw_data *ha ; { rval = 0U; ha = vha->hw; ql_dbg(524288U, vha, 45277, "Entering fn: %s\n", "qla8044_minidump_process_control"); tmplt_hdr = (struct qla8044_minidump_template_hdr *)ha->md_tmplt_hdr; crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr; crb_addr = crb_entry->addr; i = 0; goto ldv_66523; ldv_66522: opcode = (uint32_t )crb_entry->crb_ctrl.opcode; if ((int )opcode & 1) { qla8044_wr_reg_indirect(vha, crb_addr, crb_entry->value_1); opcode = opcode & 4294967294U; } else { } if ((opcode & 2U) != 0U) { qla8044_rd_reg_indirect(vha, crb_addr, & read_value); qla8044_wr_reg_indirect(vha, crb_addr, read_value); opcode = opcode & 4294967293U; } else { } if ((opcode & 4U) != 0U) { qla8044_rd_reg_indirect(vha, crb_addr, & read_value); read_value = crb_entry->value_2 & read_value; opcode = opcode & 4294967291U; if ((opcode & 8U) != 0U) { read_value = crb_entry->value_3 | read_value; opcode = opcode & 4294967287U; } else { } qla8044_wr_reg_indirect(vha, crb_addr, read_value); } else { } if ((opcode & 8U) != 0U) { qla8044_rd_reg_indirect(vha, crb_addr, & read_value); read_value = crb_entry->value_3 | read_value; qla8044_wr_reg_indirect(vha, crb_addr, read_value); opcode = opcode & 4294967287U; } else { } if ((opcode & 16U) != 0U) { poll_time = (uint32_t )crb_entry->crb_strd.poll_timeout; wtime = (unsigned long )poll_time + (unsigned long )jiffies; qla8044_rd_reg_indirect(vha, crb_addr, & read_value); ldv_66521: ; if ((crb_entry->value_2 & read_value) == crb_entry->value_1) { goto ldv_66514; } else if ((long )((unsigned long )jiffies - wtime) >= 0L) { rval = 258U; goto ldv_66514; } else { qla8044_rd_reg_indirect(vha, crb_addr, & read_value); } goto ldv_66521; ldv_66514: opcode = opcode & 4294967279U; } else { } if ((opcode & 32U) != 0U) { if ((unsigned int )crb_entry->crb_strd.state_index_a != 0U) { index = (uint32_t )crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else { addr = crb_addr; } qla8044_rd_reg_indirect(vha, addr, & read_value); index = (uint32_t )crb_entry->crb_ctrl.state_index_v; tmplt_hdr->saved_state_array[index] = read_value; opcode = opcode & 4294967263U; } else { } if ((opcode & 64U) != 0U) { if ((unsigned int )crb_entry->crb_strd.state_index_a != 0U) { index = (uint32_t )crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else { addr = crb_addr; } if ((unsigned int 
)crb_entry->crb_ctrl.state_index_v != 0U) { index = (uint32_t )crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; } else { read_value = crb_entry->value_1; } qla8044_wr_reg_indirect(vha, addr, read_value); opcode = opcode & 4294967231U; } else { } if ((opcode & 128U) != 0U) { index = (uint32_t )crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; read_value = read_value << (int )crb_entry->crb_ctrl.shl; read_value = read_value >> (int )crb_entry->crb_ctrl.shr; if (crb_entry->value_2 != 0U) { read_value = crb_entry->value_2 & read_value; } else { } read_value = crb_entry->value_3 | read_value; read_value = crb_entry->value_1 + read_value; tmplt_hdr->saved_state_array[index] = read_value; opcode = opcode & 4294967167U; } else { } crb_addr = (uint32_t )crb_entry->crb_strd.addr_stride + crb_addr; i = i + 1; ldv_66523: ; if ((uint32_t )i < crb_entry->op_count) { goto ldv_66522; } else { } return ((int )rval); } } static void qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t r_stride ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla8044_minidump_entry_crb *crb_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; ql_dbg(524288U, vha, 45278, "Entering fn: %s\n", "qla8044_minidump_process_rdcrb"); crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr; r_addr = crb_hdr->addr; r_stride = (uint32_t )crb_hdr->crb_strd.addr_stride; loop_cnt = crb_hdr->op_count; i = 0U; goto ldv_66539; ldv_66538: qla8044_rd_reg_indirect(vha, r_addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_addr; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; r_addr = r_addr + r_stride; i = i + 1U; ldv_66539: ; if (i < loop_cnt) { goto ldv_66538; } else { } *d_ptr = data_ptr; return; } } static int qla8044_minidump_process_rdmem(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t r_value ; uint32_t r_data ; uint32_t i ; uint32_t j ; uint32_t loop_cnt ; struct qla8044_minidump_entry_rdmem *m_hdr ; unsigned long flags ; uint32_t *data_ptr ; struct qla_hw_data *ha ; uint32_t *tmp ; { data_ptr = *d_ptr; ha = vha->hw; ql_dbg(524288U, vha, 45279, "Entering fn: %s\n", "qla8044_minidump_process_rdmem"); m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr; r_addr = m_hdr->read_addr; loop_cnt = m_hdr->read_data_size / 16U; ql_dbg(524288U, vha, 45296, "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n", "qla8044_minidump_process_rdmem", r_addr, m_hdr->read_data_size); if ((r_addr & 15U) != 0U) { ql_dbg(524288U, vha, 45297, "[%s]: Read addr 0x%x not 16 bytes aligned\n", "qla8044_minidump_process_rdmem", r_addr); return (258); } else { } if ((m_hdr->read_data_size & 15U) != 0U) { ql_dbg(524288U, vha, 45298, "[%s]: Read data[0x%x] not multiple of 16 bytes\n", "qla8044_minidump_process_rdmem", m_hdr->read_data_size); return (258); } else { } ql_dbg(524288U, vha, 45299, "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", "qla8044_minidump_process_rdmem", r_addr, m_hdr->read_data_size, loop_cnt); flags = _raw_write_lock_irqsave(& ha->hw_lock); i = 0U; goto ldv_66570; ldv_66569: qla8044_wr_reg_indirect(vha, 1090519188U, r_addr); r_value = 0U; qla8044_wr_reg_indirect(vha, 1090519192U, r_value); r_value = 2U; qla8044_wr_reg_indirect(vha, 1090519184U, r_value); r_value = 3U; qla8044_wr_reg_indirect(vha, 1090519184U, 
r_value); j = 0U; goto ldv_66562; ldv_66561: qla8044_rd_reg_indirect(vha, 1090519184U, & r_value); if ((r_value & 8U) == 0U) { goto ldv_66560; } else { } j = j + 1U; ldv_66562: ; if (j <= 999U) { goto ldv_66561; } else { } ldv_66560: ; if (j > 999U) { _raw_write_unlock_irqrestore(& ha->hw_lock, flags); return (0); } else { } j = 0U; goto ldv_66567; ldv_66566: qla8044_rd_reg_indirect(vha, (uint32_t )MD_MIU_TEST_AGT_RDDATA___0[j], & r_data); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_data; j = j + 1U; ldv_66567: ; if (j <= 3U) { goto ldv_66566; } else { } r_addr = r_addr + 16U; i = i + 1U; ldv_66570: ; if (i < loop_cnt) { goto ldv_66569; } else { } _raw_write_unlock_irqrestore(& ha->hw_lock, flags); ql_dbg(524288U, vha, 45300, "Leaving fn: %s datacount: 0x%x\n", "qla8044_minidump_process_rdmem", loop_cnt * 16U); *d_ptr = data_ptr; return (0); } } static uint32_t qla8044_minidump_process_rdrom(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t fl_addr ; uint32_t u32_count ; uint32_t rval ; struct qla8044_minidump_entry_rdrom *rom_hdr ; uint32_t *data_ptr ; int tmp ; { data_ptr = *d_ptr; rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr; fl_addr = rom_hdr->read_addr; u32_count = rom_hdr->read_data_size / 4U; ql_dbg(524288U, vha, 45301, "[%s]: fl_addr: 0x%x, count: 0x%x\n", "qla8044_minidump_process_rdrom", fl_addr, u32_count); tmp = qla8044_lockless_flash_read_u32(vha, fl_addr, (uint8_t *)data_ptr, (int )u32_count); rval = (uint32_t )tmp; if (rval != 0U) { ql_log(0U, vha, 45302, "%s: Flash Read Error,Count=%d\n", "qla8044_minidump_process_rdrom", u32_count); return (258U); } else { data_ptr = data_ptr + (unsigned long )u32_count; *d_ptr = data_ptr; return (0U); } } } static void qla8044_mark_entry_skipped(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , int index ) { { entry_hdr->d_ctrl.driver_flags = (uint8_t )((unsigned int )entry_hdr->d_ctrl.driver_flags | 128U); ql_log(2U, vha, 45303, "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", vha->host_no, index, entry_hdr->entry_type, (int )entry_hdr->d_ctrl.entry_capture_mask); return; } } static int qla8044_minidump_process_l2tag(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t addr ; uint32_t r_addr ; uint32_t c_addr ; uint32_t t_r_addr ; uint32_t i ; uint32_t k ; uint32_t loop_count ; uint32_t t_value ; uint32_t r_cnt ; uint32_t r_value ; unsigned long p_wait ; unsigned long w_time ; unsigned long p_mask ; uint32_t c_value_w ; uint32_t c_value_r ; struct qla8044_minidump_entry_cache *cache_hdr ; int rval ; uint32_t *data_ptr ; uint32_t *tmp ; { rval = 258; data_ptr = *d_ptr; ql_dbg(524288U, vha, 45304, "Entering fn: %s\n", "qla8044_minidump_process_l2tag"); cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = (uint32_t )cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = (uint32_t )cache_hdr->addr_ctrl.init_tag_value; r_cnt = (uint32_t )cache_hdr->read_ctrl.read_addr_cnt; p_wait = (unsigned long )cache_hdr->cache_ctrl.poll_wait; p_mask = (unsigned long )cache_hdr->cache_ctrl.poll_mask; i = 0U; goto ldv_66627; ldv_66626: qla8044_wr_reg_indirect(vha, t_r_addr, t_value); if (c_value_w != 0U) { qla8044_wr_reg_indirect(vha, c_addr, c_value_w); } else { } if (p_mask != 0UL) { w_time = (unsigned long )jiffies + p_wait; ldv_66622: 
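/* Poll the cache control register until the poll-mask bits clear; if the jiffies-based window (p_wait) expires first, the whole l2tag entry fails with the preset rval of 258. */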
qla8044_rd_reg_indirect(vha, c_addr, & c_value_r); if (((unsigned long )c_value_r & p_mask) == 0UL) { goto ldv_66615; } else if ((long )((unsigned long )jiffies - w_time) >= 0L) { return (rval); } else { } goto ldv_66622; ldv_66615: ; } else { } addr = r_addr; k = 0U; goto ldv_66624; ldv_66623: qla8044_rd_reg_indirect(vha, addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; addr = (uint32_t )cache_hdr->read_ctrl.read_addr_stride + addr; k = k + 1U; ldv_66624: ; if (k < r_cnt) { goto ldv_66623; } else { } t_value = (uint32_t )cache_hdr->addr_ctrl.tag_value_stride + t_value; i = i + 1U; ldv_66627: ; if (i < loop_count) { goto ldv_66626; } else { } *d_ptr = data_ptr; return (0); } } static void qla8044_minidump_process_l1cache(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t addr ; uint32_t r_addr ; uint32_t c_addr ; uint32_t t_r_addr ; uint32_t i ; uint32_t k ; uint32_t loop_count ; uint32_t t_value ; uint32_t r_cnt ; uint32_t r_value ; uint32_t c_value_w ; struct qla8044_minidump_entry_cache *cache_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; { data_ptr = *d_ptr; cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = (uint32_t )cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = (uint32_t )cache_hdr->addr_ctrl.init_tag_value; r_cnt = (uint32_t )cache_hdr->read_ctrl.read_addr_cnt; i = 0U; goto ldv_66651; ldv_66650: qla8044_wr_reg_indirect(vha, t_r_addr, t_value); qla8044_wr_reg_indirect(vha, c_addr, c_value_w); addr = r_addr; k = 0U; goto ldv_66648; ldv_66647: qla8044_rd_reg_indirect(vha, addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; addr = (uint32_t )cache_hdr->read_ctrl.read_addr_stride + addr; k = k + 1U; ldv_66648: ; if (k < r_cnt) { goto ldv_66647; } else { } t_value = (uint32_t )cache_hdr->addr_ctrl.tag_value_stride + t_value; i = i + 1U; ldv_66651: ; if (i < loop_count) { goto ldv_66650; } else { } *d_ptr = data_ptr; return; } } static void qla8044_minidump_process_rdocm(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t r_stride ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla8044_minidump_entry_rdocm *ocm_hdr ; uint32_t *data_ptr ; struct qla_hw_data *ha ; uint32_t *tmp ; { data_ptr = *d_ptr; ha = vha->hw; ql_dbg(524288U, vha, 45305, "Entering fn: %s\n", "qla8044_minidump_process_rdocm"); ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr; r_addr = ocm_hdr->read_addr; r_stride = ocm_hdr->read_addr_stride; loop_cnt = ocm_hdr->op_count; ql_dbg(524288U, vha, 45306, "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n", "qla8044_minidump_process_rdocm", r_addr, r_stride, loop_cnt); i = 0U; goto ldv_66668; ldv_66667: r_value = readl((void const volatile *)((unsigned long )r_addr + ha->nx_pcibase)); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; r_addr = r_addr + r_stride; i = i + 1U; ldv_66668: ; if (i < loop_cnt) { goto ldv_66667; } else { } ql_dbg(524288U, vha, 45307, "Leaving fn: %s datacount: 0x%lx\n", "qla8044_minidump_process_rdocm", (unsigned long )loop_cnt * 4UL); *d_ptr = data_ptr; return; } } static void qla8044_minidump_process_rdmux(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t s_stride ; uint32_t s_addr ; uint32_t s_value ; uint32_t loop_cnt ; 
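/* rdmux capture: for each of op_count steps, write the current select value to the select register, read the muxed data register back, and append the (select value, read value) pair to the dump buffer. */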
uint32_t i ; uint32_t r_value ; struct qla8044_minidump_entry_mux *mux_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; ql_dbg(524288U, vha, 45308, "Entering fn: %s\n", "qla8044_minidump_process_rdmux"); mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr; r_addr = mux_hdr->read_addr; s_addr = mux_hdr->select_addr; s_stride = mux_hdr->select_value_stride; s_value = mux_hdr->select_value; loop_cnt = mux_hdr->op_count; i = 0U; goto ldv_66686; ldv_66685: qla8044_wr_reg_indirect(vha, s_addr, s_value); qla8044_rd_reg_indirect(vha, r_addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = s_value; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; s_value = s_value + s_stride; i = i + 1U; ldv_66686: ; if (i < loop_cnt) { goto ldv_66685; } else { } *d_ptr = data_ptr; return; } } static void qla8044_minidump_process_queue(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t s_addr ; uint32_t r_addr ; uint32_t r_stride ; uint32_t r_value ; uint32_t r_cnt ; uint32_t qid ; uint32_t i ; uint32_t k ; uint32_t loop_cnt ; struct qla8044_minidump_entry_queue *q_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; { qid = 0U; data_ptr = *d_ptr; ql_dbg(524288U, vha, 45309, "Entering fn: %s\n", "qla8044_minidump_process_queue"); q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr; s_addr = q_hdr->select_addr; r_cnt = (uint32_t )q_hdr->rd_strd.read_addr_cnt; r_stride = (uint32_t )q_hdr->rd_strd.read_addr_stride; loop_cnt = q_hdr->op_count; i = 0U; goto ldv_66709; ldv_66708: qla8044_wr_reg_indirect(vha, s_addr, qid); r_addr = q_hdr->read_addr; k = 0U; goto ldv_66706; ldv_66705: qla8044_rd_reg_indirect(vha, r_addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; r_addr = r_addr + r_stride; k = k + 1U; ldv_66706: ; if (k < r_cnt) { goto ldv_66705; } else { } qid = (uint32_t )q_hdr->q_strd.queue_id_stride + qid; i = i + 1U; ldv_66709: ; if (i < loop_cnt) { goto ldv_66708; } else { } *d_ptr = data_ptr; return; } } static uint32_t qla8044_minidump_process_pollrd(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t s_addr ; uint32_t s_value ; uint32_t r_value ; uint32_t poll_wait___0 ; uint32_t poll_mask ; uint16_t s_stride ; uint16_t i ; struct qla8044_minidump_entry_pollrd *pollrd_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; pollrd_hdr = (struct qla8044_minidump_entry_pollrd *)entry_hdr; s_addr = pollrd_hdr->select_addr; r_addr = pollrd_hdr->read_addr; s_value = pollrd_hdr->select_value; s_stride = pollrd_hdr->select_value_stride; poll_wait___0 = pollrd_hdr->poll_wait; poll_mask = pollrd_hdr->poll_mask; i = 0U; goto ldv_66731; ldv_66730: qla8044_wr_reg_indirect(vha, s_addr, s_value); poll_wait___0 = pollrd_hdr->poll_wait; ldv_66729: qla8044_rd_reg_indirect(vha, s_addr, & r_value); if ((r_value & poll_mask) != 0U) { goto ldv_66726; } else { usleep_range(1000UL, 1100UL); poll_wait___0 = poll_wait___0 - 1U; if (poll_wait___0 == 0U) { ql_log(0U, vha, 45310, "%s: TIMEOUT\n", "qla8044_minidump_process_pollrd"); goto error; } else { } } goto ldv_66729; ldv_66726: qla8044_rd_reg_indirect(vha, r_addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = s_value; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; s_value = (uint32_t )s_stride + s_value; i = (uint16_t )((int )i + 1); ldv_66731: ; if ((int )pollrd_hdr->op_count > (int )i) { goto ldv_66730; } else { } 
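/* All pollrd iterations succeeded: publish the advanced dump pointer. The error label below returns 258 without touching *d_ptr, so the caller can mark this entry as skipped. */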
*d_ptr = data_ptr; return (0U); error: ; return (258U); } } static void qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t sel_val1 ; uint32_t sel_val2 ; uint32_t t_sel_val ; uint32_t data ; uint32_t i ; uint32_t sel_addr1 ; uint32_t sel_addr2 ; uint32_t sel_val_mask ; uint32_t read_addr ; struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; { data_ptr = *d_ptr; rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *)entry_hdr; sel_val1 = rdmux2_hdr->select_value_1; sel_val2 = rdmux2_hdr->select_value_2; sel_addr1 = rdmux2_hdr->select_addr_1; sel_addr2 = rdmux2_hdr->select_addr_2; sel_val_mask = rdmux2_hdr->select_value_mask; read_addr = rdmux2_hdr->read_addr; i = 0U; goto ldv_66750; ldv_66749: qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1); t_sel_val = sel_val1 & sel_val_mask; tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = t_sel_val; qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); qla8044_rd_reg_indirect(vha, read_addr, & data); tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = data; qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2); t_sel_val = sel_val2 & sel_val_mask; tmp___1 = data_ptr; data_ptr = data_ptr + 1; *tmp___1 = t_sel_val; qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); qla8044_rd_reg_indirect(vha, read_addr, & data); tmp___2 = data_ptr; data_ptr = data_ptr + 1; *tmp___2 = data; sel_val1 = (uint32_t )rdmux2_hdr->select_value_stride + sel_val1; sel_val2 = (uint32_t )rdmux2_hdr->select_value_stride + sel_val2; i = i + 1U; ldv_66750: ; if (rdmux2_hdr->op_count > i) { goto ldv_66749; } else { } *d_ptr = data_ptr; return; } } static uint32_t qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t poll_wait___0 ; uint32_t poll_mask ; uint32_t r_value ; uint32_t data ; uint32_t addr_1 ; uint32_t addr_2 ; uint32_t value_1 ; uint32_t value_2 ; struct qla8044_minidump_entry_pollrdmwr *poll_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *)entry_hdr; addr_1 = poll_hdr->addr_1; addr_2 = poll_hdr->addr_2; value_1 = poll_hdr->value_1; value_2 = poll_hdr->value_2; poll_mask = poll_hdr->poll_mask; qla8044_wr_reg_indirect(vha, addr_1, value_1); poll_wait___0 = poll_hdr->poll_wait; ldv_66770: qla8044_rd_reg_indirect(vha, addr_1, & r_value); if ((r_value & poll_mask) != 0U) { goto ldv_66767; } else { usleep_range(1000UL, 1100UL); poll_wait___0 = poll_wait___0 - 1U; if (poll_wait___0 == 0U) { ql_log(0U, vha, 45311, "%s: TIMEOUT\n", "qla8044_minidump_process_pollrdmwr"); goto error; } else { } } goto ldv_66770; ldv_66767: qla8044_rd_reg_indirect(vha, addr_2, & data); data = poll_hdr->modify_mask & data; qla8044_wr_reg_indirect(vha, addr_2, data); qla8044_wr_reg_indirect(vha, addr_1, value_2); poll_wait___0 = poll_hdr->poll_wait; ldv_66772: qla8044_rd_reg_indirect(vha, addr_1, & r_value); if ((r_value & poll_mask) != 0U) { goto ldv_66771; } else { usleep_range(1000UL, 1100UL); poll_wait___0 = poll_wait___0 - 1U; if (poll_wait___0 == 0U) { ql_log(0U, vha, 45312, "%s: TIMEOUT2\n", "qla8044_minidump_process_pollrdmwr"); goto error; } else { } } goto ldv_66772; ldv_66771: tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = addr_2; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = data; *d_ptr = data_ptr; return (0U); error: ; return 
(258U); } } static int qla8044_check_dma_engine_state(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; int rval ; uint32_t dma_eng_num ; uint32_t cmd_sts_and_cntrl ; uint64_t dma_base_addr ; struct qla8044_minidump_template_hdr *tmplt_hdr ; { ha = vha->hw; rval = 0; dma_eng_num = 0U; cmd_sts_and_cntrl = 0U; dma_base_addr = 0ULL; tmplt_hdr = (struct qla8044_minidump_template_hdr *)0; tmplt_hdr = (struct qla8044_minidump_template_hdr *)ha->md_tmplt_hdr; dma_eng_num = tmplt_hdr->saved_state_array[8]; dma_base_addr = (uint64_t )((dma_eng_num + 30514U) * 65536U); rval = qla8044_rd_reg_indirect(vha, (uint32_t )dma_base_addr + 8U, & cmd_sts_and_cntrl); if (rval != 0) { return (258); } else { } if ((int )cmd_sts_and_cntrl < 0) { return (0); } else { } return (258); } } static int qla8044_start_pex_dma(struct scsi_qla_host *vha , struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr ) { struct qla_hw_data *ha ; int rval ; int wait ; uint32_t dma_eng_num ; uint32_t cmd_sts_and_cntrl ; uint64_t dma_base_addr ; struct qla8044_minidump_template_hdr *tmplt_hdr ; { ha = vha->hw; rval = 0; wait = 0; dma_eng_num = 0U; cmd_sts_and_cntrl = 0U; dma_base_addr = 0ULL; tmplt_hdr = (struct qla8044_minidump_template_hdr *)0; tmplt_hdr = (struct qla8044_minidump_template_hdr *)ha->md_tmplt_hdr; dma_eng_num = tmplt_hdr->saved_state_array[8]; dma_base_addr = (uint64_t )((dma_eng_num + 30514U) * 65536U); rval = qla8044_wr_reg_indirect(vha, (uint32_t )dma_base_addr, m_hdr->desc_card_addr); if (rval != 0) { goto error_exit; } else { } rval = qla8044_wr_reg_indirect(vha, (uint32_t )dma_base_addr + 4U, 0U); if (rval != 0) { goto error_exit; } else { } rval = qla8044_wr_reg_indirect(vha, (uint32_t )dma_base_addr + 8U, m_hdr->start_dma_cmd); if (rval != 0) { goto error_exit; } else { } wait = 0; goto ldv_66796; ldv_66795: rval = qla8044_rd_reg_indirect(vha, (uint32_t )dma_base_addr + 8U, & cmd_sts_and_cntrl); if (rval != 0) { goto error_exit; } else { } if ((cmd_sts_and_cntrl & 2U) == 0U) { goto ldv_66794; } else { } __const_udelay(42950UL); wait = wait + 1; ldv_66796: ; if (wait <= 9999) { goto ldv_66795; } else { } ldv_66794: ; if (wait > 9999) { rval = 258; goto error_exit; } else { } error_exit: ; return (rval); } } static int qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; int rval ; struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr ; uint32_t chunk_size ; uint32_t read_size ; uint8_t *data_ptr ; void *rdmem_buffer ; dma_addr_t rdmem_dma ; struct qla8044_pex_dma_descriptor dma_desc ; { ha = vha->hw; rval = 0; m_hdr = (struct qla8044_minidump_entry_rdmem_pex_dma *)0; data_ptr = (uint8_t *)*d_ptr; rdmem_buffer = (void *)0; rval = qla8044_check_dma_engine_state(vha); if (rval != 0) { ql_dbg(524288U, vha, 45383, "DMA engine not available. 
Fallback to rdmem-read.\n"); return (258); } else { } m_hdr = (struct qla8044_minidump_entry_rdmem_pex_dma *)entry_hdr; rdmem_buffer = dma_alloc_attrs(& (ha->pdev)->dev, 16384UL, & rdmem_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )rdmem_buffer == (unsigned long )((void *)0)) { ql_dbg(524288U, vha, 45384, "Unable to allocate rdmem dma buffer\n"); return (258); } else { } dma_desc.cmd.dma_desc_cmd = (unsigned int )m_hdr->dma_desc_cmd & 65295U; dma_desc.cmd.dma_desc_cmd = (unsigned int )dma_desc.cmd.dma_desc_cmd | (((unsigned int )((uint16_t )(ha->pdev)->devfn) & 7U) << 4U); dma_desc.dma_bus_addr = rdmem_dma; chunk_size = 16384U; dma_desc.cmd.read_data_size = chunk_size; read_size = 0U; goto ldv_66814; ldv_66813: ; if (m_hdr->read_data_size - read_size <= 16383U) { chunk_size = m_hdr->read_data_size - read_size; dma_desc.cmd.read_data_size = chunk_size; } else { } dma_desc.src_addr = (uint64_t )(m_hdr->read_addr + read_size); rval = qla8044_ms_mem_write_128b(vha, (uint64_t )m_hdr->desc_card_addr, (uint32_t *)(& dma_desc), 3U); if (rval != 0) { ql_log(1U, vha, 45386, "%s: Error writing rdmem-dma-init to MS !!!\n", "qla8044_minidump_pex_dma_read"); goto error_exit; } else { } ql_dbg(524288U, vha, 45387, "%s: Dma-descriptor: Instruct for rdmem dma (chunk_size 0x%x).\n", "qla8044_minidump_pex_dma_read", chunk_size); rval = qla8044_start_pex_dma(vha, m_hdr); if (rval != 0) { goto error_exit; } else { } memcpy((void *)data_ptr, (void const *)rdmem_buffer, (size_t )chunk_size); data_ptr = data_ptr + (unsigned long )chunk_size; read_size = read_size + chunk_size; ldv_66814: ; if (m_hdr->read_data_size > read_size) { goto ldv_66813; } else { } *d_ptr = (uint32_t *)data_ptr; error_exit: ; if ((unsigned long )rdmem_buffer != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 16384UL, rdmem_buffer, rdmem_dma, (struct dma_attrs *)0); } else { } return (rval); } } static uint32_t qla8044_minidump_process_rddfe(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { int loop_cnt ; uint32_t addr1 ; uint32_t addr2 ; uint32_t value ; uint32_t data ; uint32_t temp ; uint32_t wrVal ; uint8_t stride ; uint8_t stride2 ; uint16_t count ; uint32_t poll ; uint32_t mask ; uint32_t data_size ; uint32_t modify_mask ; uint32_t wait_count ; uint32_t *data_ptr ; struct qla8044_minidump_entry_rddfe *rddfe ; uint32_t *tmp ; uint32_t *tmp___0 ; { wait_count = 0U; data_ptr = *d_ptr; rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr; addr1 = rddfe->addr_1; value = rddfe->value; stride = rddfe->stride; stride2 = rddfe->stride2; count = rddfe->count; poll = rddfe->poll; mask = rddfe->mask; modify_mask = rddfe->modify_mask; data_size = rddfe->data_size; addr2 = (uint32_t )stride + addr1; loop_cnt = 0; goto ldv_66850; ldv_66849: qla8044_wr_reg_indirect(vha, addr1, value | 1073741824U); wait_count = 0U; goto ldv_66840; ldv_66839: qla8044_rd_reg_indirect(vha, addr1, & temp); if ((temp & mask) != 0U) { goto ldv_66838; } else { } wait_count = wait_count + 1U; ldv_66840: ; if (wait_count < poll) { goto ldv_66839; } else { } ldv_66838: ; if (wait_count == poll) { ql_log(1U, vha, 45395, "%s: TIMEOUT\n", "qla8044_minidump_process_rddfe"); goto error; } else { qla8044_rd_reg_indirect(vha, addr2, & temp); temp = temp & modify_mask; temp = (uint32_t )((loop_cnt << 16) | loop_cnt) | temp; wrVal = (temp << 16) | temp; qla8044_wr_reg_indirect(vha, addr2, wrVal); qla8044_wr_reg_indirect(vha, addr1, value); wait_count = 0U; goto ldv_66845; ldv_66844: 
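/* Second of the three poll loops in the rddfe sequence: after writing the modified tag back to addr2 and re-arming addr1, wait for the mask bit to come up again before the strided follow-up write. */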
qla8044_rd_reg_indirect(vha, addr1, & temp); if ((temp & mask) != 0U) { goto ldv_66843; } else { } wait_count = wait_count + 1U; ldv_66845: ; if (wait_count < poll) { goto ldv_66844; } else { } ldv_66843: ; if (wait_count == poll) { ql_log(1U, vha, 45396, "%s: TIMEOUT\n", "qla8044_minidump_process_rddfe"); goto error; } else { } qla8044_wr_reg_indirect(vha, addr1, (value | 1073741824U) + (uint32_t )stride2); wait_count = 0U; goto ldv_66848; ldv_66847: qla8044_rd_reg_indirect(vha, addr1, & temp); if ((temp & mask) != 0U) { goto ldv_66846; } else { } wait_count = wait_count + 1U; ldv_66848: ; if (wait_count < poll) { goto ldv_66847; } else { } ldv_66846: ; if (wait_count == poll) { ql_log(1U, vha, 45397, "%s: TIMEOUT\n", "qla8044_minidump_process_rddfe"); goto error; } else { } qla8044_rd_reg_indirect(vha, addr2, & data); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = wrVal; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = data; } loop_cnt = loop_cnt + 1; ldv_66850: ; if ((int )count > loop_cnt) { goto ldv_66849; } else { } *d_ptr = data_ptr; return (0U); error: ; return (4294967295U); } } static uint32_t qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { int ret ; uint32_t addr1 ; uint32_t addr2 ; uint32_t value1 ; uint32_t value2 ; uint32_t data ; uint32_t selVal ; uint8_t stride1 ; uint8_t stride2 ; uint32_t addr3 ; uint32_t addr4 ; uint32_t addr5 ; uint32_t addr6 ; uint32_t addr7 ; uint16_t count ; uint16_t loop_cnt ; uint32_t poll ; uint32_t mask ; uint32_t *data_ptr ; struct qla8044_minidump_entry_rdmdio *rdmdio ; uint32_t *tmp ; uint32_t *tmp___0 ; { ret = 0; data_ptr = *d_ptr; rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr; addr1 = rdmdio->addr_1; addr2 = rdmdio->addr_2; value1 = rdmdio->value_1; stride1 = rdmdio->stride_1; stride2 = rdmdio->stride_2; count = rdmdio->count; poll = rdmdio->poll; mask = rdmdio->mask; value2 = rdmdio->value_2; addr3 = (uint32_t )stride1 + addr1; loop_cnt = 0U; goto ldv_66879; ldv_66878: ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, addr3, mask); if (ret == -1) { goto error; } else { } addr4 = addr2 - (uint32_t )stride1; ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4, value2); if (ret == -1) { goto error; } else { } addr5 = (uint32_t )((int )stride1 * -2) + addr2; ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5, value1); if (ret == -1) { goto error; } else { } addr6 = (uint32_t )((int )stride1 * -3) + addr2; ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr6, 2U); if (ret == -1) { goto error; } else { } ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, addr3, mask); if (ret == -1) { goto error; } else { } addr7 = (uint32_t )((int )stride1 * -4) + addr2; data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr7); if (data == 4294967295U) { goto error; } else { } selVal = ((value2 << 18) | (value1 << 2)) | 2U; stride2 = rdmdio->stride_2; tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = selVal; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = data; value1 = (uint32_t )stride2 + value1; *d_ptr = data_ptr; loop_cnt = (uint16_t )((int )loop_cnt + 1); ldv_66879: ; if ((int )loop_cnt < (int )count) { goto ldv_66878; } else { } return (0U); error: ; return (4294967295U); } } static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t addr1 ; uint32_t addr2 ; uint32_t value1 ; uint32_t value2 ; uint32_t poll ; 
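/* pollwr capture: wait for addr1 to report ready (a bit of 'poll' set; 'poll' also doubles as the retry cap), write value2 to addr2 and value1 to addr1, then wait for ready once more. This entry type appends nothing to the dump buffer. */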
uint32_t mask ; uint32_t r_value ; uint32_t wait_count ; struct qla8044_minidump_entry_pollwr *pollwr_hdr ; { wait_count = 0U; pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; addr1 = pollwr_hdr->addr_1; addr2 = pollwr_hdr->addr_2; value1 = pollwr_hdr->value_1; value2 = pollwr_hdr->value_2; poll = pollwr_hdr->poll; mask = pollwr_hdr->mask; goto ldv_66897; ldv_66896: qla8044_rd_reg_indirect(vha, addr1, & r_value); if ((r_value & poll) != 0U) { goto ldv_66895; } else { } wait_count = wait_count + 1U; ldv_66897: ; if (wait_count < poll) { goto ldv_66896; } else { } ldv_66895: ; if (wait_count == poll) { ql_log(1U, vha, 45398, "%s: TIMEOUT\n", "qla8044_minidump_process_pollwr"); goto error; } else { } qla8044_wr_reg_indirect(vha, addr2, value2); qla8044_wr_reg_indirect(vha, addr1, value1); wait_count = 0U; goto ldv_66902; ldv_66901: qla8044_rd_reg_indirect(vha, addr1, & r_value); if ((r_value & poll) != 0U) { goto ldv_66900; } else { } wait_count = wait_count + 1U; ldv_66902: ; if (wait_count < poll) { goto ldv_66901; } else { } ldv_66900: ; return (0U); error: ; return (4294967295U); } } int qla8044_collect_md_data(struct scsi_qla_host *vha ) { int num_entry_hdr ; struct qla8044_minidump_entry_hdr *entry_hdr ; struct qla8044_minidump_template_hdr *tmplt_hdr ; uint32_t *data_ptr ; uint32_t data_collected ; uint32_t f_capture_mask ; int i ; int rval ; uint64_t now ; uint32_t timestamp ; uint32_t idc_control ; struct qla_hw_data *ha ; int tmp ; unsigned int tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; uint32_t tmp___4 ; uint32_t tmp___5 ; uint32_t tmp___6 ; { num_entry_hdr = 0; data_collected = 0U; rval = 258; ha = vha->hw; if ((unsigned long )ha->md_dump == (unsigned long )((void *)0)) { ql_log(2U, vha, 45313, "%s(%ld) No buffer to dump\n", "qla8044_collect_md_data", vha->host_no); return (rval); } else { } if (ha->fw_dumped != 0) { ql_log(1U, vha, 45325, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto md_failed; } else { } ha->fw_dumped = 0; if ((unsigned long )ha->md_tmplt_hdr == (unsigned long )((void *)0) || (unsigned long )ha->md_dump == (unsigned long )((void *)0)) { ql_log(1U, vha, 45326, "Memory not allocated for minidump capture\n"); goto md_failed; } else { } qla8044_idc_lock(ha); idc_control = qla8044_rd_reg(ha, 14224UL); if ((idc_control & 2U) != 0U) { ql_log(1U, vha, 45330, "Forced reset from application, ignore minidump capture\n"); qla8044_wr_reg(ha, 14224UL, idc_control & 4294967293U); qla8044_idc_unlock(ha); goto md_failed; } else { } qla8044_idc_unlock(ha); tmp = qla82xx_validate_template_chksum(vha); if (tmp != 0) { ql_log(2U, vha, 45321, "Template checksum validation error\n"); goto md_failed; } else { } tmplt_hdr = (struct qla8044_minidump_template_hdr *)ha->md_tmplt_hdr; data_ptr = (uint32_t *)ha->md_dump; num_entry_hdr = (int )tmplt_hdr->num_of_entries; ql_dbg(524288U, vha, 45338, "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); f_capture_mask = tmplt_hdr->capture_debug_level & 255U; if ((f_capture_mask & 3U) != 3U) { ql_log(1U, vha, 45327, "Minimum required capture mask[0x%x] level not set\n", f_capture_mask); } else { } tmplt_hdr->driver_capture_mask = (uint32_t )ql2xmdcapmask; ql_log(2U, vha, 45314, "[%s]: starting data ptr: %p\n", "qla8044_collect_md_data", data_ptr); ql_log(2U, vha, 45323, "[%s]: no of entry headers in Template: 0x%x\n", "qla8044_collect_md_data", num_entry_hdr); ql_log(2U, vha, 45324, "[%s]: Total_data_size 0x%x, %d obtained\n", "qla8044_collect_md_data", 
ha->md_dump_size, ha->md_dump_size); now = get_jiffies_64(); tmp___0 = jiffies_to_msecs((unsigned long const )now); timestamp = tmp___0 / 1000U; tmplt_hdr->driver_timestamp = timestamp; entry_hdr = (struct qla8044_minidump_entry_hdr *)ha->md_tmplt_hdr + (unsigned long )tmplt_hdr->first_entry_offset; tmplt_hdr->saved_state_array[3] = tmplt_hdr->ocm_window_reg[(int )ha->portnum]; i = 0; goto ldv_66948; ldv_66947: ; if (ha->md_dump_size < data_collected) { ql_log(2U, vha, 45315, "Data collected: [0x%x], Total Dump size: [0x%x]\n", data_collected, ha->md_dump_size); return (rval); } else { } if (((int )entry_hdr->d_ctrl.entry_capture_mask & ql2xmdcapmask) == 0) { entry_hdr->d_ctrl.driver_flags = (uint8_t )((unsigned int )entry_hdr->d_ctrl.driver_flags | 128U); goto skip_nxt_entry; } else { } ql_dbg(524288U, vha, 45316, "Data collected: [0x%x], Dump size left:[0x%x]\n", data_collected, ha->md_dump_size - data_collected); switch (entry_hdr->entry_type) { case 255U: qla8044_mark_entry_skipped(vha, entry_hdr, i); goto ldv_66922; case 98U: rval = qla8044_minidump_process_control(vha, entry_hdr); if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } else { } goto ldv_66922; case 1U: qla8044_minidump_process_rdcrb(vha, entry_hdr, & data_ptr); goto ldv_66922; case 72U: rval = qla8044_minidump_pex_dma_read(vha, entry_hdr, & data_ptr); if (rval != 0) { rval = qla8044_minidump_process_rdmem(vha, entry_hdr, & data_ptr); if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } else { } } else { } goto ldv_66922; case 4U: ; case 71U: tmp___1 = qla8044_minidump_process_rdrom(vha, entry_hdr, & data_ptr); rval = (int )tmp___1; if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); } else { } goto ldv_66922; case 21U: ; case 22U: ; case 23U: ; case 24U: rval = qla8044_minidump_process_l2tag(vha, entry_hdr, & data_ptr); if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } else { } goto ldv_66922; case 8U: ; case 9U: ; case 11U: ; case 12U: qla8044_minidump_process_l1cache(vha, entry_hdr, & data_ptr); goto ldv_66922; case 6U: qla8044_minidump_process_rdocm(vha, entry_hdr, & data_ptr); goto ldv_66922; case 2U: qla8044_minidump_process_rdmux(vha, entry_hdr, & data_ptr); goto ldv_66922; case 3U: qla8044_minidump_process_queue(vha, entry_hdr, & data_ptr); goto ldv_66922; case 35U: tmp___2 = qla8044_minidump_process_pollrd(vha, entry_hdr, & data_ptr); rval = (int )tmp___2; if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); } else { } goto ldv_66922; case 36U: qla8044_minidump_process_rdmux2(vha, entry_hdr, & data_ptr); goto ldv_66922; case 37U: tmp___3 = qla8044_minidump_process_pollrdmwr(vha, entry_hdr, & data_ptr); rval = (int )tmp___3; if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); } else { } goto ldv_66922; case 38U: tmp___4 = qla8044_minidump_process_rddfe(vha, entry_hdr, & data_ptr); rval = (int )tmp___4; if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); } else { } goto ldv_66922; case 39U: tmp___5 = qla8044_minidump_process_rdmdio(vha, entry_hdr, & data_ptr); rval = (int )tmp___5; if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); } else { } goto ldv_66922; case 40U: tmp___6 = qla8044_minidump_process_pollwr(vha, entry_hdr, & data_ptr); rval = (int )tmp___6; if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); } else { } goto ldv_66922; case 0U: ; default: qla8044_mark_entry_skipped(vha, entry_hdr, i); goto ldv_66922; } ldv_66922: data_collected = 
(uint32_t )((long )data_ptr) - (uint32_t )((long )ha->md_dump); skip_nxt_entry: entry_hdr = entry_hdr + (unsigned long )entry_hdr->entry_size; i = i + 1; ldv_66948: ; if (i < num_entry_hdr) { goto ldv_66947; } else { } if (ha->md_dump_size != data_collected) { ql_log(2U, vha, 45317, "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n", data_collected, ha->md_dump_size); rval = 258; goto md_failed; } else { } ql_log(2U, vha, 45328, "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, 0U); ql_log(2U, vha, 45318, "Leaving fn: %s Last entry: 0x%x\n", "qla8044_collect_md_data", i); md_failed: ; return (rval); } } void qla8044_get_minidump(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla8044_collect_md_data(vha); if (tmp == 0) { ha->fw_dumped = 1; ha->prev_minidump_failed = 0; } else { ql_log(0U, vha, 45275, "%s: Unable to collect minidump\n", "qla8044_get_minidump"); ha->prev_minidump_failed = 1; } return; } } static int qla8044_poll_flash_status_reg(struct scsi_qla_host *vha ) { uint32_t flash_status ; int retries ; int ret_val ; int tmp ; { retries = 2000; ret_val = 0; goto ldv_66964; ldv_66963: ret_val = qla8044_rd_reg_indirect(vha, 1108344836U, & flash_status); if (ret_val != 0) { ql_log(1U, vha, 45372, "%s: Failed to read FLASH_STATUS reg.\n", "qla8044_poll_flash_status_reg"); goto ldv_66962; } else { } if ((flash_status & 6U) == 6U) { goto ldv_66962; } else { } msleep(1U); ldv_66964: tmp = retries; retries = retries - 1; if (tmp != 0) { goto ldv_66963; } else { } ldv_66962: ; if (retries == 0) { ret_val = 258; } else { } return (ret_val); } } static int qla8044_write_flash_status_reg(struct scsi_qla_host *vha , uint32_t data ) { int ret_val ; uint32_t cmd ; { ret_val = 0; cmd = (vha->hw)->fdt_wrt_sts_reg_cmd; ret_val = qla8044_wr_reg_indirect(vha, 1108410376U, cmd | 16580864U); if (ret_val != 0) { ql_log(1U, vha, 45349, "%s: Failed to write to FLASH_ADDR.\n", "qla8044_write_flash_status_reg"); goto exit_func; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410380U, data); if (ret_val != 0) { ql_log(1U, vha, 45350, "%s: Failed to write to FLASH_WRDATA.\n", "qla8044_write_flash_status_reg"); goto exit_func; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410372U, 5U); if (ret_val != 0) { ql_log(1U, vha, 45351, "%s: Failed to write to FLASH_CONTROL.\n", "qla8044_write_flash_status_reg"); goto exit_func; } else { } ret_val = qla8044_poll_flash_status_reg(vha); if (ret_val != 0) { ql_log(1U, vha, 45352, "%s: Error polling flash status reg.\n", "qla8044_write_flash_status_reg"); } else { } exit_func: ; return (ret_val); } } static int qla8044_unprotect_flash(scsi_qla_host_t *vha ) { int ret_val ; struct qla_hw_data *ha ; { ha = vha->hw; ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable); if (ret_val != 0) { ql_log(1U, vha, 45369, "%s: Write flash status failed.\n", "qla8044_unprotect_flash"); } else { } return (ret_val); } } static int qla8044_protect_flash(scsi_qla_host_t *vha ) { int ret_val ; struct qla_hw_data *ha ; { ha = vha->hw; ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable); if (ret_val != 0) { ql_log(1U, vha, 45371, "%s: Write flash status failed.\n", "qla8044_protect_flash"); } else { } return (ret_val); } } static int qla8044_erase_flash_sector(struct scsi_qla_host *vha , uint32_t sector_start_addr ) { uint32_t reversed_addr ; int ret_val ; { ret_val = 0; ret_val = 
qla8044_poll_flash_status_reg(vha); if (ret_val != 0) { ql_log(1U, vha, 45358, "%s: Poll flash status after erase failed..\n", "qla8044_erase_flash_sector"); } else { } reversed_addr = (((sector_start_addr & 255U) << 16) | (sector_start_addr & 65280U)) | ((sector_start_addr & 16711680U) >> 16); ret_val = qla8044_wr_reg_indirect(vha, 1108410380U, reversed_addr); if (ret_val != 0) { ql_log(1U, vha, 45359, "%s: Failed to write to FLASH_WRDATA.\n", "qla8044_erase_flash_sector"); } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410376U, (vha->hw)->fdt_erase_cmd | 16581376U); if (ret_val != 0) { ql_log(1U, vha, 45360, "%s: Failed to write to FLASH_ADDR.\n", "qla8044_erase_flash_sector"); } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410372U, 61U); if (ret_val != 0) { ql_log(1U, vha, 45361, "%s: Failed write to FLASH_CONTROL.\n", "qla8044_erase_flash_sector"); } else { } ret_val = qla8044_poll_flash_status_reg(vha); if (ret_val != 0) { ql_log(1U, vha, 45362, "%s: Poll flash status failed.\n", "qla8044_erase_flash_sector"); } else { } return (ret_val); } } static int qla8044_flash_write_u32(struct scsi_qla_host *vha , uint32_t addr , uint32_t *p_data ) { int ret_val ; { ret_val = 0; ret_val = qla8044_wr_reg_indirect(vha, 1108410376U, (addr >> 2) | 8388608U); if (ret_val != 0) { ql_log(1U, vha, 45364, "%s: Failed write to FLASH_ADDR.\n", "qla8044_flash_write_u32"); goto exit_func; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410380U, *p_data); if (ret_val != 0) { ql_log(1U, vha, 45365, "%s: Failed write to FLASH_WRDATA.\n", "qla8044_flash_write_u32"); goto exit_func; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410372U, 61U); if (ret_val != 0) { ql_log(1U, vha, 45366, "%s: Failed write to FLASH_CONTROL.\n", "qla8044_flash_write_u32"); goto exit_func; } else { } ret_val = qla8044_poll_flash_status_reg(vha); if (ret_val != 0) { ql_log(1U, vha, 45367, "%s: Poll flash status failed.\n", "qla8044_flash_write_u32"); } else { } exit_func: ; return (ret_val); } } static int qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha , uint32_t *dwptr , uint32_t faddr , uint32_t dwords ) { int ret ; uint32_t spi_val ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; { ret = 258; if (dwords <= 1U || dwords > 64U) { ql_dbg(8388608U, vha, 45347, "Got unsupported dwords = 0x%x.\n", dwords); return (258); } else { } qla8044_rd_reg_indirect(vha, 671670292U, & spi_val); qla8044_wr_reg_indirect(vha, 671670292U, spi_val | 4U); qla8044_wr_reg_indirect(vha, 1108410376U, 8388608U); tmp = dwptr; dwptr = dwptr + 1; ret = qla8044_wr_reg_indirect(vha, 1108410380U, *tmp); qla8044_wr_reg_indirect(vha, 1108410372U, 67U); ret = qla8044_poll_flash_status_reg(vha); if (ret != 0) { ql_log(1U, vha, 45348, "%s: Failed.\n", "qla8044_write_flash_buffer_mode"); goto exit_func; } else { } dwords = dwords - 1U; qla8044_wr_reg_indirect(vha, 1108410376U, 8388609U); goto ldv_67011; ldv_67010: tmp___0 = dwptr; dwptr = dwptr + 1; qla8044_wr_reg_indirect(vha, 1108410380U, *tmp___0); qla8044_wr_reg_indirect(vha, 1108410372U, 127U); ret = qla8044_poll_flash_status_reg(vha); if (ret != 0) { ql_log(1U, vha, 45353, "%s: Failed.\n", "qla8044_write_flash_buffer_mode"); goto exit_func; } else { } dwords = dwords - 1U; ldv_67011: ; if (dwords != 1U) { goto ldv_67010; } else { } qla8044_wr_reg_indirect(vha, 1108410376U, (faddr >> 2) | 8388608U); tmp___1 = dwptr; dwptr = dwptr + 1; qla8044_wr_reg_indirect(vha, 1108410380U, *tmp___1); qla8044_wr_reg_indirect(vha, 1108410372U, 125U); ret = qla8044_poll_flash_status_reg(vha); 
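/* Last dword of the burst: FLASH_ADDR was set from faddr and FLASH_CONTROL written with 125U (0x7d); the poll result is checked next and the spi_val status register is read back below. */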
if (ret != 0) { ql_log(1U, vha, 45354, "%s: Failed.\n", "qla8044_write_flash_buffer_mode"); goto exit_func; } else { } qla8044_rd_reg_indirect(vha, 671670288U, & spi_val); if ((spi_val & 4U) != 0U) { ql_log(1U, vha, 45355, "%s: Failed.\n", "qla8044_write_flash_buffer_mode"); spi_val = 0U; qla8044_rd_reg_indirect(vha, 671670292U, & spi_val); qla8044_wr_reg_indirect(vha, 671670292U, spi_val | 4U); } else { } exit_func: ; return (ret); } } static int qla8044_write_flash_dword_mode(scsi_qla_host_t *vha , uint32_t *dwptr , uint32_t faddr , uint32_t dwords ) { int ret ; uint32_t liter ; { ret = 258; liter = 0U; goto ldv_67024; ldv_67023: ret = qla8044_flash_write_u32(vha, faddr, dwptr); if (ret != 0) { ql_dbg(524288U, vha, 45377, "%s: flash address=%x data=%x.\n", "qla8044_write_flash_dword_mode", faddr, *dwptr); goto ldv_67022; } else { } liter = liter + 1U; faddr = faddr + 4U; dwptr = dwptr + 1; ldv_67024: ; if (liter < dwords) { goto ldv_67023; } else { } ldv_67022: ; return (ret); } } int qla8044_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { int rval ; int i ; int burst_iter_count ; int dword_count ; int erase_sec_count ; uint32_t erase_offset ; uint8_t *p_cache ; uint8_t *p_src ; void *tmp ; { rval = 258; erase_offset = offset; tmp = kcalloc((size_t )length, 1UL, 208U); p_cache = (uint8_t *)tmp; if ((unsigned long )p_cache == (unsigned long )((uint8_t *)0U)) { return (258); } else { } memcpy((void *)p_cache, (void const *)buf, (size_t )length); p_src = p_cache; dword_count = (int )(length / 4U); burst_iter_count = dword_count / 64; erase_sec_count = (int )(length / 65536U); scsi_block_requests(vha->host); qla8044_flash_lock(vha); qla8044_unprotect_flash(vha); i = 0; goto ldv_67041; ldv_67040: rval = qla8044_erase_flash_sector(vha, erase_offset); ql_dbg(8388608U, vha, 45368, "Done erase of sector=0x%x.\n", erase_offset); if (rval != 0) { ql_log(1U, vha, 45345, "Failed to erase the sector having address: 0x%x.\n", erase_offset); goto out; } else { } erase_offset = erase_offset + 65536U; i = i + 1; ldv_67041: ; if (i < erase_sec_count) { goto ldv_67040; } else { } ql_dbg(8388608U, vha, 45375, "Got write for addr = 0x%x length=0x%x.\n", offset, length); i = 0; goto ldv_67044; ldv_67043: rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src, offset, 64U); if (rval != 0) { ql_log(1U, vha, 45346, "Failed to write flash in buffer mode, Reverting to slow-write.\n"); rval = qla8044_write_flash_dword_mode(vha, (uint32_t *)p_src, offset, 64U); } else { } p_src = p_src + 256UL; offset = offset + 256U; i = i + 1; ldv_67044: ; if (i < burst_iter_count) { goto ldv_67043; } else { } ql_dbg(8388608U, vha, 45363, "Done writing.\n"); out: qla8044_protect_flash(vha); qla8044_flash_unlock(vha); scsi_unblock_requests(vha->host); kfree((void const *)p_cache); return (rval); } } irqreturn_t qla8044_intr_handler(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_82xx *reg ; int status ; unsigned long flags ; unsigned long iter ; uint32_t stat ; uint16_t mb[4U] ; uint32_t leg_int_ptr ; uint32_t pf_bit ; void *tmp ; int tmp___0 ; long tmp___1 ; raw_spinlock_t *tmp___2 ; unsigned int tmp___3 ; unsigned long tmp___4 ; { status = 0; leg_int_ptr = 0U; rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 45379, "%s(): NULL response queue pointer\n", "qla8044_intr_handler"); return (0); } else { } ha = rsp->hw; tmp = 
pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = pci_channel_offline(ha->pdev); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 0L); if (tmp___1 != 0L) { return (1); } else { } leg_int_ptr = qla8044_rd_reg(ha, 14528UL); if ((int )leg_int_ptr >= 0) { ql_dbg(524288U, vha, 45380, "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n", "qla8044_intr_handler"); return (0); } else { } pf_bit = (uint32_t )((int )ha->portnum << 16); if ((leg_int_ptr & 983040U) != pf_bit) { ql_dbg(524288U, vha, 45381, "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n", "qla8044_intr_handler", leg_int_ptr & 983040U, pf_bit); return (0); } else { } qla8044_wr_reg(ha, 14532UL, 0U); ldv_67063: leg_int_ptr = qla8044_rd_reg(ha, 14528UL); if ((leg_int_ptr & 983040U) != pf_bit) { goto ldv_67062; } else { } if ((leg_int_ptr & 1073741824U) != 0U) { goto ldv_67063; } else { } ldv_67062: reg = & (ha->iobase)->isp82; tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); iter = 1UL; goto ldv_67077; ldv_67076: tmp___3 = readl((void const volatile *)(& reg->host_int)); if (tmp___3 != 0U) { stat = readl((void const volatile *)(& reg->host_status)); if ((stat & 32768U) == 0U) { goto ldv_67067; } else { } switch (stat & 255U) { case 1U: ; case 2U: ; case 16U: ; case 17U: qla82xx_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; goto ldv_67072; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((void const volatile *)(& reg->mailbox_out) + 1U); mb[2] = readw((void const volatile *)(& reg->mailbox_out) + 2U); mb[3] = readw((void const volatile *)(& reg->mailbox_out) + 3U); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_67072; case 19U: qla24xx_process_response_queue(vha, rsp); goto ldv_67072; default: ql_dbg(524288U, vha, 45382, "Unrecognized interrupt type (%d).\n", stat & 255U); goto ldv_67072; } ldv_67072: ; } else { } writel(0U, (void volatile *)(& reg->host_int)); ldv_67077: tmp___4 = iter; iter = iter - 1UL; if (tmp___4 != 0UL) { goto ldv_67076; } else { } ldv_67067: qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } static int qla8044_idc_dontreset(struct qla_hw_data *ha ) { uint32_t idc_ctrl ; { idc_ctrl = qla8044_rd_reg(ha, 14224UL); return ((int )idc_ctrl & 1); } } static void qla8044_clear_rst_ready(scsi_qla_host_t *vha ) { uint32_t drv_state ; int tmp ; { tmp = qla8044_rd_direct(vha, 5U); drv_state = (uint32_t )tmp; drv_state = (uint32_t )(~ (1 << (int )(vha->hw)->portnum)) & drv_state; ql_dbg(524288U, vha, 45373, "drv_state: 0x%08x\n", drv_state); qla8044_wr_direct(vha, 5U, drv_state); return; } } int qla8044_abort_isp(scsi_qla_host_t *vha ) { int rval ; uint32_t dev_state ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { ha = vha->hw; qla8044_idc_lock(ha); tmp = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp; if (ql2xdontresethba != 0) { qla8044_set_idc_dontreset(vha); } else { } if (dev_state == 3U) { tmp___0 = qla8044_idc_dontreset(ha); if (tmp___0 == 1) { ql_dbg(524288U, vha, 45374, "Reset recovery disabled\n"); rval = 258; goto exit_isp_reset; } else { } ql_dbg(524288U, vha, 45376, "HW State: NEED RESET\n"); qla8044_wr_direct(vha, 4U, 4U); } else { } qla83xx_reset_ownership(vha); qla8044_idc_unlock(ha); rval = qla8044_device_state_handler(vha); qla8044_idc_lock(ha); qla8044_clear_rst_ready(vha); exit_isp_reset: qla8044_idc_unlock(ha); if (rval == 0) { ha->flags.isp82xx_fw_hung = 0U; 
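/* Reset recovery succeeded: clear the firmware-hung and nic-core-reset-handler flags, then restart the ISP via qla82xx_restart_isp(). */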
ha->flags.nic_core_reset_hdlr_active = 0U; rval = qla82xx_restart_isp(vha); } else { } return (rval); } } void qla8044_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { struct qla_hw_data *ha ; { ha = vha->hw; if (ha->allow_cna_fw_dump == 0) { return; } else { } scsi_block_requests(vha->host); ha->flags.isp82xx_no_md_cap = 1U; qla8044_idc_lock(ha); qla82xx_set_reset_owner(vha); qla8044_idc_unlock(ha); qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); return; } } void disable_suitable_timer_28(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_28) { ldv_timer_state_28 = 0; return; } else { } return; } } int reg_timer_28(struct timer_list *timer ) { { ldv_timer_list_28 = timer; ldv_timer_state_28 = 1; return (0); } } void choose_timer_28(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_28 = 2; return; } } void activate_pending_timer_28(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_28 == (unsigned long )timer) { if (ldv_timer_state_28 == 2 || pending_flag != 0) { ldv_timer_list_28 = timer; ldv_timer_list_28->data = data; ldv_timer_state_28 = 1; } else { } return; } else { } reg_timer_28(timer); ldv_timer_list_28->data = data; return; } } bool ldv_queue_work_on_279(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_280(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_281(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_282(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_283(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_284(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static int variable_test_bit(long nr , unsigned long const volatile *addr ) { int oldbit ; { __asm__ volatile ("bt %2,%1\n\tsbb %0,%0": "=r" (oldbit): "m" (*((unsigned long *)addr)), "Ir" 
(nr)); return (oldbit); } } __inline static __u64 __arch_swab64(__u64 val ) { { __asm__ ("bswapq %0": "=r" (val): "0" (val)); return (val); } } __inline static __u64 __fswab64(__u64 val ) { __u64 tmp ; { tmp = __arch_swab64(val); return (tmp); } } __inline static __u32 __le32_to_cpup(__le32 const *p ) { { return ((__u32 )*p); } } extern void dump_stack(void) ; extern void __dynamic_pr_debug(struct _ddebug * , char const * , ...) ; extern void __might_sleep(char const * , int , int ) ; extern int strcasecmp(char const * , char const * ) ; extern void _raw_spin_lock(raw_spinlock_t * ) ; extern void _raw_spin_unlock(raw_spinlock_t * ) ; __inline static void spin_lock(spinlock_t *lock ) { { _raw_spin_lock(& lock->__annonCompField18.rlock); return; } } __inline static void spin_unlock(spinlock_t *lock ) { { _raw_spin_unlock(& lock->__annonCompField18.rlock); return; } } extern void __wake_up(wait_queue_head_t * , unsigned int , int , void * ) ; extern long prepare_to_wait_event(wait_queue_head_t * , wait_queue_t * , int ) ; extern void finish_wait(wait_queue_head_t * , wait_queue_t * ) ; extern void init_timer_key(struct timer_list * , unsigned int , char const * , struct lock_class_key * ) ; extern void delayed_work_timer_fn(unsigned long ) ; void ldv_destroy_workqueue_302(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_queue_work_on_295(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_297(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_296(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_299(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_298(struct workqueue_struct *ldv_func_arg1 ) ; extern bool flush_delayed_work(struct delayed_work * ) ; bool ldv_flush_delayed_work_301(struct delayed_work *ldv_func_arg1 ) ; __inline static bool queue_work___1(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_295(8192, wq, work); return (tmp); } } __inline static bool queue_delayed_work(struct workqueue_struct *wq , struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = ldv_queue_delayed_work_on_296(8192, wq, dwork, delay); return (tmp); } } __inline static bool schedule_work___0(struct work_struct *work ) { bool tmp ; { tmp = queue_work___1(system_wq, work); return (tmp); } } __inline static void flush_scheduled_work(void) { { ldv_flush_workqueue_298(system_wq); return; } } __inline static bool schedule_delayed_work(struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = queue_delayed_work(system_wq, dwork, delay); return (tmp); } } __inline static void kref_get___0(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } __inline static void ldv_error(void) { { ERROR: ; __VERIFIER_error(); } } __inline static void ldv__builtin_trap(void) { { ldv_error(); return; } } void call_and_disable_work_10(struct work_struct *work ) ; void invoke_work_8(void) ; void activate_pending_timer_29(struct timer_list *timer , unsigned long data , int pending_flag ) ; void invoke_work_10(void) ; void call_and_disable_all_11(int state ) ; int reg_timer_29(struct timer_list *timer ) ; void call_and_disable_work_7(struct work_struct *work ) ; void call_and_disable_all_9(int state ) ; void call_and_disable_all_12(int state ) ; void call_and_disable_work_8(struct work_struct *work ) ; void invoke_work_11(void) ; void invoke_work_9(void) ; void disable_work_8(struct work_struct *work ) ; void activate_work_9(struct work_struct *work , int state ) ; void invoke_work_12(void) ; void call_and_disable_work_12(struct work_struct *work ) ; void activate_work_11(struct work_struct *work , int state ) ; void disable_work_11(struct work_struct *work ) ; void disable_work_12(struct work_struct *work ) ; void invoke_work_7(void) ; void call_and_disable_all_8(int state ) ; void call_and_disable_work_9(struct work_struct *work ) ; void activate_work_12(struct work_struct *work , int state ) ; void activate_work_8(struct work_struct *work , int state ) ; void disable_work_9(struct work_struct *work ) ; void disable_work_10(struct work_struct *work ) ; void activate_work_10(struct work_struct *work , int state ) ; void call_and_disable_work_11(struct work_struct *work ) ; void choose_timer_29(struct timer_list *timer ) ; void call_and_disable_all_10(int state ) ; void disable_suitable_timer_29(struct timer_list *timer ) ; __inline static struct page *sg_page___3(struct scatterlist *sg ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (123), "i" (12UL)); ldv_31679: ; goto ldv_31679; } else { } tmp___0 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (124), "i" (12UL)); ldv_31680: ; goto ldv_31680; } else { } return ((struct page *)(sg->page_link & 0xfffffffffffffffcUL)); } } __inline static void *sg_virt___2(struct scatterlist *sg ) { struct page *tmp ; void *tmp___0 ; { tmp = sg_page___3(sg); tmp___0 = lowmem_page_address((struct page const *)tmp); return (tmp___0 + (unsigned long )sg->offset); } } __inline static int dma_map_sg_attrs___2(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int i ; int ents ; struct scatterlist *s ; void *tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; { tmp = get_dma_ops(dev); ops = tmp; i = 0; s = sg; goto ldv_35526; ldv_35525: tmp___0 = sg_virt___2(s); kmemcheck_mark_initialized(tmp___0, s->length); i = i + 1; s = sg_next(s); 
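/* Walk the scatterlist and mark each segment's virtual buffer initialized for kmemcheck before handing the list to the arch map_sg operation. */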
ldv_35526: ; if (i < nents) { goto ldv_35525; } else { } tmp___1 = valid_dma_direction((int )dir); tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (56), "i" (12UL)); ldv_35528: ; goto ldv_35528; } else { } ents = (*(ops->map_sg))(dev, sg, nents, dir, attrs); tmp___3 = ldv__builtin_expect(ents < 0, 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (58), "i" (12UL)); ldv_35529: ; goto ldv_35529; } else { } debug_dma_map_sg(dev, sg, nents, ents, (int )dir); return (ents); } } __inline static void dma_unmap_sg_attrs___1(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (70), "i" (12UL)); ldv_35538: ; goto ldv_35538; } else { } debug_dma_unmap_sg(dev, sg, nents, (int )dir); if ((unsigned long )ops->unmap_sg != (unsigned long )((void (*)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_sg))(dev, sg, nents, dir, attrs); } else { } return; } } __inline static int pci_map_sg(struct pci_dev *hwdev , struct scatterlist *sg , int nents , int direction ) { int tmp ; { tmp = dma_map_sg_attrs___2((unsigned long )hwdev != (unsigned long )((struct pci_dev *)0) ? & hwdev->dev : (struct device *)0, sg, nents, (enum dma_data_direction )direction, (struct dma_attrs *)0); return (tmp); } } __inline static void pci_unmap_sg(struct pci_dev *hwdev , struct scatterlist *sg , int nents , int direction ) { { dma_unmap_sg_attrs___1((unsigned long )hwdev != (unsigned long )((struct pci_dev *)0) ? 
& hwdev->dev : (struct device *)0, sg, nents, (enum dma_data_direction )direction, (struct dma_attrs *)0); return; } } __inline static u32 get_unaligned_le32(void const *p ) { __u32 tmp ; { tmp = __le32_to_cpup((__le32 const *)p); return (tmp); } } __inline static void put_unaligned_be64(u64 val , void *p ) { __u64 tmp ; { tmp = __fswab64(val); *((__be64 *)p) = tmp; return; } } extern u64 scsilun_to_int(struct scsi_lun * ) ; int ldv_scsi_add_host_with_dma_300(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; extern int percpu_ida_alloc(struct percpu_ida * , int ) ; extern void percpu_ida_free(struct percpu_ida * , unsigned int ) ; int qlt_lport_register(void *target_lport_ptr , u64 phys_wwpn , u64 npiv_wwpn , u64 npiv_wwnn , int (*callback)(struct scsi_qla_host * , void * , u64 , u64 ) ) ; void qlt_lport_deregister(struct scsi_qla_host *vha ) ; void qlt_unreg_sess(struct qla_tgt_sess *sess ) ; __inline static void qla_reverse_ini_mode(struct scsi_qla_host *ha ) { { if ((int )(ha->host)->active_mode & 1) { (ha->host)->active_mode = (unsigned int )(ha->host)->active_mode & 2U; } else { (ha->host)->active_mode = (unsigned char )((unsigned int )(ha->host)->active_mode | 1U); } return; } } int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd ) ; int qlt_xmit_response(struct qla_tgt_cmd *cmd , int xmit_type , uint8_t scsi_status ) ; void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd ) ; void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd ) ; void qlt_free_cmd(struct qla_tgt_cmd *cmd ) ; void qlt_enable_vha(struct scsi_qla_host *vha ) ; int qlt_stop_phase1(struct qla_tgt *tgt ) ; void qlt_stop_phase2(struct qla_tgt *tgt ) ; int qlt_free_qfull_cmds(struct scsi_qla_host *vha ) ; __inline static void qla2x00_clean_dsd_pool___0(struct qla_hw_data *ha , srb_t *sp , struct qla_tgt_cmd *tc ) { struct dsd_dma *dsd_ptr ; struct dsd_dma *tdsd_ptr ; struct crc_context *ctx ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { ctx = (struct crc_context *)sp->u.scmd.ctx; } else if ((unsigned long )tc != (unsigned long )((struct qla_tgt_cmd *)0)) { ctx = tc->ctx; } else { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/scsi/qla2xxx/qla_inline.h"), "i" (143), "i" (12UL)); ldv_66028: ; goto ldv_66028; return; } __mptr = (struct list_head const *)ctx->dsd_list.next; dsd_ptr = (struct dsd_dma *)__mptr; __mptr___0 = (struct list_head const *)dsd_ptr->list.next; tdsd_ptr = (struct dsd_dma *)__mptr___0; goto ldv_66036; ldv_66035: dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); list_del(& dsd_ptr->list); kfree((void const *)dsd_ptr); dsd_ptr = tdsd_ptr; __mptr___1 = (struct list_head const *)tdsd_ptr->list.next; tdsd_ptr = (struct dsd_dma *)__mptr___1; ldv_66036: ; if ((unsigned long )(& dsd_ptr->list) != (unsigned long )(& ctx->dsd_list)) { goto ldv_66035; } else { } INIT_LIST_HEAD(& ctx->dsd_list); return; } } static int ql2xtgt_tape_enable ; static char *qlini_mode = (char *)"enabled"; int ql2x_ini_mode = 0; static int temp_sam_status = 8; static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha , struct atio_from_isp *atio ) ; static void qlt_response_pkt(struct scsi_qla_host *vha , response_t *pkt ) ; static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess , uint32_t lun , 
int fn , void *iocb , int flags ) ; static void qlt_send_term_exchange(struct scsi_qla_host *vha , struct qla_tgt_cmd *cmd , struct atio_from_isp *atio , int ha_locked ) ; static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha , struct qla_tgt_srr_imm *imm , int ha_locked ) ; static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha , struct qla_tgt_cmd *cmd ) ; static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha , struct atio_from_isp *atio , uint16_t status , int qfull ) ; static void qlt_disable_vha(struct scsi_qla_host *vha ) ; static struct kmem_cache *qla_tgt_mgmt_cmd_cachep ; static mempool_t *qla_tgt_mgmt_cmd_mempool ; static struct workqueue_struct *qla_tgt_wq ; static struct mutex qla_tgt_mutex = {{1}, {{{{{0}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "qla_tgt_mutex.wait_lock", 0, 0UL}}}}, {& qla_tgt_mutex.wait_list, & qla_tgt_mutex.wait_list}, 0, (void *)(& qla_tgt_mutex), {0, {0, 0}, "qla_tgt_mutex", 0, 0UL}}; static struct list_head qla_tgt_glist = {& qla_tgt_glist, & qla_tgt_glist}; static struct qla_tgt_sess *qlt_find_sess_by_port_name(struct qla_tgt *tgt , uint8_t const *port_name ) { struct qla_tgt_sess *sess ; struct list_head const *__mptr ; int tmp ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)tgt->sess_list.next; sess = (struct qla_tgt_sess *)__mptr + 0xffffffffffffffd8UL; goto ldv_66182; ldv_66181: tmp = memcmp((void const *)(& sess->port_name), (void const *)port_name, 8UL); if (tmp == 0) { return (sess); } else { } __mptr___0 = (struct list_head const *)sess->sess_list_entry.next; sess = (struct qla_tgt_sess *)__mptr___0 + 0xffffffffffffffd8UL; ldv_66182: ; if ((unsigned long )(& sess->sess_list_entry) != (unsigned long )(& tgt->sess_list)) { goto ldv_66181; } else { } return ((struct qla_tgt_sess *)0); } } __inline static int qlt_issue_marker(struct scsi_qla_host *vha , int vha_locked ) { int rc ; int tmp ; long tmp___0 ; { tmp___0 = ldv__builtin_expect((unsigned int )vha->marker_needed != 0U, 0L); if (tmp___0 != 0L) { tmp = qla2x00_issue_marker(vha, vha_locked); rc = tmp; if (rc != 0) { ql_dbg(16384U, vha, 57405, "qla_target(%d): issue_marker() failed\n", (int )vha->vp_idx); } else { } return (rc); } else { } return (0); } } __inline static struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha , uint8_t *d_id ) { struct qla_hw_data *ha ; uint8_t vp_idx ; long tmp ; int tmp___0 ; long tmp___1 ; { ha = vha->hw; if ((int )vha->d_id.b.area != (int )*(d_id + 1UL) || (int )vha->d_id.b.domain != (int )*d_id) { return ((struct scsi_qla_host *)0); } else { } if ((int )vha->d_id.b.al_pa == (int )*(d_id + 2UL)) { return (vha); } else { } tmp = ldv__builtin_expect((unsigned long )ha->tgt.tgt_vp_map == (unsigned long )((struct qla_tgt_vp_map *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"), "i" (169), "i" (12UL)); ldv_66195: ; goto ldv_66195; } else { } vp_idx = (ha->tgt.tgt_vp_map + (unsigned long )*(d_id + 2UL))->idx; tmp___0 = variable_test_bit((long )vp_idx, (unsigned long const volatile *)(& ha->vp_idx_map)); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 1L); if (tmp___1 != 0L) { return ((ha->tgt.tgt_vp_map + (unsigned long )vp_idx)->vha); } else { } return 
((struct scsi_qla_host *)0); } } __inline static struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha , uint16_t vp_idx ) { struct qla_hw_data *ha ; long tmp ; int tmp___0 ; long tmp___1 ; { ha = vha->hw; if ((int )vha->vp_idx == (int )vp_idx) { return (vha); } else { } tmp = ldv__builtin_expect((unsigned long )ha->tgt.tgt_vp_map == (unsigned long )((struct qla_tgt_vp_map *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"), "i" (186), "i" (12UL)); ldv_66201: ; goto ldv_66201; } else { } tmp___0 = variable_test_bit((long )vp_idx, (unsigned long const volatile *)(& ha->vp_idx_map)); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 1L); if (tmp___1 != 0L) { return ((ha->tgt.tgt_vp_map + (unsigned long )vp_idx)->vha); } else { } return ((struct scsi_qla_host *)0); } } __inline static void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha ) { unsigned long flags ; raw_spinlock_t *tmp ; { tmp = spinlock_check(& (vha->hw)->tgt.q_full_lock); flags = _raw_spin_lock_irqsave(tmp); (vha->hw)->tgt.num_pend_cmds = (vha->hw)->tgt.num_pend_cmds + 1U; if ((vha->hw)->tgt.num_pend_cmds > (vha->hw)->qla_stats.stat_max_pend_cmds) { (vha->hw)->qla_stats.stat_max_pend_cmds = (vha->hw)->tgt.num_pend_cmds; } else { } spin_unlock_irqrestore(& (vha->hw)->tgt.q_full_lock, flags); return; } } __inline static void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha ) { unsigned long flags ; raw_spinlock_t *tmp ; { tmp = spinlock_check(& (vha->hw)->tgt.q_full_lock); flags = _raw_spin_lock_irqsave(tmp); (vha->hw)->tgt.num_pend_cmds = (vha->hw)->tgt.num_pend_cmds - 1U; spin_unlock_irqrestore(& (vha->hw)->tgt.q_full_lock, flags); return; } } static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha , struct atio_from_isp *atio ) { __u16 tmp ; struct scsi_qla_host *host ; struct scsi_qla_host *tmp___0 ; long tmp___1 ; struct scsi_qla_host *host___0 ; struct imm_ntfy_from_isp *entry ; long tmp___2 ; { tmp = __fswab16((int )atio->u.isp24.fcp_hdr.ox_id); ql_dbg(16384U, vha, 57458, "%s: qla_target(%d): type %x ox_id %04x\n", "qlt_24xx_atio_pkt_all_vps", (int )vha->vp_idx, (int )atio->u.raw.entry_type, (int )tmp); switch ((int )atio->u.raw.entry_type) { case 6: tmp___0 = qlt_find_host_by_d_id(vha, (uint8_t *)(& atio->u.isp24.fcp_hdr.d_id)); host = tmp___0; tmp___1 = ldv__builtin_expect((unsigned long )host == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___1 != 0L) { ql_dbg(16384U, vha, 57406, "qla_target(%d): Received ATIO_TYPE7 with unknown d_id %x:%x:%x\n", (int )vha->vp_idx, (int )atio->u.isp24.fcp_hdr.d_id[0], (int )atio->u.isp24.fcp_hdr.d_id[1], (int )atio->u.isp24.fcp_hdr.d_id[2]); goto ldv_66223; } else { } qlt_24xx_atio_pkt(host, atio); goto ldv_66223; case 13: host___0 = vha; entry = (struct imm_ntfy_from_isp *)atio; if ((unsigned int )entry->u.isp24.vp_index != 255U && (unsigned int )entry->u.isp24.nport_handle != 65535U) { host___0 = qlt_find_host_by_vp_idx(vha, (int )entry->u.isp24.vp_index); tmp___2 = ldv__builtin_expect((unsigned long )host___0 == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___2 != 0L) { ql_dbg(16384U, vha, 57407, "qla_target(%d): Received ATIO (IMMED_NOTIFY_TYPE) with unknown vp_index %d\n", (int )vha->vp_idx, (int 
)entry->u.isp24.vp_index); goto ldv_66223; } else { } } else { } qlt_24xx_atio_pkt(host___0, atio); goto ldv_66223; default: ql_dbg(16384U, vha, 57408, "qla_target(%d): Received unknown ATIO atio type %x\n", (int )vha->vp_idx, (int )atio->u.raw.entry_type); goto ldv_66223; } ldv_66223: ; return; } } void qlt_response_pkt_all_vps(struct scsi_qla_host *vha , response_t *pkt ) { struct ctio7_from_24xx *entry ; struct scsi_qla_host *host ; struct scsi_qla_host *tmp ; long tmp___0 ; struct scsi_qla_host *host___0 ; struct imm_ntfy_from_isp *entry___0 ; long tmp___1 ; struct scsi_qla_host *host___1 ; struct nack_to_isp *entry___1 ; long tmp___2 ; struct abts_recv_from_24xx *entry___2 ; struct scsi_qla_host *host___2 ; struct scsi_qla_host *tmp___3 ; long tmp___4 ; struct abts_resp_to_24xx *entry___3 ; struct scsi_qla_host *host___3 ; struct scsi_qla_host *tmp___5 ; long tmp___6 ; { switch ((int )pkt->entry_type) { case 122: ql_dbg(16384U, vha, 57459, "qla_target(%d):%s: CRC2 Response pkt\n", (int )vha->vp_idx, "qlt_response_pkt_all_vps"); case 18: entry = (struct ctio7_from_24xx *)pkt; tmp = qlt_find_host_by_vp_idx(vha, (int )entry->vp_index); host = tmp; tmp___0 = ldv__builtin_expect((unsigned long )host == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___0 != 0L) { ql_dbg(16384U, vha, 57409, "qla_target(%d): Response pkt (CTIO_TYPE7) received, with unknown vp_index %d\n", (int )vha->vp_idx, (int )entry->vp_index); goto ldv_66237; } else { } qlt_response_pkt(host, pkt); goto ldv_66237; case 13: host___0 = vha; entry___0 = (struct imm_ntfy_from_isp *)pkt; host___0 = qlt_find_host_by_vp_idx(vha, (int )entry___0->u.isp24.vp_index); tmp___1 = ldv__builtin_expect((unsigned long )host___0 == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___1 != 0L) { ql_dbg(16384U, vha, 57410, "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) received, with unknown vp_index %d\n", (int )vha->vp_idx, (int )entry___0->u.isp24.vp_index); goto ldv_66237; } else { } qlt_response_pkt(host___0, pkt); goto ldv_66237; case 14: host___1 = vha; entry___1 = (struct nack_to_isp *)pkt; if ((unsigned int )entry___1->u.isp24.vp_index != 255U) { host___1 = qlt_find_host_by_vp_idx(vha, (int )entry___1->u.isp24.vp_index); tmp___2 = ldv__builtin_expect((unsigned long )host___1 == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___2 != 0L) { ql_dbg(16384U, vha, 57411, "qla_target(%d): Response pkt (NOTIFY_ACK_TYPE) received, with unknown vp_index %d\n", (int )vha->vp_idx, (int )entry___1->u.isp24.vp_index); goto ldv_66237; } else { } } else { } qlt_response_pkt(host___1, pkt); goto ldv_66237; case 84: entry___2 = (struct abts_recv_from_24xx *)pkt; tmp___3 = qlt_find_host_by_vp_idx(vha, (int )entry___2->vp_index); host___2 = tmp___3; tmp___4 = ldv__builtin_expect((unsigned long )host___2 == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___4 != 0L) { ql_dbg(16384U, vha, 57412, "qla_target(%d): Response pkt (ABTS_RECV_24XX) received, with unknown vp_index %d\n", (int )vha->vp_idx, (int )entry___2->vp_index); goto ldv_66237; } else { } qlt_response_pkt(host___2, pkt); goto ldv_66237; case 85: entry___3 = (struct abts_resp_to_24xx *)pkt; tmp___5 = qlt_find_host_by_vp_idx(vha, (int )entry___3->vp_index); host___3 = tmp___5; tmp___6 = ldv__builtin_expect((unsigned long )host___3 == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___6 != 0L) { ql_dbg(16384U, vha, 57413, "qla_target(%d): Response pkt (ABTS_RECV_24XX) received, with unknown vp_index %d\n", (int )vha->vp_idx, (int 
)entry___3->vp_index); goto ldv_66237; } else { } qlt_response_pkt(host___3, pkt); goto ldv_66237; default: qlt_response_pkt(vha, pkt); goto ldv_66237; } ldv_66237: ; return; } } static void qlt_free_session_done(struct work_struct *work ) { struct qla_tgt_sess *sess ; struct work_struct const *__mptr ; struct qla_tgt *tgt ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; long tmp ; { __mptr = (struct work_struct const *)work; sess = (struct qla_tgt_sess *)__mptr + 0xffffffffffffffa8UL; tgt = sess->tgt; vha = sess->vha; ha = vha->hw; tmp = ldv__builtin_expect((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"), "i" (385), "i" (12UL)); ldv_66260: ; goto ldv_66260; } else { } if ((unsigned long )sess->se_sess != (unsigned long )((struct se_session *)0)) { (*((ha->tgt.tgt_ops)->free_session))(sess); } else { } ql_dbg(8192U, vha, 61441, "Unregistration of sess %p finished\n", sess); kfree((void const *)sess); tgt->sess_count = tgt->sess_count - 1; if (tgt->sess_count == 0) { __wake_up(& tgt->waitQ, 3U, 0, (void *)0); } else { } return; } } void qlt_unreg_sess(struct qla_tgt_sess *sess ) { struct scsi_qla_host *vha ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; { vha = sess->vha; (*(((vha->hw)->tgt.tgt_ops)->clear_nacl_from_fcport_map))(sess); list_del(& sess->sess_list_entry); if ((unsigned int )*((unsigned char *)sess + 8UL) != 0U) { list_del(& sess->del_list_entry); } else { } __init_work(& sess->free_work, 0); __constr_expr_0.counter = 137438953408L; sess->free_work.data = __constr_expr_0; lockdep_init_map(& sess->free_work.lockdep_map, "(&sess->free_work)", & __key, 0); INIT_LIST_HEAD(& sess->free_work.entry); sess->free_work.func = & qlt_free_session_done; schedule_work___0(& sess->free_work); return; } } static char const __kstrtab_qlt_unreg_sess[15U] = { 'q', 'l', 't', '_', 'u', 'n', 'r', 'e', 'g', '_', 's', 'e', 's', 's', '\000'}; struct kernel_symbol const __ksymtab_qlt_unreg_sess ; struct kernel_symbol const __ksymtab_qlt_unreg_sess = {(unsigned long )(& qlt_unreg_sess), (char const *)(& __kstrtab_qlt_unreg_sess)}; static int qlt_reset(struct scsi_qla_host *vha , void *iocb , int mcmd ) { struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; uint32_t unpacked_lun ; uint32_t lun ; uint16_t loop_id ; int res ; struct imm_ntfy_from_isp *n ; struct atio_from_isp *a ; u64 tmp ; int tmp___0 ; { ha = vha->hw; sess = (struct qla_tgt_sess *)0; lun = 0U; res = 0; n = (struct imm_ntfy_from_isp *)iocb; a = (struct atio_from_isp *)iocb; loop_id = n->u.isp24.nport_handle; if ((unsigned int )loop_id == 65535U) { } else { sess = (*((ha->tgt.tgt_ops)->find_sess_by_loop_id))(vha, (int )loop_id); } ql_dbg(16384U, vha, 57344, "Using sess for qla_tgt_reset: %p\n", sess); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { res = -3; return (res); } else { } ql_dbg(16384U, vha, 57415, "scsi(%ld): resetting (session %p from port %8phC mcmd %x, loop_id %d)\n", vha->host_no, sess, (uint8_t *)(& sess->port_name), mcmd, (int )loop_id); lun = (uint32_t )a->u.isp24.fcp_cmnd.lun; tmp = scsilun_to_int((struct scsi_lun *)(& lun)); unpacked_lun = (uint32_t )tmp; tmp___0 = 
qlt_issue_task_mgmt(sess, unpacked_lun, mcmd, iocb, 1); return (tmp___0); } } static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess , bool immediate ) { struct qla_tgt *tgt ; uint32_t dev_loss_tmo ; { tgt = sess->tgt; dev_loss_tmo = (uint32_t )((tgt->ha)->port_down_retry_count + 5); if ((unsigned int )*((unsigned char *)sess + 8UL) != 0U) { return; } else { } ql_dbg(16384U, sess->vha, 57345, "Scheduling sess %p for deletion\n", sess); list_add_tail(& sess->del_list_entry, & tgt->del_sess_list); sess->deleted = 1U; if ((int )immediate) { dev_loss_tmo = 0U; } else { } sess->expires = (unsigned long )(dev_loss_tmo * 250U) + (unsigned long )jiffies; ql_dbg(16384U, sess->vha, 57416, "qla_target(%d): session for port %8phC (loop ID %d) scheduled for deletion in %u secs (expires: %lu) immed: %d\n", (int )(sess->vha)->vp_idx, (uint8_t *)(& sess->port_name), (int )sess->loop_id, dev_loss_tmo, sess->expires, (int )immediate); if ((int )immediate) { schedule_delayed_work(& tgt->sess_del_work, 0UL); } else { schedule_delayed_work(& tgt->sess_del_work, sess->expires - (unsigned long )jiffies); } return; } } static void qlt_clear_tgt_db(struct qla_tgt *tgt ) { struct qla_tgt_sess *sess ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)tgt->sess_list.next; sess = (struct qla_tgt_sess *)__mptr + 0xffffffffffffffd8UL; goto ldv_66302; ldv_66301: qlt_schedule_sess_for_deletion(sess, 1); __mptr___0 = (struct list_head const *)sess->sess_list_entry.next; sess = (struct qla_tgt_sess *)__mptr___0 + 0xffffffffffffffd8UL; ldv_66302: ; if ((unsigned long )(& sess->sess_list_entry) != (unsigned long )(& tgt->sess_list)) { goto ldv_66301; } else { } return; } } static int qla24xx_get_loop_id(struct scsi_qla_host *vha , uint8_t const *s_id , uint16_t *loop_id ) { struct qla_hw_data *ha ; dma_addr_t gid_list_dma ; struct gid_list_info *gid_list ; char *id_iter ; int res ; int rc ; int i ; uint16_t entries ; int tmp ; void *tmp___0 ; int tmp___1 ; struct gid_list_info *gid ; int tmp___2 ; { ha = vha->hw; tmp = qla2x00_gid_list_size(ha); tmp___0 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )tmp, & gid_list_dma, 208U, (struct dma_attrs *)0); gid_list = (struct gid_list_info *)tmp___0; if ((unsigned long )gid_list == (unsigned long )((struct gid_list_info *)0)) { tmp___1 = qla2x00_gid_list_size(ha); ql_dbg(8192U, vha, 61508, "qla_target(%d): DMA Alloc failed of %u\n", (int )vha->vp_idx, tmp___1); return (-12); } else { } rc = qla2x00_get_id_list(vha, (void *)gid_list, gid_list_dma, & entries); if (rc != 0) { ql_dbg(8192U, vha, 61509, "qla_target(%d): get_id_list() failed: %x\n", (int )vha->vp_idx, rc); res = -1; goto out_free_id_list; } else { } id_iter = (char *)gid_list; res = -1; i = 0; goto ldv_66321; ldv_66320: gid = (struct gid_list_info *)id_iter; if (((int )gid->al_pa == (int )((unsigned char )*(s_id + 2UL)) && (int )gid->area == (int )((unsigned char )*(s_id + 1UL))) && (int )gid->domain == (int )((unsigned char )*s_id)) { *loop_id = gid->loop_id; res = 0; goto ldv_66319; } else { } id_iter = id_iter + (unsigned long )ha->gid_list_info_size; i = i + 1; ldv_66321: ; if ((int )entries > i) { goto ldv_66320; } else { } ldv_66319: ; out_free_id_list: tmp___2 = qla2x00_gid_list_size(ha); dma_free_attrs(& (ha->pdev)->dev, (size_t )tmp___2, (void *)gid_list, gid_list_dma, (struct dma_attrs *)0); return (res); } } static void qlt_undelete_sess(struct qla_tgt_sess *sess ) { long tmp ; { tmp = ldv__builtin_expect((unsigned int )*((unsigned char *)sess 
+ 8UL) == 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"), "i" (581), "i" (12UL)); ldv_66325: ; goto ldv_66325; } else { } list_del(& sess->del_list_entry); sess->deleted = 0U; return; } } static void qlt_del_sess_work_fn(struct delayed_work *work ) { struct qla_tgt *tgt ; struct delayed_work const *__mptr ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; unsigned long flags ; unsigned long elapsed ; raw_spinlock_t *tmp ; struct list_head const *__mptr___0 ; int tmp___0 ; { __mptr = (struct delayed_work const *)work; tgt = (struct qla_tgt *)__mptr + 0xffffffffffffffb0UL; vha = tgt->vha; ha = vha->hw; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); goto ldv_66350; ldv_66349: __mptr___0 = (struct list_head const *)tgt->del_sess_list.next; sess = (struct qla_tgt_sess *)__mptr___0 + 0xffffffffffffffc0UL; elapsed = jiffies; if ((long )(elapsed - sess->expires) >= 0L) { qlt_undelete_sess(sess); ql_dbg(8192U, vha, 61444, "Timeout: sess %p about to be deleted\n", sess); (*((ha->tgt.tgt_ops)->shutdown_sess))(sess); (*((ha->tgt.tgt_ops)->put_sess))(sess); } else { schedule_delayed_work(& tgt->sess_del_work, sess->expires - elapsed); goto ldv_66348; } ldv_66350: tmp___0 = list_empty((struct list_head const *)(& tgt->del_sess_list)); if (tmp___0 == 0) { goto ldv_66349; } else { } ldv_66348: spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static struct qla_tgt_sess *qlt_create_sess(struct scsi_qla_host *vha , fc_port_t *fcport , bool local ) { struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; unsigned long flags ; unsigned char be_sid[3U] ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; int tmp___0 ; struct list_head const *__mptr___0 ; void *tmp___1 ; int tmp___2 ; raw_spinlock_t *tmp___3 ; { ha = vha->hw; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)(vha->vha_tgt.qla_tgt)->sess_list.next; sess = (struct qla_tgt_sess *)__mptr + 0xffffffffffffffd8UL; goto ldv_66368; ldv_66367: tmp___0 = memcmp((void const *)(& sess->port_name), (void const *)(& fcport->port_name), 8UL); if (tmp___0 == 0) { ql_dbg(8192U, vha, 61445, "Double sess %p found (s_id %x:%x:%x, loop_id %d), updating to d_id %x:%x:%x, loop_id %d", sess, (int )sess->s_id.b.domain, (int )sess->s_id.b.al_pa, (int )sess->s_id.b.area, (int )sess->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.al_pa, (int )fcport->d_id.b.area, (int )fcport->loop_id); if ((unsigned int )*((unsigned char *)sess + 8UL) != 0U) { qlt_undelete_sess(sess); } else { } kref_get___0(& (sess->se_sess)->sess_kref); (*((ha->tgt.tgt_ops)->update_sess))(sess, fcport->d_id, (int )fcport->loop_id, (fcport->flags & 16U) != 0U); if ((unsigned int )*((unsigned char *)sess + 8UL) != 0U && ! 
local) { sess->local = 0U; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (sess); } else { } __mptr___0 = (struct list_head const *)sess->sess_list_entry.next; sess = (struct qla_tgt_sess *)__mptr___0 + 0xffffffffffffffd8UL; ldv_66368: ; if ((unsigned long )(& sess->sess_list_entry) != (unsigned long )(& (vha->vha_tgt.qla_tgt)->sess_list)) { goto ldv_66367; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___1 = kzalloc(168UL, 208U); sess = (struct qla_tgt_sess *)tmp___1; if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { ql_dbg(8192U, vha, 61514, "qla_target(%u): session allocation failed, all commands from port %8phC will be refused", (int )vha->vp_idx, (uint8_t *)(& fcport->port_name)); return ((struct qla_tgt_sess *)0); } else { } sess->tgt = vha->vha_tgt.qla_tgt; sess->vha = vha; sess->s_id = fcport->d_id; sess->loop_id = fcport->loop_id; sess->local = (unsigned char )local; ql_dbg(8192U, vha, 61446, "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", sess, vha->vha_tgt.qla_tgt); be_sid[0] = sess->s_id.b.domain; be_sid[1] = sess->s_id.b.area; be_sid[2] = sess->s_id.b.al_pa; tmp___2 = (*((ha->tgt.tgt_ops)->check_initiator_node_acl))(vha, (unsigned char *)(& fcport->port_name), (void *)sess, (uint8_t *)(& be_sid), (int )fcport->loop_id); if (tmp___2 < 0) { kfree((void const *)sess); return ((struct qla_tgt_sess *)0); } else { } kref_get___0(& (sess->se_sess)->sess_kref); sess->conf_compl_supported = 0U; memcpy((void *)(& sess->port_name), (void const *)(& fcport->port_name), 8UL); tmp___3 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___3); list_add_tail(& sess->sess_list_entry, & (vha->vha_tgt.qla_tgt)->sess_list); (vha->vha_tgt.qla_tgt)->sess_count = (vha->vha_tgt.qla_tgt)->sess_count + 1; spin_unlock_irqrestore(& ha->hardware_lock, flags); ql_dbg(8192U, vha, 61515, "qla_target(%d): %ssession for wwn %8phC (loop_id %d, s_id %x:%x:%x, confirmed completion %ssupported) added\n", (int )vha->vp_idx, (int )local ? (char *)"local " : (char *)"", (uint8_t *)(& fcport->port_name), (int )fcport->loop_id, (int )sess->s_id.b.domain, (int )sess->s_id.b.area, (int )sess->s_id.b.al_pa, (unsigned int )*((unsigned char *)sess + 8UL) != 0U ? 
(char *)"" : (char *)"not "); return (sess); } } void qlt_fc_port_added(struct scsi_qla_host *vha , fc_port_t *fcport ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_sess *sess ; unsigned long flags ; bool tmp ; raw_spinlock_t *tmp___0 ; raw_spinlock_t *tmp___1 ; { ha = vha->hw; tgt = vha->vha_tgt.qla_tgt; if ((unsigned long )(vha->hw)->tgt.tgt_ops == (unsigned long )((struct qla_tgt_func_tmpl *)0)) { return; } else { } if ((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0) || (unsigned int )fcport->port_type != 4U) { return; } else { } tmp = qla_ini_mode_enabled(vha); if ((int )tmp) { return; } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); if (tgt->tgt_stop != 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } else { } sess = qlt_find_sess_by_port_name(tgt, (uint8_t const *)(& fcport->port_name)); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { spin_unlock_irqrestore(& ha->hardware_lock, flags); mutex_lock_nested(& vha->vha_tgt.tgt_mutex, 0U); sess = qlt_create_sess(vha, fcport, 0); mutex_unlock(& vha->vha_tgt.tgt_mutex); tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); } else { kref_get___0(& (sess->se_sess)->sess_kref); if ((unsigned int )*((unsigned char *)sess + 8UL) != 0U) { qlt_undelete_sess(sess); ql_dbg(8192U, vha, 61516, "qla_target(%u): %ssession for port %8phC (loop ID %d) reappeared\n", (int )vha->vp_idx, (unsigned int )*((unsigned char *)sess + 8UL) != 0U ? (char *)"local " : (char *)"", (uint8_t *)(& sess->port_name), (int )sess->loop_id); ql_dbg(8192U, vha, 61447, "Reappeared sess %p\n", sess); } else { } (*((ha->tgt.tgt_ops)->update_sess))(sess, fcport->d_id, (int )fcport->loop_id, (fcport->flags & 16U) != 0U); } if ((unsigned long )sess != (unsigned long )((struct qla_tgt_sess *)0) && (unsigned int )*((unsigned char *)sess + 8UL) != 0U) { ql_dbg(8192U, vha, 61517, "qla_target(%u): local session for port %8phC (loop ID %d) became global\n", (int )vha->vp_idx, (uint8_t *)(& fcport->port_name), (int )sess->loop_id); sess->local = 0U; } else { } (*((ha->tgt.tgt_ops)->put_sess))(sess); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } void qlt_fc_port_deleted(struct scsi_qla_host *vha , fc_port_t *fcport ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_sess *sess ; unsigned long flags ; raw_spinlock_t *tmp ; { ha = vha->hw; tgt = vha->vha_tgt.qla_tgt; if ((unsigned long )(vha->hw)->tgt.tgt_ops == (unsigned long )((struct qla_tgt_func_tmpl *)0)) { return; } else { } if ((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0) || (unsigned int )fcport->port_type != 4U) { return; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if (tgt->tgt_stop != 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } else { } sess = qlt_find_sess_by_port_name(tgt, (uint8_t const *)(& fcport->port_name)); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } else { } ql_dbg(8192U, vha, 61448, "qla_tgt_fc_port_deleted %p", sess); sess->local = 1U; qlt_schedule_sess_for_deletion(sess, 0); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } __inline static int test_tgt_sess_count(struct qla_tgt *tgt ) { struct qla_hw_data *ha ; unsigned long flags ; int res ; raw_spinlock_t *tmp ; int tmp___0 ; { ha = tgt->ha; tmp = spinlock_check(& ha->hardware_lock); flags = 
_raw_spin_lock_irqsave(tmp); tmp___0 = list_empty((struct list_head const *)(& tgt->sess_list)); ql_dbg(16384U, tgt->vha, 57346, "tgt %p, empty(sess_list)=%d sess_count=%d\n", tgt, tmp___0, tgt->sess_count); res = tgt->sess_count == 0; spin_unlock_irqrestore(& ha->hardware_lock, flags); return (res); } } int qlt_stop_phase1(struct qla_tgt *tgt ) { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; unsigned long flags ; struct Scsi_Host *sh ; struct fc_host_attrs *fc_host ; bool npiv_vports ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; raw_spinlock_t *tmp___1 ; raw_spinlock_t *tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; wait_queue_t __wait ; long __ret ; long __int ; long tmp___6 ; int tmp___7 ; bool tmp___8 ; int tmp___9 ; wait_queue_t __wait___0 ; long __ret___0 ; long __int___0 ; long tmp___10 ; int tmp___11 ; { vha = tgt->vha; ha = tgt->ha; mutex_lock_nested(& qla_tgt_mutex, 0U); if ((unsigned long )vha->fc_vport == (unsigned long )((struct fc_vport *)0)) { sh = vha->host; fc_host = (struct fc_host_attrs *)sh->shost_data; tmp = spinlock_check(sh->host_lock); flags = _raw_spin_lock_irqsave(tmp); npiv_vports = (unsigned int )fc_host->npiv_vports_inuse != 0U; spin_unlock_irqrestore(sh->host_lock, flags); if ((int )npiv_vports) { mutex_unlock(& qla_tgt_mutex); return (-1); } else { } } else { } if (tgt->tgt_stop != 0 || tgt->tgt_stopped != 0) { ql_dbg(8192U, vha, 61518, "Already in tgt->tgt_stop or tgt_stopped state\n"); mutex_unlock(& qla_tgt_mutex); return (-1); } else { } ql_dbg(16384U, vha, 57347, "Stopping target for host %ld(%p)\n", vha->host_no, vha); mutex_lock_nested(& vha->vha_tgt.tgt_mutex, 0U); tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); tgt->tgt_stop = 1; qlt_clear_tgt_db(tgt); spin_unlock_irqrestore(& ha->hardware_lock, flags); mutex_unlock(& vha->vha_tgt.tgt_mutex); mutex_unlock(& qla_tgt_mutex); ldv_flush_delayed_work_301(& tgt->sess_del_work); ql_dbg(8192U, vha, 61449, "Waiting for sess works (tgt %p)", tgt); tmp___1 = spinlock_check(& tgt->sess_work_lock); flags = _raw_spin_lock_irqsave(tmp___1); goto ldv_66429; ldv_66428: spin_unlock_irqrestore(& tgt->sess_work_lock, flags); flush_scheduled_work(); tmp___2 = spinlock_check(& tgt->sess_work_lock); flags = _raw_spin_lock_irqsave(tmp___2); ldv_66429: tmp___3 = list_empty((struct list_head const *)(& tgt->sess_works_list)); if (tmp___3 == 0) { goto ldv_66428; } else { } spin_unlock_irqrestore(& tgt->sess_work_lock, flags); tmp___4 = list_empty((struct list_head const *)(& tgt->sess_list)); ql_dbg(8192U, vha, 61450, "Waiting for tgt %p: list_empty(sess_list)=%d sess_count=%d\n", tgt, tmp___4, tgt->sess_count); __might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c", 893, 0); tmp___5 = test_tgt_sess_count(tgt); if (tmp___5 != 0) { goto ldv_66431; } else { } __ret = 0L; INIT_LIST_HEAD(& __wait.task_list); __wait.flags = 0U; ldv_66437: tmp___6 = prepare_to_wait_event(& tgt->waitQ, & __wait, 2); __int = tmp___6; tmp___7 = test_tgt_sess_count(tgt); if (tmp___7 != 0) { goto ldv_66436; } else { } schedule(); goto ldv_66437; ldv_66436: finish_wait(& tgt->waitQ, & __wait); ldv_66431: ; if (*((unsigned long *)ha + 2UL) == 0UL) { tmp___8 = qla_tgt_mode_enabled(vha); if ((int )tmp___8) { qlt_disable_vha(vha); } else { } } else { } 
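/* The wait_event() expansion above (prepare_to_wait_event/schedule/finish_wait)
 * blocks until test_tgt_sess_count(tgt) reports sess_count == 0; after target
 * mode has optionally been disabled via qlt_disable_vha(), the same wait
 * pattern is repeated below before qlt_stop_phase1() returns 0. */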
__might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c", 900, 0); tmp___9 = test_tgt_sess_count(tgt); if (tmp___9 != 0) { goto ldv_66439; } else { } __ret___0 = 0L; INIT_LIST_HEAD(& __wait___0.task_list); __wait___0.flags = 0U; ldv_66445: tmp___10 = prepare_to_wait_event(& tgt->waitQ, & __wait___0, 2); __int___0 = tmp___10; tmp___11 = test_tgt_sess_count(tgt); if (tmp___11 != 0) { goto ldv_66444; } else { } schedule(); goto ldv_66445; ldv_66444: finish_wait(& tgt->waitQ, & __wait___0); ldv_66439: ; return (0); } } static char const __kstrtab_qlt_stop_phase1[16U] = { 'q', 'l', 't', '_', 's', 't', 'o', 'p', '_', 'p', 'h', 'a', 's', 'e', '1', '\000'}; struct kernel_symbol const __ksymtab_qlt_stop_phase1 ; struct kernel_symbol const __ksymtab_qlt_stop_phase1 = {(unsigned long )(& qlt_stop_phase1), (char const *)(& __kstrtab_qlt_stop_phase1)}; void qlt_stop_phase2(struct qla_tgt *tgt ) { struct qla_hw_data *ha ; scsi_qla_host_t *vha ; void *tmp ; unsigned long flags ; raw_spinlock_t *tmp___0 ; raw_spinlock_t *tmp___1 ; { ha = tgt->ha; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; if (tgt->tgt_stopped != 0) { ql_dbg(8192U, vha, 61519, "Already in tgt->tgt_stopped state\n"); dump_stack(); return; } else { } ql_dbg(8192U, vha, 61451, "Waiting for %d IRQ commands to complete (tgt %p)", tgt->irq_cmd_count, tgt); mutex_lock_nested(& vha->vha_tgt.tgt_mutex, 0U); tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); goto ldv_66467; ldv_66466: spin_unlock_irqrestore(& ha->hardware_lock, flags); __const_udelay(8590UL); tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); ldv_66467: ; if (tgt->irq_cmd_count != 0) { goto ldv_66466; } else { } tgt->tgt_stop = 0; tgt->tgt_stopped = 1; spin_unlock_irqrestore(& ha->hardware_lock, flags); mutex_unlock(& vha->vha_tgt.tgt_mutex); ql_dbg(8192U, vha, 61452, "Stop of tgt %p finished", tgt); return; } } static char const __kstrtab_qlt_stop_phase2[16U] = { 'q', 'l', 't', '_', 's', 't', 'o', 'p', '_', 'p', 'h', 'a', 's', 'e', '2', '\000'}; struct kernel_symbol const __ksymtab_qlt_stop_phase2 ; struct kernel_symbol const __ksymtab_qlt_stop_phase2 = {(unsigned long )(& qlt_stop_phase2), (char const *)(& __kstrtab_qlt_stop_phase2)}; static void qlt_release(struct qla_tgt *tgt ) { scsi_qla_host_t *vha ; { vha = tgt->vha; if ((unsigned long )vha->vha_tgt.qla_tgt != (unsigned long )((struct qla_tgt *)0) && tgt->tgt_stopped == 0) { qlt_stop_phase2(tgt); } else { } vha->vha_tgt.qla_tgt = (struct qla_tgt *)0; ql_dbg(8192U, vha, 61453, "Release of tgt %p finished\n", tgt); kfree((void const *)tgt); return; } } static int qlt_sched_sess_work(struct qla_tgt *tgt , int type , void const *param , unsigned int param_size ) { struct qla_tgt_sess_work_param *prm ; unsigned long flags ; void *tmp ; raw_spinlock_t *tmp___0 ; { tmp = kzalloc(88UL, 32U); prm = (struct qla_tgt_sess_work_param *)tmp; if ((unsigned long )prm == (unsigned long )((struct qla_tgt_sess_work_param *)0)) { ql_dbg(8192U, tgt->vha, 61520, "qla_target(%d): Unable to create session work, command will be refused", 0); return (-12); } else { } ql_dbg(8192U, tgt->vha, 61454, "Scheduling work (type %d, prm %p) to find session for param %p (size %d, tgt %p)\n", type, prm, param, param_size, tgt); prm->type = type; memcpy((void *)(& prm->__annonCompField127.tm_iocb), 
param, (size_t )param_size); tmp___0 = spinlock_check(& tgt->sess_work_lock); flags = _raw_spin_lock_irqsave(tmp___0); list_add_tail(& prm->sess_works_list_entry, & tgt->sess_works_list); spin_unlock_irqrestore(& tgt->sess_work_lock, flags); schedule_work___0(& tgt->sess_work); return (0); } } static void qlt_send_notify_ack(struct scsi_qla_host *vha , struct imm_ntfy_from_isp *ntfy , uint32_t add_flags , uint16_t resp_code , int resp_code_valid , uint16_t srr_flags , uint16_t srr_reject_code , uint8_t srr_explan ) { struct qla_hw_data *ha ; request_t *pkt ; struct nack_to_isp *nack ; int tmp ; void *tmp___0 ; { ha = vha->hw; ql_dbg(16384U, vha, 57348, "Sending NOTIFY_ACK (ha=%p)\n", ha); tmp = qlt_issue_marker(vha, 1); if (tmp != 0) { return; } else { } tmp___0 = qla2x00_alloc_iocbs(vha, (srb_t *)0); pkt = (request_t *)tmp___0; if ((unsigned long )pkt == (unsigned long )((request_t *)0)) { ql_dbg(16384U, vha, 57417, "qla_target(%d): %s failed: unable to allocate request packet\n", (int )vha->vp_idx, "qlt_send_notify_ack"); return; } else { } if ((unsigned long )vha->vha_tgt.qla_tgt != (unsigned long )((struct qla_tgt *)0)) { (vha->vha_tgt.qla_tgt)->notify_ack_expected = (vha->vha_tgt.qla_tgt)->notify_ack_expected + 1; } else { } pkt->entry_type = 14U; pkt->entry_count = 1U; nack = (struct nack_to_isp *)pkt; nack->ox_id = ntfy->ox_id; nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; if ((unsigned int )ntfy->u.isp24.status == 70U) { nack->u.isp24.flags = (unsigned int )ntfy->u.isp24.flags & 1U; } else { } nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; nack->u.isp24.status = ntfy->u.isp24.status; nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; nack->u.isp24.srr_flags = srr_flags; nack->u.isp24.srr_reject_code = (uint8_t )srr_reject_code; nack->u.isp24.srr_reject_code_expl = srr_explan; nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; ql_dbg(16384U, vha, 57349, "qla_target(%d): Sending 24xx Notify Ack %d\n", (int )vha->vp_idx, (int )nack->u.isp24.status); __asm__ volatile ("sfence": : : "memory"); qla2x00_start_iocbs(vha, vha->req); return; } } static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha , struct abts_recv_from_24xx *abts , uint32_t status , bool ids_reversed ) { struct qla_hw_data *ha ; struct abts_resp_to_24xx *resp ; uint32_t f_ctl ; uint8_t *p ; int tmp ; void *tmp___0 ; uint8_t *tmp___1 ; uint8_t *tmp___2 ; { ha = vha->hw; ql_dbg(16384U, vha, 57350, "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n", ha, abts, status); tmp = qlt_issue_marker(vha, 1); if (tmp != 0) { return; } else { } tmp___0 = qla2x00_alloc_iocbs_ready(vha, (srb_t *)0); resp = (struct abts_resp_to_24xx *)tmp___0; if ((unsigned long )resp == (unsigned long )((struct abts_resp_to_24xx *)0)) { ql_dbg(16384U, vha, 57418, "qla_target(%d): %s failed: unable to allocate request packet", (int )vha->vp_idx, "qlt_24xx_send_abts_resp"); return; } else { } resp->entry_type = 85U; resp->entry_count = 1U; resp->nport_handle = abts->nport_handle; resp->vp_index = (uint8_t )vha->vp_idx; resp->sof_type = abts->sof_type; resp->exchange_address = abts->exchange_address; resp->fcp_hdr_le = abts->fcp_hdr_le; f_ctl = 10027008U; p = (uint8_t *)(& f_ctl); tmp___1 = p; p = p + 1; resp->fcp_hdr_le.f_ctl[0] = *tmp___1; tmp___2 = p; p = p + 1; resp->fcp_hdr_le.f_ctl[1] = 
*tmp___2; resp->fcp_hdr_le.f_ctl[2] = *p; if ((int )ids_reversed) { resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0]; resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1]; resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2]; resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0]; resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1]; resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2]; } else { resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0]; resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1]; resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2]; resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0]; resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1]; resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2]; } resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; if (status == 0U) { resp->fcp_hdr_le.r_ctl = 132U; resp->payload.ba_acct.seq_id_valid = 0U; resp->payload.ba_acct.low_seq_cnt = 0U; resp->payload.ba_acct.high_seq_cnt = 65535U; resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; } else { resp->fcp_hdr_le.r_ctl = 133U; resp->payload.ba_rjt.reason_code = 9U; } (vha->vha_tgt.qla_tgt)->abts_resp_expected = (vha->vha_tgt.qla_tgt)->abts_resp_expected + 1; __asm__ volatile ("sfence": : : "memory"); qla2x00_start_iocbs(vha, vha->req); return; } } static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha , struct abts_resp_from_24xx_fw *entry ) { struct ctio7_to_24xx *ctio ; int tmp ; void *tmp___0 ; { ql_dbg(16384U, vha, 57351, "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw); tmp = qlt_issue_marker(vha, 1); if (tmp != 0) { return; } else { } tmp___0 = qla2x00_alloc_iocbs_ready(vha, (srb_t *)0); ctio = (struct ctio7_to_24xx *)tmp___0; if ((unsigned long )ctio == (unsigned long )((struct ctio7_to_24xx *)0)) { ql_dbg(16384U, vha, 57419, "qla_target(%d): %s failed: unable to allocate request packet\n", (int )vha->vp_idx, "qlt_24xx_retry_term_exchange"); return; } else { } ctio->entry_type = 18U; ctio->entry_count = 1U; ctio->nport_handle = entry->nport_handle; ctio->handle = 4294967295U; ctio->timeout = 10U; ctio->vp_index = (uint8_t )vha->vp_idx; ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0]; ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1]; ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2]; ctio->exchange_addr = entry->exchange_addr_to_abort; ctio->u.status1.flags = 16448U; ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; __asm__ volatile ("sfence": : : "memory"); qla2x00_start_iocbs(vha, vha->req); qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry, 0U, 1); return; } } static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha , struct abts_recv_from_24xx *abts , struct qla_tgt_sess *sess ) { struct qla_hw_data *ha ; struct se_session *se_sess ; struct qla_tgt_mgmt_cmd *mcmd ; struct se_cmd *se_cmd ; u32 lun ; int rc ; bool found_lun ; struct list_head const *__mptr ; struct qla_tgt_cmd *cmd ; struct se_cmd const *__mptr___0 ; struct list_head const *__mptr___1 ; void *tmp ; { ha = vha->hw; se_sess = sess->se_sess; lun = 0U; found_lun = 0; spin_lock(& se_sess->sess_cmd_lock); __mptr = (struct list_head const *)se_sess->sess_cmd_list.next; se_cmd = (struct se_cmd *)__mptr + 0xffffffffffffff70UL; goto ldv_66543; ldv_66542: __mptr___0 = (struct se_cmd const *)se_cmd; cmd = (struct qla_tgt_cmd *)__mptr___0; if (se_cmd->tag == (u64 )abts->exchange_addr_to_abort) { lun = cmd->unpacked_lun; found_lun = 1; goto ldv_66541; } else { } __mptr___1 = (struct list_head const *)se_cmd->se_cmd_list.next; se_cmd = 
(struct se_cmd *)__mptr___1 + 0xffffffffffffff70UL; ldv_66543: ; if ((unsigned long )(& se_cmd->se_cmd_list) != (unsigned long )(& se_sess->sess_cmd_list)) { goto ldv_66542; } else { } ldv_66541: spin_unlock(& se_sess->sess_cmd_lock); if (! found_lun) { return (-2); } else { } ql_dbg(8192U, vha, 61455, "qla_target(%d): task abort (tag=%d)\n", (int )vha->vp_idx, abts->exchange_addr_to_abort); tmp = mempool_alloc(qla_tgt_mgmt_cmd_mempool, 32U); mcmd = (struct qla_tgt_mgmt_cmd *)tmp; if ((unsigned long )mcmd == (unsigned long )((struct qla_tgt_mgmt_cmd *)0)) { ql_dbg(8192U, vha, 61521, "qla_target(%d): %s: Allocation of ABORT cmd failed", (int )vha->vp_idx, "__qlt_24xx_handle_abts"); return (-12); } else { } memset((void *)mcmd, 0, 1000UL); mcmd->sess = sess; memcpy((void *)(& mcmd->orig_iocb.abts), (void const *)abts, 64UL); mcmd->reset_count = (vha->hw)->chip_reset; rc = (*((ha->tgt.tgt_ops)->handle_tmr))(mcmd, lun, 1, abts->exchange_addr_to_abort); if (rc != 0) { ql_dbg(8192U, vha, 61522, "qla_target(%d): tgt_ops->handle_tmr() failed: %d", (int )vha->vp_idx, rc); mempool_free((void *)mcmd, qla_tgt_mgmt_cmd_mempool); return (-14); } else { } return (0); } } static void qlt_24xx_handle_abts(struct scsi_qla_host *vha , struct abts_recv_from_24xx *abts ) { struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; uint32_t tag ; uint8_t s_id[3U] ; int rc ; { ha = vha->hw; tag = abts->exchange_addr_to_abort; if ((int )abts->fcp_hdr_le.parameter & 1) { ql_dbg(8192U, vha, 61523, "qla_target(%d): ABTS: Abort Sequence not supported\n", (int )vha->vp_idx); qlt_24xx_send_abts_resp(vha, abts, 4U, 0); return; } else { } if (tag == 4294967295U) { ql_dbg(8192U, vha, 61456, "qla_target(%d): ABTS: Unknown Exchange Address received\n", (int )vha->vp_idx); qlt_24xx_send_abts_resp(vha, abts, 4U, 0); return; } else { } ql_dbg(8192U, vha, 61457, "qla_target(%d): task abort (s_id=%x:%x:%x, tag=%d, param=%x)\n", (int )vha->vp_idx, (int )abts->fcp_hdr_le.s_id[2], (int )abts->fcp_hdr_le.s_id[1], (int )abts->fcp_hdr_le.s_id[0], tag, abts->fcp_hdr_le.parameter); s_id[0] = abts->fcp_hdr_le.s_id[2]; s_id[1] = abts->fcp_hdr_le.s_id[1]; s_id[2] = abts->fcp_hdr_le.s_id[0]; sess = (*((ha->tgt.tgt_ops)->find_sess_by_s_id))(vha, (uint8_t const *)(& s_id)); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { ql_dbg(8192U, vha, 61458, "qla_target(%d): task abort for non-existant session\n", (int )vha->vp_idx); rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt, 1, (void const *)abts, 64U); if (rc != 0) { qlt_24xx_send_abts_resp(vha, abts, 4U, 0); } else { } return; } else { } rc = __qlt_24xx_handle_abts(vha, abts, sess); if (rc != 0) { ql_dbg(8192U, vha, 61524, "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n", (int )vha->vp_idx, rc); qlt_24xx_send_abts_resp(vha, abts, 4U, 0); return; } else { } return; } } static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha , struct qla_tgt_mgmt_cmd *mcmd , uint32_t resp_code ) { struct atio_from_isp *atio ; struct ctio7_to_24xx *ctio ; uint16_t temp ; int tmp ; void *tmp___0 ; __u16 tmp___1 ; { atio = & mcmd->orig_iocb.atio; ql_dbg(16384U, ha, 57352, "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", ha, atio, resp_code); tmp = qlt_issue_marker(ha, 1); if (tmp != 0) { return; } else { } tmp___0 = qla2x00_alloc_iocbs(ha, (srb_t *)0); ctio = (struct ctio7_to_24xx *)tmp___0; if ((unsigned long )ctio == (unsigned long )((struct ctio7_to_24xx *)0)) { ql_dbg(16384U, ha, 57420, "qla_target(%d): %s failed: unable to allocate request packet\n", (int )ha->vp_idx, 
"qlt_24xx_send_task_mgmt_ctio"); return; } else { } ctio->entry_type = 18U; ctio->entry_count = 1U; ctio->handle = 4294967295U; ctio->nport_handle = (mcmd->sess)->loop_id; ctio->timeout = 10U; ctio->vp_index = (uint8_t )ha->vp_idx; ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; ctio->exchange_addr = atio->u.isp24.exchange_addr; ctio->u.status1.flags = (uint16_t )((int )((short )((int )atio->u.isp24.attr << 9)) | -32704); tmp___1 = __fswab16((int )atio->u.isp24.fcp_hdr.ox_id); temp = tmp___1; ctio->u.status1.ox_id = temp; ctio->u.status1.scsi_status = 256U; ctio->u.status1.response_len = 8U; ctio->u.status1.sense_data[0] = (uint8_t )resp_code; __asm__ volatile ("sfence": : : "memory"); qla2x00_start_iocbs(ha, ha->req); return; } } void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd ) { { mempool_free((void *)mcmd, qla_tgt_mgmt_cmd_mempool); return; } } static char const __kstrtab_qlt_free_mcmd[14U] = { 'q', 'l', 't', '_', 'f', 'r', 'e', 'e', '_', 'm', 'c', 'm', 'd', '\000'}; struct kernel_symbol const __ksymtab_qlt_free_mcmd ; struct kernel_symbol const __ksymtab_qlt_free_mcmd = {(unsigned long )(& qlt_free_mcmd), (char const *)(& __kstrtab_qlt_free_mcmd)}; void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd ) { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; int tmp___0 ; int tmp___1 ; { vha = (mcmd->sess)->vha; ha = vha->hw; ql_dbg(8192U, vha, 61459, "TM response mcmd (%p) status %#x state %#x", mcmd, (int )mcmd->fc_tm_rsp, mcmd->flags); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___1 = qla2x00_reset_active(vha); if (tmp___1 != 0 || mcmd->reset_count != ha->chip_reset) { tmp___0 = qla2x00_reset_active(vha); ql_dbg(33554432U, vha, 57600, "RESET-TMR active/old-count/new-count = %d/%d/%d.\n", tmp___0, mcmd->reset_count, ha->chip_reset); (*((ha->tgt.tgt_ops)->free_mcmd))(mcmd); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } else { } if (mcmd->flags == 1U) { qlt_send_notify_ack(vha, & mcmd->orig_iocb.imm_ntfy, 0U, 0, 0, 0, 0, 0); } else if ((unsigned int )(mcmd->se_cmd.se_tmr_req)->function == 1U) { qlt_24xx_send_abts_resp(vha, & mcmd->orig_iocb.abts, (uint32_t )mcmd->fc_tm_rsp, 0); } else { qlt_24xx_send_task_mgmt_ctio(vha, mcmd, (uint32_t )mcmd->fc_tm_rsp); } (*((ha->tgt.tgt_ops)->free_mcmd))(mcmd); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static char const __kstrtab_qlt_xmit_tm_rsp[16U] = { 'q', 'l', 't', '_', 'x', 'm', 'i', 't', '_', 't', 'm', '_', 'r', 's', 'p', '\000'}; struct kernel_symbol const __ksymtab_qlt_xmit_tm_rsp ; struct kernel_symbol const __ksymtab_qlt_xmit_tm_rsp = {(unsigned long )(& qlt_xmit_tm_rsp), (char const *)(& __kstrtab_qlt_xmit_tm_rsp)}; static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm ) { struct qla_tgt_cmd *cmd ; long tmp ; long tmp___0 ; int tmp___1 ; long tmp___2 ; { cmd = prm->cmd; tmp = ldv__builtin_expect(cmd->sg_cnt == 0, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"), "i" (1410), "i" (12UL)); ldv_66593: ; goto ldv_66593; } else { } prm->sg = cmd->sg; prm->seg_cnt = 
pci_map_sg(((prm->tgt)->ha)->pdev, cmd->sg, cmd->sg_cnt, (int )cmd->dma_data_direction); tmp___0 = ldv__builtin_expect(prm->seg_cnt == 0, 0L); if (tmp___0 != 0L) { goto out_err; } else { } (prm->cmd)->sg_mapped = 1U; if ((unsigned int )cmd->se_cmd.prot_op == 0U) { if (prm->seg_cnt > (prm->tgt)->datasegs_per_cmd) { prm->req_cnt = prm->req_cnt + (((prm->seg_cnt - (prm->tgt)->datasegs_per_cmd) + (prm->tgt)->datasegs_per_cont) + -1) / (prm->tgt)->datasegs_per_cont; } else { } } else { if ((unsigned int )cmd->se_cmd.prot_op == 1U || (unsigned int )cmd->se_cmd.prot_op == 8U) { prm->seg_cnt = (int )((((uint32_t )cmd->bufflen + cmd->blk_sz) - 1U) / cmd->blk_sz); prm->tot_dsds = (uint16_t )prm->seg_cnt; } else { prm->tot_dsds = (uint16_t )prm->seg_cnt; } if (cmd->prot_sg_cnt != 0U) { prm->prot_sg = cmd->prot_sg; tmp___1 = pci_map_sg(((prm->tgt)->ha)->pdev, cmd->prot_sg, (int )cmd->prot_sg_cnt, (int )cmd->dma_data_direction); prm->prot_seg_cnt = (uint16_t )tmp___1; tmp___2 = ldv__builtin_expect((unsigned int )prm->prot_seg_cnt == 0U, 0L); if (tmp___2 != 0L) { goto out_err; } else { } if ((unsigned int )cmd->se_cmd.prot_op == 1U || (unsigned int )cmd->se_cmd.prot_op == 8U) { prm->prot_seg_cnt = (uint16_t )((((uint32_t )cmd->bufflen + cmd->blk_sz) - 1U) / cmd->blk_sz); prm->tot_dsds = (int )prm->tot_dsds + (int )prm->prot_seg_cnt; } else { prm->tot_dsds = (int )prm->tot_dsds + (int )prm->prot_seg_cnt; } } else { } } return (0); out_err: ql_dbg(16384U, (prm->cmd)->vha, 57421, "qla_target(%d): PCI mapping failed: sg_cnt=%d", 0, (prm->cmd)->sg_cnt); return (-1); } } static void qlt_unmap_sg(struct scsi_qla_host *vha , struct qla_tgt_cmd *cmd ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((unsigned int )*((unsigned char *)cmd + 1104UL) == 0U) { return; } else { } pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, (int )cmd->dma_data_direction); cmd->sg_mapped = 0U; if (cmd->prot_sg_cnt != 0U) { pci_unmap_sg(ha->pdev, cmd->prot_sg, (int )cmd->prot_sg_cnt, (int )cmd->dma_data_direction); } else { } if ((unsigned int )*((unsigned char *)cmd + 1104UL) != 0U) { qla2x00_clean_dsd_pool___0(ha, (srb_t *)0, cmd); } else { } if ((unsigned long )cmd->ctx != (unsigned long )((struct crc_context *)0)) { dma_pool_free(ha->dl_dma_pool, (void *)cmd->ctx, (cmd->ctx)->crc_ctx_dma); } else { } return; } } static int qlt_check_reserve_free_req(struct scsi_qla_host *vha , uint32_t req_cnt ) { uint32_t cnt ; uint32_t cnt_in ; unsigned int tmp ; unsigned int tmp___0 ; long tmp___1 ; { if ((uint32_t )(vha->req)->cnt < req_cnt + 2U) { tmp = readl((void const volatile *)(vha->req)->req_q_out); cnt = (uint32_t )((unsigned short )tmp); tmp___0 = readl((void const volatile *)(vha->req)->req_q_in); cnt_in = (uint32_t )((unsigned short )tmp___0); if ((uint32_t )(vha->req)->ring_index < cnt) { (vha->req)->cnt = (int )((uint16_t )cnt) - (int )(vha->req)->ring_index; } else { (vha->req)->cnt = (int )(vha->req)->length + ((int )((uint16_t )cnt) - (int )(vha->req)->ring_index); } } else { } tmp___1 = ldv__builtin_expect((uint32_t )(vha->req)->cnt < req_cnt + 2U, 0L); if (tmp___1 != 0L) { ql_dbg(134217728U, vha, 12378, "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n", (int )vha->vp_idx, (int )(vha->req)->ring_index, (int )(vha->req)->cnt, req_cnt, cnt, cnt_in, (int )(vha->req)->length); return (-11); } else { } (vha->req)->cnt = (int )(vha->req)->cnt - (int )((uint16_t )req_cnt); return (0); } } __inline static void *qlt_get_req_pkt(struct 
scsi_qla_host *vha ) { { (vha->req)->ring_index = (uint16_t )((int )(vha->req)->ring_index + 1); if ((int )(vha->req)->ring_index == (int )(vha->req)->length) { (vha->req)->ring_index = 0U; (vha->req)->ring_ptr = (vha->req)->ring; } else { (vha->req)->ring_ptr = (vha->req)->ring_ptr + 1; } return ((void *)(vha->req)->ring_ptr); } } __inline static uint32_t qlt_make_handle(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; uint32_t h ; { ha = vha->hw; h = (uint32_t )ha->tgt.current_handle; ldv_66615: h = h + 1U; if (h > 1024U) { h = 1U; } else { } if ((uint32_t )ha->tgt.current_handle == h) { ql_dbg(134217728U, vha, 12379, "qla_target(%d): Ran out of empty cmd slots in ha %p\n", (int )vha->vp_idx, ha); h = 0U; goto ldv_66614; } else { } if ((h == 0U || h == 3758096383U) || (unsigned long )ha->tgt.cmds[h - 1U] != (unsigned long )((struct qla_tgt_cmd *)0)) { goto ldv_66615; } else { } ldv_66614: ; if (h != 0U) { ha->tgt.current_handle = (uint16_t )h; } else { } return (h); } } static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm , struct scsi_qla_host *vha ) { uint32_t h ; struct ctio7_to_24xx *pkt ; struct qla_hw_data *ha ; struct atio_from_isp *atio ; uint16_t temp ; long tmp ; __u16 tmp___0 ; { ha = vha->hw; atio = & (prm->cmd)->atio; pkt = (struct ctio7_to_24xx *)(vha->req)->ring_ptr; prm->pkt = (void *)pkt; memset((void *)pkt, 0, 64UL); pkt->entry_type = 18U; pkt->entry_count = (unsigned char )prm->req_cnt; pkt->vp_index = (uint8_t )vha->vp_idx; h = qlt_make_handle(vha); tmp = ldv__builtin_expect(h == 0U, 0L); if (tmp != 0L) { return (-11); } else { ha->tgt.cmds[h - 1U] = prm->cmd; } pkt->handle = h | 536870912U; pkt->nport_handle = (prm->cmd)->loop_id; pkt->timeout = 10U; pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; pkt->exchange_addr = atio->u.isp24.exchange_addr; pkt->u.status0.flags = (__le16 )((int )((short )pkt->u.status0.flags) | (int )((short )((int )atio->u.isp24.attr << 9))); tmp___0 = __fswab16((int )atio->u.isp24.fcp_hdr.ox_id); temp = tmp___0; pkt->u.status0.ox_id = temp; pkt->u.status0.relative_offset = (unsigned int )(prm->cmd)->offset; return (0); } } static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm , struct scsi_qla_host *vha ) { int cnt ; uint32_t *dword_ptr ; int enable_64bit_addressing ; cont_a64_entry_t *cont_pkt64 ; void *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; { enable_64bit_addressing = (int )(prm->tgt)->tgt_enable_64bit_addr; goto ldv_66637; ldv_66636: tmp = qlt_get_req_pkt(vha); cont_pkt64 = (cont_a64_entry_t *)tmp; memset((void *)cont_pkt64, 0, 64UL); cont_pkt64->entry_count = 1U; cont_pkt64->sys_define = 0U; if (enable_64bit_addressing != 0) { cont_pkt64->entry_type = 10U; dword_ptr = (uint32_t *)(& cont_pkt64->dseg_0_address); } else { cont_pkt64->entry_type = 2U; dword_ptr = & ((cont_entry_t *)cont_pkt64)->dseg_0_address; } cnt = 0; goto ldv_66634; ldv_66633: tmp___0 = dword_ptr; dword_ptr = dword_ptr + 1; *tmp___0 = (unsigned int )(prm->sg)->dma_address; if (enable_64bit_addressing != 0) { tmp___1 = dword_ptr; dword_ptr = dword_ptr + 1; *tmp___1 = (unsigned int )((prm->sg)->dma_address >> 32ULL); } else { } tmp___2 = dword_ptr; dword_ptr = dword_ptr + 1; *tmp___2 = (prm->sg)->dma_length; prm->sg = sg_next(prm->sg); cnt = cnt + 1; prm->seg_cnt = prm->seg_cnt - 1; ldv_66634: ; if ((prm->tgt)->datasegs_per_cont > cnt && prm->seg_cnt != 0) { goto ldv_66633; } else { } ldv_66637: ; if 
(prm->seg_cnt > 0) { goto ldv_66636; } else { } return; } } static void qlt_load_data_segments(struct qla_tgt_prm *prm , struct scsi_qla_host *vha ) { int cnt ; uint32_t *dword_ptr ; int enable_64bit_addressing ; struct ctio7_to_24xx *pkt24 ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; { enable_64bit_addressing = (int )(prm->tgt)->tgt_enable_64bit_addr; pkt24 = (struct ctio7_to_24xx *)prm->pkt; pkt24->u.status0.transfer_length = (unsigned int )(prm->cmd)->bufflen; dword_ptr = (uint32_t *)(& pkt24->u.status0.dseg_0_address); if (prm->seg_cnt != 0) { pkt24->dseg_count = (unsigned short )prm->seg_cnt; } else { } if (prm->seg_cnt == 0) { tmp = dword_ptr; dword_ptr = dword_ptr + 1; *tmp = 0U; *dword_ptr = 0U; return; } else { } cnt = 0; goto ldv_66648; ldv_66647: tmp___0 = dword_ptr; dword_ptr = dword_ptr + 1; *tmp___0 = (unsigned int )(prm->sg)->dma_address; if (enable_64bit_addressing != 0) { tmp___1 = dword_ptr; dword_ptr = dword_ptr + 1; *tmp___1 = (unsigned int )((prm->sg)->dma_address >> 32ULL); } else { } tmp___2 = dword_ptr; dword_ptr = dword_ptr + 1; *tmp___2 = (prm->sg)->dma_length; prm->sg = sg_next(prm->sg); cnt = cnt + 1; prm->seg_cnt = prm->seg_cnt - 1; ldv_66648: ; if ((prm->tgt)->datasegs_per_cmd > cnt && prm->seg_cnt != 0) { goto ldv_66647; } else { } qlt_load_cont_data_segments(prm, vha); return; } } __inline static int qlt_has_data(struct qla_tgt_cmd *cmd ) { { return (cmd->bufflen > 0); } } static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd , struct qla_tgt_prm *prm , int xmit_type , uint8_t scsi_status , uint32_t *full_req_cnt ) { struct qla_tgt *tgt ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct se_cmd *se_cmd ; long tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { tgt = cmd->tgt; vha = tgt->vha; ha = vha->hw; se_cmd = & cmd->se_cmd; tmp = ldv__builtin_expect((unsigned int )*((unsigned char *)cmd + 1104UL) != 0U, 0L); if (tmp != 0L) { ql_dbg(8192U, vha, 61460, "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)", (int )vha->vp_idx, cmd, se_cmd, se_cmd->tag); cmd->state = 4; cmd->cmd_flags = cmd->cmd_flags | 64U; qlt_send_term_exchange(vha, cmd, & cmd->atio, 0); return (5911); } else { } prm->cmd = cmd; prm->tgt = tgt; prm->rq_result = (uint16_t )scsi_status; prm->sense_buffer = (unsigned char *)(& cmd->sense_buffer); prm->sense_buffer_len = 96; prm->sg = (struct scatterlist *)0; prm->seg_cnt = -1; prm->req_cnt = 1; prm->add_status_pkt = 0; tmp___0 = qlt_issue_marker(vha, 0); if (tmp___0 != 0) { return (-14); } else { } if (xmit_type & 1) { tmp___2 = qlt_has_data(cmd); if (tmp___2 != 0) { tmp___1 = qlt_pci_map_calc_cnt(prm); if (tmp___1 != 0) { return (-11); } else { } } else { } } else { } *full_req_cnt = (uint32_t )prm->req_cnt; if ((se_cmd->se_cmd_flags & 8192U) != 0U) { prm->residual = (int )se_cmd->residual_count; ql_dbg(134250496U, vha, 12380, "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", prm->residual, se_cmd->tag, (unsigned long )se_cmd->t_task_cdb != (unsigned long )((unsigned char *)0U) ? (int )*(se_cmd->t_task_cdb) : 0, cmd->bufflen, (int )prm->rq_result); prm->rq_result = (uint16_t )((unsigned int )prm->rq_result | 2048U); } else if ((se_cmd->se_cmd_flags & 4096U) != 0U) { prm->residual = (int )se_cmd->residual_count; ql_dbg(134217728U, vha, 12381, "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", prm->residual, se_cmd->tag, (unsigned long )se_cmd->t_task_cdb != (unsigned long )((unsigned char *)0U) ? 
(int )*(se_cmd->t_task_cdb) : 0, cmd->bufflen, (int )prm->rq_result); prm->rq_result = (uint16_t )((unsigned int )prm->rq_result | 1024U); } else { } if ((xmit_type & 2) != 0) { tmp___3 = qlt_has_data(cmd); if (tmp___3 != 0) { if (((unsigned long )prm->sense_buffer != (unsigned long )((unsigned char *)0U) && ((int )*((uint8_t const *)prm->sense_buffer) & 112) == 112) || ((ha->device_type & 134217728U) != 0U && (unsigned int )prm->rq_result != 0U)) { prm->add_status_pkt = 1; *full_req_cnt = *full_req_cnt + 1U; } else { } } else { } } else { } return (0); } } __inline static int qlt_need_explicit_conf(struct qla_hw_data *ha , struct qla_tgt_cmd *cmd , int sending_sense ) { { if ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U) { return (0); } else { } if (sending_sense != 0) { return ((int )cmd->conf_compl_supported); } else { return ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U && (unsigned int )*((unsigned char *)cmd + 1104UL) != 0U); } } } __inline static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd , int *xmit_type ) { { return; } } static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio , struct qla_tgt_prm *prm ) { uint32_t __min1 ; uint32_t __min2 ; int tmp ; int i ; int tmp___0 ; __u32 tmp___1 ; { __min1 = (uint32_t )prm->sense_buffer_len; __min2 = 24U; prm->sense_buffer_len = (int )(__min1 < __min2 ? __min1 : __min2); ctio->u.status0.flags = (__le16 )((unsigned int )ctio->u.status0.flags | 32768U); tmp = qlt_need_explicit_conf((prm->tgt)->ha, prm->cmd, 0); if (tmp != 0) { ctio->u.status0.flags = (__le16 )((unsigned int )ctio->u.status0.flags | 8224U); } else { } ctio->u.status0.residual = (unsigned int )prm->residual; ctio->u.status0.scsi_status = prm->rq_result; if ((unsigned long )prm->sense_buffer != (unsigned long )((unsigned char *)0U) && ((int )*((uint8_t const *)prm->sense_buffer) & 112) == 112) { tmp___0 = qlt_need_explicit_conf((prm->tgt)->ha, prm->cmd, 1); if (tmp___0 != 0) { if ((unsigned int )(prm->cmd)->se_cmd.scsi_status != 0U) { ql_dbg(16384U, (prm->cmd)->vha, 57367, "Skipping EXPLICIT_CONFORM and CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ non GOOD status\n"); goto skip_explict_conf; } else { } ctio->u.status1.flags = (uint16_t )((unsigned int )ctio->u.status1.flags | 8224U); } else { } skip_explict_conf: ctio->u.status1.flags = ctio->u.status1.flags; ctio->u.status1.flags = (uint16_t )((unsigned int )ctio->u.status1.flags | 64U); ctio->u.status1.scsi_status = (uint16_t )((unsigned int )ctio->u.status1.scsi_status | 512U); ctio->u.status1.sense_length = (unsigned short )prm->sense_buffer_len; i = 0; goto ldv_66683; ldv_66682: tmp___1 = __fswab32(*((uint32_t *)prm->sense_buffer + (unsigned long )i)); *((uint32_t *)(& ctio->u.status1.sense_data) + (unsigned long )i) = tmp___1; i = i + 1; ldv_66683: ; if (prm->sense_buffer_len / 4 > i) { goto ldv_66682; } else { } } else { ctio->u.status1.flags = ctio->u.status1.flags; ctio->u.status1.flags = (uint16_t )((unsigned int )ctio->u.status1.flags | 64U); ctio->u.status1.sense_length = 0U; memset((void *)(& ctio->u.status1.sense_data), 0, 24UL); } return; } } __inline static int qlt_hba_err_chk_enabled(struct se_cmd *se_cmd ) { { switch ((unsigned int )se_cmd->prot_op) { case 2U: ; case 4U: ; if (ql2xenablehba_err_chk > 0) { return (1); } else { } goto ldv_66690; case 32U: ; case 16U: ; if (ql2xenablehba_err_chk > 1) { return (1); } else { } goto ldv_66690; case 1U: ; case 8U: ; return (1); default: ; goto ldv_66690; } ldv_66690: ; return (0); } } __inline static void qlt_set_t10dif_tags(struct se_cmd 
*se_cmd , struct crc_context *ctx ) { uint32_t lba ; int tmp ; int tmp___0 ; int tmp___1 ; uint8_t tmp___2 ; uint8_t tmp___3 ; uint8_t tmp___4 ; { lba = (uint32_t )se_cmd->t_task_lba; ctx->app_tag = 0U; ctx->app_tag_mask[0] = 0U; ctx->app_tag_mask[1] = 0U; switch ((unsigned int )se_cmd->prot_type) { case 0U: ctx->ref_tag = lba; tmp = qlt_hba_err_chk_enabled(se_cmd); if (tmp == 0) { goto ldv_66702; } else { } ctx->ref_tag_mask[0] = 255U; ctx->ref_tag_mask[1] = 255U; ctx->ref_tag_mask[2] = 255U; ctx->ref_tag_mask[3] = 255U; goto ldv_66702; case 1U: ctx->ref_tag = lba; tmp___0 = qlt_hba_err_chk_enabled(se_cmd); if (tmp___0 == 0) { goto ldv_66702; } else { } ctx->ref_tag_mask[0] = 255U; ctx->ref_tag_mask[1] = 255U; ctx->ref_tag_mask[2] = 255U; ctx->ref_tag_mask[3] = 255U; goto ldv_66702; case 2U: ctx->ref_tag = lba; tmp___1 = qlt_hba_err_chk_enabled(se_cmd); if (tmp___1 == 0) { goto ldv_66702; } else { } ctx->ref_tag_mask[0] = 255U; ctx->ref_tag_mask[1] = 255U; ctx->ref_tag_mask[2] = 255U; ctx->ref_tag_mask[3] = 255U; goto ldv_66702; case 3U: tmp___4 = 0U; ctx->ref_tag_mask[3] = tmp___4; tmp___3 = tmp___4; ctx->ref_tag_mask[2] = tmp___3; tmp___2 = tmp___3; ctx->ref_tag_mask[1] = tmp___2; ctx->ref_tag_mask[0] = tmp___2; goto ldv_66702; } ldv_66702: ; return; } } __inline static int qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm , scsi_qla_host_t *vha ) { uint32_t *cur_dsd ; int sgc ; uint32_t transfer_length ; uint32_t data_bytes ; uint32_t dif_bytes ; uint8_t bundling ; uint8_t *clr_ptr ; struct crc_context *crc_ctx_pkt ; struct qla_hw_data *ha ; struct ctio_crc2_to_fw *pkt ; dma_addr_t crc_ctx_dma ; uint16_t fw_prot_opts ; struct qla_tgt_cmd *cmd ; struct se_cmd *se_cmd ; uint32_t h ; struct atio_from_isp *atio ; uint16_t t16 ; int tmp ; long tmp___0 ; __u16 tmp___1 ; struct crc_context *tmp___2 ; void *tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; { transfer_length = 0U; bundling = 1U; crc_ctx_pkt = (struct crc_context *)0; fw_prot_opts = 0U; cmd = prm->cmd; se_cmd = & cmd->se_cmd; atio = & (prm->cmd)->atio; sgc = 0; ha = vha->hw; pkt = (struct ctio_crc2_to_fw *)(vha->req)->ring_ptr; prm->pkt = (void *)pkt; memset((void *)pkt, 0, 64UL); ql_dbg(16384U, vha, 57457, "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", (int )vha->vp_idx, "qlt_build_ctio_crc2_pkt", se_cmd, (unsigned int )se_cmd->prot_op, prm->prot_sg, (int )prm->prot_seg_cnt, se_cmd->t_task_lba); if ((unsigned int )se_cmd->prot_op == 1U || (unsigned int )se_cmd->prot_op == 8U) { bundling = 0U; } else { } data_bytes = (uint32_t )cmd->bufflen; dif_bytes = (data_bytes / cmd->blk_sz) * 8U; switch ((unsigned int )se_cmd->prot_op) { case 1U: ; case 8U: transfer_length = data_bytes; data_bytes = data_bytes + dif_bytes; goto ldv_66730; case 4U: ; case 2U: ; case 16U: ; case 32U: transfer_length = data_bytes + dif_bytes; goto ldv_66730; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"), "i" (2132), "i" (12UL)); ldv_66736: ; goto ldv_66736; } ldv_66730: tmp = qlt_hba_err_chk_enabled(se_cmd); if (tmp == 0) { fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 16U); } else if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { if ((unsigned int 
)se_cmd->prot_type == 1U || (unsigned int )se_cmd->prot_type == 2U) { fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 1024U); } else if ((unsigned int )se_cmd->prot_type == 3U) { fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 2048U); } else { } } else { } switch ((unsigned int )se_cmd->prot_op) { case 1U: ; case 2U: fw_prot_opts = fw_prot_opts; goto ldv_66739; case 4U: ; case 8U: fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 1U); goto ldv_66739; case 16U: ; case 32U: fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 2U); goto ldv_66739; default: fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 2U); goto ldv_66739; } ldv_66739: pkt->entry_type = 122U; pkt->entry_count = 1U; pkt->vp_index = (uint8_t )vha->vp_idx; h = qlt_make_handle(vha); tmp___0 = ldv__builtin_expect(h == 0U, 0L); if (tmp___0 != 0L) { return (-11); } else { ha->tgt.cmds[h - 1U] = prm->cmd; } pkt->handle = h | 536870912U; pkt->nport_handle = (prm->cmd)->loop_id; pkt->timeout = 10U; pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; pkt->exchange_addr = atio->u.isp24.exchange_addr; tmp___1 = __fswab16((int )atio->u.isp24.fcp_hdr.ox_id); t16 = tmp___1; pkt->ox_id = t16; t16 = (int )((uint16_t )atio->u.isp24.attr) << 9U; pkt->flags = (__le16 )((int )pkt->flags | (int )t16); pkt->relative_offset = (unsigned int )(prm->cmd)->offset; if ((unsigned int )cmd->dma_data_direction == 1U) { pkt->flags = 2U; } else if ((unsigned int )cmd->dma_data_direction == 2U) { pkt->flags = 1U; } else { } pkt->dseg_count = prm->tot_dsds; pkt->transfer_length = transfer_length; tmp___3 = dma_pool_alloc(ha->dl_dma_pool, 32U, & crc_ctx_dma); tmp___2 = (struct crc_context *)tmp___3; cmd->ctx = tmp___2; crc_ctx_pkt = tmp___2; if ((unsigned long )crc_ctx_pkt == (unsigned long )((struct crc_context *)0)) { goto crc_queuing_error; } else { } clr_ptr = (uint8_t *)crc_ctx_pkt; memset((void *)clr_ptr, 0, 360UL); crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; INIT_LIST_HEAD(& crc_ctx_pkt->dsd_list); crc_ctx_pkt->handle = pkt->handle; qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt); pkt->crc_context_address[0] = (unsigned int )crc_ctx_dma; pkt->crc_context_address[1] = (unsigned int )(crc_ctx_dma >> 32ULL); pkt->crc_context_len = 64U; if ((unsigned int )bundling == 0U) { cur_dsd = (uint32_t *)(& crc_ctx_pkt->u.nobundling.data_address); } else { fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 256U); crc_ctx_pkt->u.bundling.dif_byte_count = dif_bytes; crc_ctx_pkt->u.bundling.dseg_count = (int )prm->tot_dsds - (int )prm->prot_seg_cnt; cur_dsd = (uint32_t *)(& crc_ctx_pkt->u.bundling.data_address); } crc_ctx_pkt->blk_size = (unsigned short )cmd->blk_sz; crc_ctx_pkt->prot_opts = fw_prot_opts; crc_ctx_pkt->byte_count = data_bytes; crc_ctx_pkt->guard_seed = 0U; pkt->flags = (__le16 )((unsigned int )pkt->flags | 4U); if ((unsigned int )bundling == 0U && (unsigned int )prm->prot_seg_cnt != 0U) { tmp___4 = qla24xx_walk_and_build_sglist_no_difb(ha, (srb_t *)0, cur_dsd, (int )prm->tot_dsds, cmd); if (tmp___4 != 0) { goto crc_queuing_error; } else { } } else { tmp___5 = qla24xx_walk_and_build_sglist(ha, (srb_t *)0, cur_dsd, (int )prm->tot_dsds - (int )prm->prot_seg_cnt, cmd); if (tmp___5 != 0) { goto crc_queuing_error; } else { } } if ((unsigned int )bundling != 0U && (unsigned int )prm->prot_seg_cnt != 0U) { pkt->add_flags = (uint8_t )((unsigned int )pkt->add_flags | 8U); cur_dsd = (uint32_t *)(& 
crc_ctx_pkt->u.bundling.dif_address); tmp___6 = qla24xx_walk_and_build_prot_sglist(ha, (srb_t *)0, cur_dsd, (int )prm->prot_seg_cnt, cmd); if (tmp___6 != 0) { goto crc_queuing_error; } else { } } else { } return (0); crc_queuing_error: ; return (258); } } int qlt_xmit_response(struct qla_tgt_cmd *cmd , int xmit_type , uint8_t scsi_status ) { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct ctio7_to_24xx *pkt ; struct qla_tgt_prm prm ; uint32_t full_req_cnt ; unsigned long flags ; int res ; long tmp ; raw_spinlock_t *tmp___0 ; int tmp___1 ; int tmp___2 ; long tmp___3 ; long tmp___4 ; int tmp___5 ; struct ctio7_to_24xx *ctio ; void *tmp___6 ; struct _ddebug descriptor ; long tmp___7 ; int tmp___8 ; { vha = cmd->vha; ha = vha->hw; full_req_cnt = 0U; flags = 0UL; memset((void *)(& prm), 0, 80UL); qlt_check_srr_debug(cmd, & xmit_type); ql_dbg(16384U, cmd->vha, 57368, "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n", (xmit_type & 2) != 0, cmd->bufflen, cmd->sg_cnt, (unsigned int )cmd->dma_data_direction, & cmd->se_cmd); res = qlt_pre_xmit_response(cmd, & prm, xmit_type, (int )scsi_status, & full_req_cnt); tmp = ldv__builtin_expect(res != 0, 0L); if (tmp != 0L) { if (res == 5911) { return (0); } else { } return (res); } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); tmp___2 = qla2x00_reset_active(vha); if (tmp___2 != 0 || cmd->reset_count != ha->chip_reset) { cmd->state = 3; qlt_abort_cmd_on_host_reset(cmd->vha, cmd); tmp___1 = qla2x00_reset_active(vha); ql_dbg(33554432U, vha, 57601, "RESET-RSP active/old-count/new-count = %d/%d/%d.\n", tmp___1, cmd->reset_count, ha->chip_reset); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); } else { } res = qlt_check_reserve_free_req(vha, full_req_cnt); tmp___3 = ldv__builtin_expect(res != 0, 0L); if (tmp___3 != 0L) { goto out_unmap_unlock; } else { } if ((unsigned int )cmd->se_cmd.prot_op != 0U && xmit_type & 1) { res = qlt_build_ctio_crc2_pkt(& prm, vha); } else { res = qlt_24xx_build_ctio_pkt(& prm, vha); } tmp___4 = ldv__builtin_expect(res != 0, 0L); if (tmp___4 != 0L) { goto out_unmap_unlock; } else { } pkt = (struct ctio7_to_24xx *)prm.pkt; tmp___8 = qlt_has_data(cmd); if (tmp___8 != 0 && xmit_type & 1) { pkt->u.status0.flags = (__le16 )((unsigned int )pkt->u.status0.flags | 2U); if ((unsigned int )cmd->se_cmd.prot_op == 0U) { qlt_load_data_segments(& prm, vha); } else { } if (prm.add_status_pkt == 0) { if ((xmit_type & 2) != 0) { pkt->u.status0.scsi_status = prm.rq_result; pkt->u.status0.residual = (unsigned int )prm.residual; pkt->u.status0.flags = (__le16 )((unsigned int )pkt->u.status0.flags | 32768U); tmp___5 = qlt_need_explicit_conf(ha, cmd, 0); if (tmp___5 != 0) { pkt->u.status0.flags = (__le16 )((unsigned int )pkt->u.status0.flags | 8224U); } else { } } else { } } else { tmp___6 = qlt_get_req_pkt(vha); ctio = (struct ctio7_to_24xx *)tmp___6; ql_dbg(134217728U, vha, 12382, "Building additional status packet 0x%p.\n", ctio); memcpy((void *)ctio, (void const *)pkt, 64UL); ctio->entry_count = 1U; ctio->entry_type = 18U; ctio->dseg_count = 0U; ctio->u.status1.flags = (unsigned int )ctio->u.status1.flags & 65533U; pkt->handle = pkt->handle | 1073741824U; pkt->u.status0.flags = (__le16 )((unsigned int )pkt->u.status0.flags | 256U); qlt_24xx_init_ctio_to_isp(ctio, & prm); descriptor.modname = "qla2xxx"; descriptor.function = "qlt_xmit_response"; descriptor.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"; descriptor.format = "Status CTIO7: %p\n"; descriptor.lineno = 2414U; descriptor.flags = 0U; tmp___7 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___7 != 0L) { __dynamic_pr_debug(& descriptor, "Status CTIO7: %p\n", ctio); } else { } } } else { qlt_24xx_init_ctio_to_isp(pkt, & prm); } cmd->state = 3; cmd->cmd_sent_to_fw = 1U; __asm__ volatile ("sfence": : : "memory"); qla2x00_start_iocbs(vha, vha->req); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); out_unmap_unlock: qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (res); } } static char const __kstrtab_qlt_xmit_response[18U] = { 'q', 'l', 't', '_', 'x', 'm', 'i', 't', '_', 'r', 'e', 's', 'p', 'o', 'n', 's', 'e', '\000'}; struct kernel_symbol const __ksymtab_qlt_xmit_response ; struct kernel_symbol const __ksymtab_qlt_xmit_response = {(unsigned long )(& qlt_xmit_response), (char const *)(& __kstrtab_qlt_xmit_response)}; int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd ) { struct ctio7_to_24xx *pkt ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_prm prm ; unsigned long flags ; int res ; int tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; int tmp___2 ; int tmp___3 ; long tmp___4 ; { vha = cmd->vha; ha = vha->hw; tgt = cmd->tgt; res = 0; memset((void *)(& prm), 0, 80UL); prm.cmd = cmd; prm.tgt = tgt; prm.sg = (struct scatterlist *)0; prm.req_cnt = 1; tmp = qlt_issue_marker(vha, 0); if (tmp != 0) { return (-5); } else { } tmp___0 = qlt_pci_map_calc_cnt(& prm); if (tmp___0 != 0) { return (-11); } else { } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); tmp___3 = qla2x00_reset_active(vha); if (tmp___3 != 0 || cmd->reset_count != ha->chip_reset) { cmd->state = 1; qlt_abort_cmd_on_host_reset(cmd->vha, cmd); tmp___2 = qla2x00_reset_active(vha); ql_dbg(33554432U, vha, 57602, "RESET-XFR active/old-count/new-count = %d/%d/%d.\n", tmp___2, cmd->reset_count, ha->chip_reset); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); } else { } res = qlt_check_reserve_free_req(vha, (uint32_t )prm.req_cnt); if (res != 0) { goto out_unlock_free_unmap; } else { } if ((unsigned int )cmd->se_cmd.prot_op != 0U) { res = qlt_build_ctio_crc2_pkt(& prm, vha); } else { res = qlt_24xx_build_ctio_pkt(& prm, vha); } tmp___4 = ldv__builtin_expect(res != 0, 0L); if (tmp___4 != 0L) { goto out_unlock_free_unmap; } else { } pkt = (struct ctio7_to_24xx *)prm.pkt; pkt->u.status0.flags = (__le16 )((unsigned int )pkt->u.status0.flags | 1U); if ((unsigned int )cmd->se_cmd.prot_op == 0U) { qlt_load_data_segments(& prm, vha); } else { } cmd->state = 1; cmd->cmd_sent_to_fw = 1U; __asm__ volatile ("sfence": : : "memory"); qla2x00_start_iocbs(vha, vha->req); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (res); out_unlock_free_unmap: qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (res); } } static char const __kstrtab_qlt_rdy_to_xfer[16U] = { 'q', 'l', 't', '_', 'r', 'd', 'y', '_', 't', 'o', '_', 'x', 'f', 'e', 'r', '\000'}; struct kernel_symbol const __ksymtab_qlt_rdy_to_xfer ; struct kernel_symbol const __ksymtab_qlt_rdy_to_xfer = {(unsigned long )(& qlt_rdy_to_xfer), (char const *)(& __kstrtab_qlt_rdy_to_xfer)}; __inline static int qlt_handle_dif_error(struct scsi_qla_host 
*vha , struct qla_tgt_cmd *cmd , struct ctio_crc_from_fw *sts ) { uint8_t *ap ; uint8_t *ep ; uint32_t e_ref_tag ; uint32_t a_ref_tag ; uint16_t e_app_tag ; uint16_t a_app_tag ; uint16_t e_guard ; uint16_t a_guard ; uint64_t lba ; __u16 tmp ; __u16 tmp___0 ; __u32 tmp___1 ; __u16 tmp___2 ; __u16 tmp___3 ; __u32 tmp___4 ; uint32_t blocks_done ; uint32_t i ; uint32_t j ; uint32_t k ; uint32_t num_ent ; struct scatterlist *sg ; struct scatterlist *sgl ; { ap = (uint8_t *)(& sts->actual_dif); ep = (uint8_t *)(& sts->expected_dif); lba = cmd->se_cmd.t_task_lba; tmp = __fswab16((int )*((uint16_t *)ap)); a_guard = tmp; tmp___0 = __fswab16((int )*((uint16_t *)ap + 2U)); a_app_tag = tmp___0; tmp___1 = __fswab32(*((uint32_t *)ap + 4U)); a_ref_tag = tmp___1; tmp___2 = __fswab16((int )*((uint16_t *)ep)); e_guard = tmp___2; tmp___3 = __fswab16((int )*((uint16_t *)ep + 2U)); e_app_tag = tmp___3; tmp___4 = __fswab32(*((uint32_t *)ep + 4U)); e_ref_tag = tmp___4; ql_dbg(16384U, vha, 57461, "iocb(s) %p Returned STATUS.\n", sts); ql_dbg(16384U, vha, 61557, "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", (int )cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, a_ref_tag, e_ref_tag, (int )a_app_tag, (int )e_app_tag, (int )a_guard, (int )e_guard); if ((unsigned int )a_app_tag == 65535U && ((unsigned int )cmd->se_cmd.prot_type != 3U || a_ref_tag == 4294967295U)) { blocks_done = (e_ref_tag - (uint32_t )lba) + 1U; cmd->se_cmd.bad_sector = (sector_t )e_ref_tag; cmd->se_cmd.pi_err = 0U; ql_dbg(16384U, vha, 61556, "need to return scsi good\n"); if (cmd->prot_sg_cnt != 0U) { j = 0U; k = 0U; sgl = cmd->prot_sg; i = 0U; sg = sgl; goto ldv_66819; ldv_66818: num_ent = sg->dma_length / 8U; if (k + num_ent < blocks_done) { k = k + num_ent; goto ldv_66816; } else { } j = (blocks_done - k) - 1U; k = blocks_done; goto ldv_66817; ldv_66816: i = i + 1U; sg = sg_next(sg); ldv_66819: ; if (cmd->prot_sg_cnt > i) { goto ldv_66818; } else { } ldv_66817: ; if (k != blocks_done) { ql_log(1U, vha, 61558, "unexpected tag values tag:lba=%u:%llu)\n", e_ref_tag, lba); goto out; } else { } } else { } return (0); } else { } if ((int )e_guard != (int )a_guard) { cmd->se_cmd.pi_err = 21U; cmd->se_cmd.bad_sector = (sector_t )cmd->se_cmd.t_task_lba; ql_log(1U, vha, 57462, "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", (int )cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, a_ref_tag, e_ref_tag, (int )a_app_tag, (int )e_app_tag, (int )a_guard, (int )e_guard, cmd); goto out; } else { } if (e_ref_tag != a_ref_tag) { cmd->se_cmd.pi_err = 23U; cmd->se_cmd.bad_sector = (sector_t )e_ref_tag; ql_log(1U, vha, 57463, "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", (int )cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, a_ref_tag, e_ref_tag, (int )a_app_tag, (int )e_app_tag, (int )a_guard, (int )e_guard, cmd); goto out; } else { } if ((int )e_app_tag != (int )a_app_tag) { cmd->se_cmd.pi_err = 22U; cmd->se_cmd.bad_sector = (sector_t )cmd->se_cmd.t_task_lba; ql_log(1U, vha, 57464, "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", (int )cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, a_ref_tag, e_ref_tag, (int )a_app_tag, (int )e_app_tag, (int )a_guard, (int )e_guard, cmd); goto out; } else { } out: ; return (1); } } static int __qlt_send_term_exchange(struct scsi_qla_host *vha , struct qla_tgt_cmd *cmd , struct 
atio_from_isp *atio ) { struct ctio7_to_24xx *ctio24 ; struct qla_hw_data *ha ; request_t *pkt ; int ret ; uint16_t temp ; void *tmp ; __u16 tmp___0 ; { ha = vha->hw; ret = 0; ql_dbg(16384U, vha, 57372, "Sending TERM EXCH CTIO (ha=%p)\n", ha); tmp = qla2x00_alloc_iocbs_ready(vha, (srb_t *)0); pkt = (request_t *)tmp; if ((unsigned long )pkt == (unsigned long )((request_t *)0)) { ql_dbg(16384U, vha, 57424, "qla_target(%d): %s failed: unable to allocate request packet\n", (int )vha->vp_idx, "__qlt_send_term_exchange"); return (-12); } else { } if ((unsigned long )cmd != (unsigned long )((struct qla_tgt_cmd *)0)) { if (cmd->state <= 2) { ql_dbg(16384U, vha, 57425, "qla_target(%d): Terminating cmd %p with incorrect state %d\n", (int )vha->vp_idx, cmd, cmd->state); } else { ret = 1; } } else { } pkt->entry_count = 1U; pkt->handle = 4294967295U; ctio24 = (struct ctio7_to_24xx *)pkt; ctio24->entry_type = 18U; ctio24->nport_handle = (unsigned long )cmd != (unsigned long )((struct qla_tgt_cmd *)0) ? cmd->loop_id : 65535U; ctio24->timeout = 10U; ctio24->vp_index = (uint8_t )vha->vp_idx; ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; ctio24->exchange_addr = atio->u.isp24.exchange_addr; ctio24->u.status1.flags = (uint16_t )((int )((short )((int )atio->u.isp24.attr << 9)) | 16448); tmp___0 = __fswab16((int )atio->u.isp24.fcp_hdr.ox_id); temp = tmp___0; ctio24->u.status1.ox_id = temp; ctio24->u.status1.residual = get_unaligned_le32((void const *)(& atio->u.isp24.fcp_cmnd.add_cdb) + (unsigned long )atio->u.isp24.fcp_cmnd.add_cdb_len); if (ctio24->u.status1.residual != 0U) { ctio24->u.status1.scsi_status = (uint16_t )((unsigned int )ctio24->u.status1.scsi_status | 2048U); } else { } __asm__ volatile ("sfence": : : "memory"); qla2x00_start_iocbs(vha, vha->req); return (ret); } } static void qlt_send_term_exchange(struct scsi_qla_host *vha , struct qla_tgt_cmd *cmd , struct atio_from_isp *atio , int ha_locked ) { unsigned long flags ; int rc ; int tmp ; raw_spinlock_t *tmp___0 ; int tmp___1 ; { tmp = qlt_issue_marker(vha, ha_locked); if (tmp < 0) { return; } else { } if (ha_locked != 0) { rc = __qlt_send_term_exchange(vha, cmd, atio); if (rc == -12) { qlt_alloc_qfull_cmd(vha, atio, 0, 0); } else { } goto done; } else { } tmp___0 = spinlock_check(& (vha->hw)->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); rc = __qlt_send_term_exchange(vha, cmd, atio); if (rc == -12) { qlt_alloc_qfull_cmd(vha, atio, 0, 0); } else { } spin_unlock_irqrestore(& (vha->hw)->hardware_lock, flags); done: ; if ((unsigned long )cmd != (unsigned long )((struct qla_tgt_cmd *)0) && (cmd->state != 4 || (unsigned int )*((unsigned char *)cmd + 1105UL) == 0U)) { if (ha_locked == 0) { tmp___1 = preempt_count(); if (((unsigned long )tmp___1 & 2096896UL) == 0UL) { msleep(250U); } else { } } else { } qlt_unmap_sg(vha, cmd); (*(((vha->hw)->tgt.tgt_ops)->free_cmd))(cmd); } else { } return; } } static void qlt_init_term_exchange(struct scsi_qla_host *vha ) { struct list_head free_list ; struct qla_tgt_cmd *cmd ; struct qla_tgt_cmd *tcmd ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int tmp ; { (vha->hw)->tgt.leak_exchg_thresh_hold = (uint32_t )((int )((unsigned int )(vha->hw)->fw_xcb_count / 100U) * 75); tcmd = (struct qla_tgt_cmd *)0; cmd = tcmd; tmp = list_empty((struct list_head const *)(& (vha->hw)->tgt.q_full_list)); if (tmp == 0) { INIT_LIST_HEAD(& 
free_list); list_splice_init(& (vha->hw)->tgt.q_full_list, & free_list); __mptr = (struct list_head const *)free_list.next; cmd = (struct qla_tgt_cmd *)__mptr + 0xfffffffffffffb70UL; __mptr___0 = (struct list_head const *)cmd->cmd_list.next; tcmd = (struct qla_tgt_cmd *)__mptr___0 + 0xfffffffffffffb70UL; goto ldv_66857; ldv_66856: list_del(& cmd->cmd_list); qlt_free_cmd(cmd); (vha->hw)->tgt.num_qfull_cmds_alloc = (vha->hw)->tgt.num_qfull_cmds_alloc - 1U; cmd = tcmd; __mptr___1 = (struct list_head const *)tcmd->cmd_list.next; tcmd = (struct qla_tgt_cmd *)__mptr___1 + 0xfffffffffffffb70UL; ldv_66857: ; if ((unsigned long )(& cmd->cmd_list) != (unsigned long )(& free_list)) { goto ldv_66856; } else { } } else { } (vha->hw)->tgt.num_qfull_cmds_dropped = 0U; return; } } static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha ) { uint32_t total_leaked ; { total_leaked = (vha->hw)->tgt.num_qfull_cmds_dropped; if ((vha->hw)->tgt.leak_exchg_thresh_hold != 0U && (vha->hw)->tgt.leak_exchg_thresh_hold < total_leaked) { ql_dbg(16384U, vha, 57465, "Chip reset due to exchange starvation: %d/%d.\n", total_leaked, (int )(vha->hw)->fw_xcb_count); if (((vha->hw)->device_type & 16384U) != 0U || ((vha->hw)->device_type & 262144U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } qla2xxx_wake_dpc(vha); } else { } return; } } void qlt_free_cmd(struct qla_tgt_cmd *cmd ) { struct qla_tgt_sess *sess ; __u16 tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; int __ret_warn_on ; long tmp___3 ; { sess = cmd->sess; tmp = __fswab16((int )cmd->atio.u.isp24.fcp_hdr.ox_id); ql_dbg(16384U, cmd->vha, 57460, "%s: se_cmd[%p] ox_id %04x\n", "qlt_free_cmd", & cmd->se_cmd, (int )tmp); tmp___0 = ldv__builtin_expect((unsigned int )*((unsigned char *)cmd + 1105UL) != 0U, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"), "i" (2804), "i" (12UL)); ldv_66868: ; goto ldv_66868; } else { } if ((unsigned int )*((unsigned char *)cmd + 1104UL) == 0U) { qlt_decr_num_pend_cmds(cmd->vha); } else { } tmp___1 = ldv__builtin_expect((unsigned int )*((unsigned char *)cmd + 1104UL) != 0U, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"), "i" (2809), "i" (12UL)); ldv_66869: ; goto ldv_66869; } else { } cmd->jiffies_at_free = get_jiffies_64(); tmp___2 = ldv__builtin_expect((unsigned int )*((unsigned char *)cmd + 1104UL) != 0U, 0L); if (tmp___2 != 0L) { kfree((void const *)cmd->sg); } else { } if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0) || (unsigned long )sess->se_sess == (unsigned long )((struct se_session *)0)) { __ret_warn_on = 1; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { 
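/* Expanded WARN_ON(): freeing a command that has no session (or whose session
 * has no se_sess) is unexpected, so emit a one-shot warning pointing at
 * qla_target.c:2815 and return early rather than touch the per-session tag
 * pool below. */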
warn_slowpath_null("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c", 2815); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } else { } cmd->jiffies_at_free = get_jiffies_64(); percpu_ida_free(& (sess->se_sess)->sess_tag_pool, cmd->se_cmd.map_tag); return; } } static char const __kstrtab_qlt_free_cmd[13U] = { 'q', 'l', 't', '_', 'f', 'r', 'e', 'e', '_', 'c', 'm', 'd', '\000'}; struct kernel_symbol const __ksymtab_qlt_free_cmd ; struct kernel_symbol const __ksymtab_qlt_free_cmd = {(unsigned long )(& qlt_free_cmd), (char const *)(& __kstrtab_qlt_free_cmd)}; static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha , struct qla_tgt_cmd *cmd , void *ctio ) { struct qla_tgt_srr_ctio *sc ; struct qla_tgt *tgt ; struct qla_tgt_srr_imm *imm ; void *tmp ; int found ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct qla_tgt_srr_imm *ti ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; { tgt = vha->vha_tgt.qla_tgt; tgt->ctio_srr_id = tgt->ctio_srr_id + 1; cmd->cmd_flags = cmd->cmd_flags | 32768U; ql_dbg(8192U, vha, 61465, "qla_target(%d): CTIO with SRR status received\n", (int )vha->vp_idx); if ((unsigned long )ctio == (unsigned long )((void *)0)) { ql_dbg(8192U, vha, 61525, "qla_target(%d): SRR CTIO, but ctio is NULL\n", (int )vha->vp_idx); return (-22); } else { } tmp = kzalloc(32UL, 32U); sc = (struct qla_tgt_srr_ctio *)tmp; if ((unsigned long )sc != (unsigned long )((struct qla_tgt_srr_ctio *)0)) { sc->cmd = cmd; spin_lock(& tgt->srr_lock); sc->srr_id = tgt->ctio_srr_id; list_add_tail(& sc->srr_list_entry, & tgt->srr_ctio_list); ql_dbg(8192U, vha, 61466, "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); if (tgt->imm_srr_id == tgt->ctio_srr_id) { found = 0; __mptr = (struct list_head const *)tgt->srr_imm_list.next; imm = (struct qla_tgt_srr_imm *)__mptr; goto ldv_66894; ldv_66893: ; if (imm->srr_id == sc->srr_id) { found = 1; goto ldv_66892; } else { } __mptr___0 = (struct list_head const *)imm->srr_list_entry.next; imm = (struct qla_tgt_srr_imm *)__mptr___0; ldv_66894: ; if ((unsigned long )(& imm->srr_list_entry) != (unsigned long )(& tgt->srr_imm_list)) { goto ldv_66893; } else { } ldv_66892: ; if (found != 0) { ql_dbg(8192U, vha, 61467, "Scheduling srr work\n"); schedule_work___0(& tgt->srr_work); } else { ql_dbg(8192U, vha, 61526, "qla_target(%d): imm_srr_id == ctio_srr_id (%d), but there is no corresponding SRR IMM, deleting CTIO SRR %p\n", (int )vha->vp_idx, tgt->ctio_srr_id, sc); list_del(& sc->srr_list_entry); spin_unlock(& tgt->srr_lock); kfree((void const *)sc); return (-22); } } else { } spin_unlock(& tgt->srr_lock); } else { ql_dbg(8192U, vha, 61527, "qla_target(%d): Unable to allocate SRR CTIO entry\n", (int )vha->vp_idx); spin_lock(& tgt->srr_lock); __mptr___1 = (struct list_head const *)tgt->srr_imm_list.next; imm = (struct qla_tgt_srr_imm *)__mptr___1; __mptr___2 = (struct list_head const *)imm->srr_list_entry.next; ti = (struct qla_tgt_srr_imm *)__mptr___2; goto ldv_66903; ldv_66902: ; if (imm->srr_id == tgt->ctio_srr_id) { ql_dbg(8192U, vha, 61468, "IMM SRR %p deleted (id %d)\n", imm, imm->srr_id); list_del(& imm->srr_list_entry); qlt_reject_free_srr_imm(vha, imm, 1); } else { } imm = ti; __mptr___3 = (struct list_head const *)ti->srr_list_entry.next; ti = (struct qla_tgt_srr_imm *)__mptr___3; ldv_66903: ; if 
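/* Loop-exit test of what is likely a list_for_each_entry_safe() over
 * tgt->srr_imm_list in the original source: the ldv_* labels and gotos are
 * CIL's expansion of that loop, with 'ti' holding the saved next element so
 * the current entry can be deleted and freed safely. */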
((unsigned long )(& imm->srr_list_entry) != (unsigned long )(& tgt->srr_imm_list)) { goto ldv_66902; } else { } spin_unlock(& tgt->srr_lock); return (-12); } return (0); } } static int qlt_term_ctio_exchange(struct scsi_qla_host *vha , void *ctio , struct qla_tgt_cmd *cmd , uint32_t status ) { int term ; struct ctio7_from_24xx *c ; { term = 0; if ((unsigned long )ctio != (unsigned long )((void *)0)) { c = (struct ctio7_from_24xx *)ctio; term = ((int )c->flags & 16384) == 0; } else { term = 1; } if (term != 0) { qlt_send_term_exchange(vha, cmd, & cmd->atio, 1); } else { } return (term); } } __inline static struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha , uint32_t handle ) { struct qla_hw_data *ha ; struct qla_tgt_cmd *cmd ; { ha = vha->hw; handle = handle - 1U; if ((unsigned long )ha->tgt.cmds[handle] != (unsigned long )((struct qla_tgt_cmd *)0)) { cmd = ha->tgt.cmds[handle]; ha->tgt.cmds[handle] = (struct qla_tgt_cmd *)0; return (cmd); } else { return ((struct qla_tgt_cmd *)0); } } } static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha , uint32_t handle , void *ctio ) { struct qla_tgt_cmd *cmd ; long tmp ; long tmp___0 ; long tmp___1 ; { cmd = (struct qla_tgt_cmd *)0; handle = handle & 2684354559U; if (handle != 0U) { tmp = ldv__builtin_expect(handle == 3758096383U, 0L); if (tmp != 0L) { return ((struct qla_tgt_cmd *)0); } else { } tmp___0 = ldv__builtin_expect(handle > 1024U, 0L); if (tmp___0 != 0L) { ql_dbg(16384U, vha, 57426, "qla_target(%d): Wrong handle %x received\n", (int )vha->vp_idx, handle); return ((struct qla_tgt_cmd *)0); } else { } cmd = qlt_get_cmd(vha, handle); tmp___1 = ldv__builtin_expect((unsigned long )cmd == (unsigned long )((struct qla_tgt_cmd *)0), 0L); if (tmp___1 != 0L) { ql_dbg(16384U, vha, 57427, "qla_target(%d): Suspicious: unable to find the command with handle %x\n", (int )vha->vp_idx, handle); return ((struct qla_tgt_cmd *)0); } else { } } else if ((unsigned long )ctio != (unsigned long )((void *)0)) { ql_dbg(16384U, vha, 57428, "qla_target(%d): Wrong CTIO received: QLA24xx doesn\'t support NULL handles\n", (int )vha->vp_idx); return ((struct qla_tgt_cmd *)0); } else { } return (cmd); } } static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha , struct qla_tgt_cmd *cmd ) { struct qla_hw_data *ha ; uint32_t handle ; { ha = vha->hw; if ((unsigned int )*((unsigned char *)cmd + 1104UL) != 0U) { qlt_unmap_sg(vha, cmd); } else { } handle = qlt_make_handle(vha); if (cmd->state == 3) { ql_dbg(134217728U, vha, 65280, "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle); } else if (cmd->state == 1) { cmd->write_data_transferred = 0U; cmd->state = 2; ql_dbg(134217728U, vha, 65281, "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle); (*((ha->tgt.tgt_ops)->handle_data))(cmd); return; } else if (cmd->state == 4) { ql_dbg(134217728U, vha, 65282, "HOST-ABORT: handle=%d, state=ABORTED.\n", handle); } else { ql_dbg(134217728U, vha, 65283, "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle, cmd->state); dump_stack(); } cmd->cmd_flags = cmd->cmd_flags | 4096U; (*((ha->tgt.tgt_ops)->free_cmd))(cmd); return; } } void qlt_host_reset_handler(struct qla_hw_data *ha ) { struct qla_tgt_cmd *cmd ; unsigned long flags ; scsi_qla_host_t *base_vha ; void *tmp ; scsi_qla_host_t *vha ; struct qla_tgt *tgt ; uint32_t i ; bool tmp___0 ; raw_spinlock_t *tmp___1 ; { tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; vha = (scsi_qla_host_t *)0; tgt = base_vha->vha_tgt.qla_tgt; if ((unsigned long )(base_vha->hw)->tgt.tgt_ops == (unsigned long 
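/* qlt_host_reset_handler: nothing to do if target mode was never enabled
 * (no tgt_ops registered). Otherwise, with hardware_lock held, walk command
 * handles 1..1024 in ha->tgt.cmds[] and abort every command still
 * outstanding at the time of the host reset. */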
)((struct qla_tgt_func_tmpl *)0)) { return; } else { } if ((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0)) { ql_dbg(8192U, vha, 61443, "Target mode disabled\n"); return; } else { tmp___0 = qla_ini_mode_enabled(base_vha); if ((int )tmp___0) { ql_dbg(8192U, vha, 61443, "Target mode disabled\n"); return; } else { } } ql_dbg(8192U, vha, 65296, "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n", base_vha->dpc_flags); tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); i = 1U; goto ldv_66945; ldv_66944: cmd = qlt_get_cmd(base_vha, i); if ((unsigned long )cmd == (unsigned long )((struct qla_tgt_cmd *)0)) { goto ldv_66943; } else { } vha = cmd->vha; qlt_abort_cmd_on_host_reset(vha, cmd); ldv_66943: i = i + 1U; ldv_66945: ; if (i <= 1024U) { goto ldv_66944; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static void qlt_do_ctio_completion(struct scsi_qla_host *vha , uint32_t handle , uint32_t status , void *ctio ) { struct qla_hw_data *ha ; struct se_cmd *se_cmd ; struct target_core_fabric_ops const *tfo ; struct qla_tgt_cmd *cmd ; int tmp ; struct ctio_crc_from_fw *crc ; int tmp___0 ; int tmp___1 ; long tmp___2 ; int rx_status ; long tmp___3 ; long tmp___4 ; { ha = vha->hw; if ((handle & 1073741824U) != 0U) { if (status != 1U) { ql_dbg(8192U, vha, 61469, "Intermediate CTIO received (status %x)\n", status); } else { } return; } else { } cmd = qlt_ctio_to_cmd(vha, handle, ctio); if ((unsigned long )cmd == (unsigned long )((struct qla_tgt_cmd *)0)) { return; } else { } se_cmd = & cmd->se_cmd; tfo = se_cmd->se_tfo; cmd->cmd_sent_to_fw = 0U; qlt_unmap_sg(vha, cmd); tmp___2 = ldv__builtin_expect(status != 1U, 0L); if (tmp___2 != 0L) { switch (status & 65535U) { case 14U: ; case 23U: ; case 2U: ; case 11U: ; case 8U: ql_dbg(8192U, vha, 61528, "qla_target(%d): CTIO with status %#x received, state %x, se_cmd %p, (LIP_RESET=e, ABORTED=2, TARGET_RESET=17, TIMEOUT=b, INVALID_RX_ID=8)\n", (int )vha->vp_idx, status, cmd->state, se_cmd); goto ldv_66962; case 41U: ; case 40U: ql_dbg(8192U, vha, 61529, "qla_target(%d): CTIO with PORT LOGGED OUT (29) or PORT UNAVAILABLE (28) status %x received (state %x, se_cmd %p)\n", (int )vha->vp_idx, status, cmd->state, se_cmd); goto ldv_66962; case 69U: ql_dbg(8192U, vha, 61530, "qla_target(%d): CTIO with SRR_RECEIVED status %x received (state %x, se_cmd %p)\n", (int )vha->vp_idx, status, cmd->state, se_cmd); tmp = qlt_prepare_srr_ctio(vha, cmd, ctio); if (tmp != 0) { goto ldv_66962; } else { return; } case 12U: crc = (struct ctio_crc_from_fw *)ctio; ql_dbg(8192U, vha, 61555, "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", (int )vha->vp_idx, status, cmd->state, se_cmd, *((u64 *)(& crc->actual_dif)), *((u64 *)(& crc->expected_dif))); tmp___0 = qlt_handle_dif_error(vha, cmd, (struct ctio_crc_from_fw *)ctio); if (tmp___0 != 0) { if (cmd->state == 1) { goto skip_term; } else { cmd->state = 3; (*((ha->tgt.tgt_ops)->handle_dif_err))(cmd); return; } } else { status = 0U; goto skip_term; } default: ql_dbg(8192U, vha, 61531, "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", (int )vha->vp_idx, status, cmd->state, se_cmd); goto ldv_66962; } ldv_66962: ; if (cmd->state != 1 && cmd->state != 4) { cmd->cmd_flags = cmd->cmd_flags | 8192U; tmp___1 = qlt_term_ctio_exchange(vha, ctio, cmd, status); if (tmp___1 != 0) { return; } else { } } else { } } else { } skip_term: ; if (cmd->state == 3) { } else if (cmd->state == 1) { 
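/* State 1 is the "need data" state (the debug strings elsewhere call the
 * resulting transition DATA_IN): the write burst has completed, so move to
 * state 2, record whether the data actually transferred (only on the success
 * status 1), and hand the command to tgt_ops->handle_data(). */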
rx_status = 0; cmd->state = 2; tmp___3 = ldv__builtin_expect(status != 1U, 0L); if (tmp___3 != 0L) { rx_status = -5; } else { cmd->write_data_transferred = 1U; } (*((ha->tgt.tgt_ops)->handle_data))(cmd); return; } else if (cmd->state == 4) { ql_dbg(8192U, vha, 61470, "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); } else { ql_dbg(8192U, vha, 61532, "qla_target(%d): A command in state (%d) should not return a CTIO complete\n", (int )vha->vp_idx, cmd->state); } tmp___4 = ldv__builtin_expect(status != 1U, 0L); if (tmp___4 != 0L && cmd->state != 4) { ql_dbg(8192U, vha, 61471, "Finishing failed CTIO\n"); dump_stack(); } else { } (*((ha->tgt.tgt_ops)->free_cmd))(cmd); return; } } __inline static int qlt_get_fcp_task_attr(struct scsi_qla_host *vha , uint8_t task_codes ) { int fcp_task_attr ; { switch ((int )task_codes) { case 0: fcp_task_attr = 32; goto ldv_66977; case 1: fcp_task_attr = 33; goto ldv_66977; case 2: fcp_task_attr = 34; goto ldv_66977; case 4: fcp_task_attr = 36; goto ldv_66977; case 5: fcp_task_attr = 32; goto ldv_66977; default: ql_dbg(8192U, vha, 61533, "qla_target: unknown task code %x, use ORDERED instead\n", (int )task_codes); fcp_task_attr = 34; goto ldv_66977; } ldv_66977: ; return (fcp_task_attr); } } static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha , uint8_t *s_id ) ; static void __qlt_do_work(struct qla_tgt_cmd *cmd ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_sess *sess ; struct atio_from_isp *atio ; unsigned char *cdb ; unsigned long flags ; uint32_t data_length ; int ret ; int fcp_task_attr ; int data_dir ; int bidi ; u64 tmp ; u32 tmp___0 ; __u32 tmp___1 ; raw_spinlock_t *tmp___2 ; raw_spinlock_t *tmp___3 ; { vha = cmd->vha; ha = vha->hw; tgt = vha->vha_tgt.qla_tgt; sess = cmd->sess; atio = & cmd->atio; bidi = 0; cmd->cmd_in_wq = 0U; cmd->cmd_flags = cmd->cmd_flags | 2U; if (tgt->tgt_stop != 0) { goto out_term; } else { } cdb = (unsigned char *)(& atio->u.isp24.fcp_cmnd.cdb); cmd->se_cmd.tag = (u64 )atio->u.isp24.exchange_addr; tmp = scsilun_to_int((struct scsi_lun *)(& atio->u.isp24.fcp_cmnd.lun)); cmd->unpacked_lun = (uint32_t )tmp; if ((unsigned int )*((unsigned char *)atio + 43UL) != 0U && (unsigned int )*((unsigned char *)atio + 43UL) != 0U) { bidi = 1; data_dir = 1; } else if ((unsigned int )*((unsigned char *)atio + 43UL) != 0U) { data_dir = 2; } else if ((unsigned int )*((unsigned char *)atio + 43UL) != 0U) { data_dir = 1; } else { data_dir = 3; } fcp_task_attr = qlt_get_fcp_task_attr(vha, (int )atio->u.isp24.fcp_cmnd.task_attr); tmp___0 = get_unaligned_le32((void const *)(& atio->u.isp24.fcp_cmnd.add_cdb) + (unsigned long )atio->u.isp24.fcp_cmnd.add_cdb_len); tmp___1 = __fswab32(tmp___0); data_length = tmp___1; ret = (*((ha->tgt.tgt_ops)->handle_cmd))(vha, cmd, cdb, data_length, fcp_task_attr, data_dir, bidi); if (ret != 0) { goto out_term; } else { } tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); (*((ha->tgt.tgt_ops)->put_sess))(sess); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; out_term: ql_dbg(134217728U, vha, 12384, "Terminating work cmd %p", cmd); cmd->cmd_flags = cmd->cmd_flags | 4U; tmp___3 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___3); qlt_send_term_exchange(vha, (struct qla_tgt_cmd *)0, & cmd->atio, 1); qlt_decr_num_pend_cmds(vha); percpu_ida_free(& (sess->se_sess)->sess_tag_pool, cmd->se_cmd.map_tag); (*((ha->tgt.tgt_ops)->put_sess))(sess); spin_unlock_irqrestore(& 
ha->hardware_lock, flags); return; } } static void qlt_do_work(struct work_struct *work ) { struct qla_tgt_cmd *cmd ; struct work_struct const *__mptr ; { __mptr = (struct work_struct const *)work; cmd = (struct qla_tgt_cmd *)__mptr + 0xfffffffffffffc60UL; __qlt_do_work(cmd); return; } } static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha , struct qla_tgt_sess *sess , struct atio_from_isp *atio ) { struct se_session *se_sess ; struct qla_tgt_cmd *cmd ; int tag ; { se_sess = sess->se_sess; tag = percpu_ida_alloc(& se_sess->sess_tag_pool, 0); if (tag < 0) { return ((struct qla_tgt_cmd *)0); } else { } cmd = (struct qla_tgt_cmd *)se_sess->sess_cmd_map + (unsigned long )tag; memset((void *)cmd, 0, 1296UL); memcpy((void *)(& cmd->atio), (void const *)atio, 64UL); cmd->state = 0; cmd->tgt = vha->vha_tgt.qla_tgt; qlt_incr_num_pend_cmds(vha); cmd->vha = vha; cmd->se_cmd.map_tag = (unsigned int )tag; cmd->sess = sess; cmd->loop_id = sess->loop_id; cmd->conf_compl_supported = sess->conf_compl_supported; return (cmd); } } static void qlt_send_busy(struct scsi_qla_host *vha , struct atio_from_isp *atio , uint16_t status ) ; static void qlt_create_sess_from_atio(struct work_struct *work ) { struct qla_tgt_sess_op *op ; struct work_struct const *__mptr ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; struct qla_tgt_cmd *cmd ; unsigned long flags ; uint8_t *s_id ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; { __mptr = (struct work_struct const *)work; op = (struct qla_tgt_sess_op *)__mptr + 0xffffffffffffffb8UL; vha = op->vha; ha = vha->hw; s_id = (uint8_t *)(& op->atio.u.isp24.fcp_hdr.s_id); ql_dbg(8192U, vha, 61474, "qla_target(%d): Unable to find wwn login (s_id %x:%x:%x), trying to create it manually\n", (int )vha->vp_idx, (int )*s_id, (int )*(s_id + 1UL), (int )*(s_id + 2UL)); if ((unsigned int )op->atio.u.raw.entry_count > 1U) { ql_dbg(8192U, vha, 61475, "Dropping multy entry atio %p\n", & op->atio); goto out_term; } else { } mutex_lock_nested(& vha->vha_tgt.tgt_mutex, 0U); sess = qlt_make_local_sess(vha, s_id); mutex_unlock(& vha->vha_tgt.tgt_mutex); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { goto out_term; } else { } cmd = qlt_get_tag(vha, sess, & op->atio); if ((unsigned long )cmd == (unsigned long )((struct qla_tgt_cmd *)0)) { tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); qlt_send_busy(vha, & op->atio, 8); (*((ha->tgt.tgt_ops)->put_sess))(sess); spin_unlock_irqrestore(& ha->hardware_lock, flags); kfree((void const *)op); return; } else { } __qlt_do_work(cmd); kfree((void const *)op); return; out_term: tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); qlt_send_term_exchange(vha, (struct qla_tgt_cmd *)0, & op->atio, 1); spin_unlock_irqrestore(& ha->hardware_lock, flags); kfree((void const *)op); return; } } static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha , struct atio_from_isp *atio ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_sess *sess ; struct qla_tgt_cmd *cmd ; long tmp ; struct qla_tgt_sess_op *op ; void *tmp___0 ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; long tmp___1 ; struct lock_class_key __key___0 ; atomic_long_t __constr_expr_1 ; { ha = vha->hw; tgt = vha->vha_tgt.qla_tgt; tmp = ldv__builtin_expect(tgt->tgt_stop != 0, 0L); if (tmp != 0L) { ql_dbg(134217728U, vha, 12385, "New command while device %p is shutting down\n", tgt); return (-14); } else { } sess = 
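/* Look up the initiator session by the S_ID in the FCP header. If none
 * exists yet, the code below defers the whole ATIO to qla_tgt_wq via
 * qlt_create_sess_from_atio() and returns; otherwise it takes a session
 * reference, allocates a command from the tag pool and queues qlt_do_work(). */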
(*((ha->tgt.tgt_ops)->find_sess_by_s_id))(vha, (uint8_t const *)(& atio->u.isp24.fcp_hdr.s_id)); tmp___1 = ldv__builtin_expect((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0), 0L); if (tmp___1 != 0L) { tmp___0 = kzalloc(152UL, 32U); op = (struct qla_tgt_sess_op *)tmp___0; if ((unsigned long )op == (unsigned long )((struct qla_tgt_sess_op *)0)) { return (-12); } else { } memcpy((void *)(& op->atio), (void const *)atio, 64UL); op->vha = vha; __init_work(& op->work, 0); __constr_expr_0.counter = 137438953408L; op->work.data = __constr_expr_0; lockdep_init_map(& op->work.lockdep_map, "(&op->work)", & __key, 0); INIT_LIST_HEAD(& op->work.entry); op->work.func = & qlt_create_sess_from_atio; queue_work___1(qla_tgt_wq, & op->work); return (0); } else { } kref_get___0(& (sess->se_sess)->sess_kref); cmd = qlt_get_tag(vha, sess, atio); if ((unsigned long )cmd == (unsigned long )((struct qla_tgt_cmd *)0)) { ql_dbg(134217728U, vha, 12386, "qla_target(%d): Allocation of cmd failed\n", (int )vha->vp_idx); (*((ha->tgt.tgt_ops)->put_sess))(sess); return (-12); } else { } cmd->cmd_flags = 0U; cmd->jiffies_at_alloc = get_jiffies_64(); cmd->reset_count = (vha->hw)->chip_reset; cmd->cmd_in_wq = 1U; cmd->cmd_flags = cmd->cmd_flags | 1U; __init_work(& cmd->work, 0); __constr_expr_1.counter = 137438953408L; cmd->work.data = __constr_expr_1; lockdep_init_map(& cmd->work.lockdep_map, "(&cmd->work)", & __key___0, 0); INIT_LIST_HEAD(& cmd->work.entry); cmd->work.func = & qlt_do_work; queue_work___1(qla_tgt_wq, & cmd->work); return (0); } } static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess , uint32_t lun , int fn , void *iocb , int flags ) { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct qla_tgt_mgmt_cmd *mcmd ; int res ; uint8_t tmr_func ; void *tmp ; { vha = sess->vha; ha = vha->hw; tmp = mempool_alloc(qla_tgt_mgmt_cmd_mempool, 32U); mcmd = (struct qla_tgt_mgmt_cmd *)tmp; if ((unsigned long )mcmd == (unsigned long )((struct qla_tgt_mgmt_cmd *)0)) { ql_dbg(4096U, vha, 65545, "qla_target(%d): Allocation of management command failed, some commands and their data could leak\n", (int )vha->vp_idx); return (-12); } else { } memset((void *)mcmd, 0, 1000UL); mcmd->sess = sess; if ((unsigned long )iocb != (unsigned long )((void *)0)) { memcpy((void *)(& mcmd->orig_iocb.imm_ntfy), (void const *)iocb, 64UL); } else { } mcmd->tmr_func = (uint8_t )fn; mcmd->flags = (unsigned int )flags; mcmd->reset_count = (vha->hw)->chip_reset; switch (fn) { case 64: ql_dbg(4096U, vha, 65536, "qla_target(%d): CLEAR_ACA received\n", (int )(sess->vha)->vp_idx); tmr_func = 3U; goto ldv_67071; case 32: ql_dbg(4096U, vha, 65537, "qla_target(%d): TARGET_RESET received\n", (int )(sess->vha)->vp_idx); tmr_func = 6U; goto ldv_67071; case 16: ql_dbg(4096U, vha, 65538, "qla_target(%d): LUN_RESET received\n", (int )(sess->vha)->vp_idx); tmr_func = 5U; goto ldv_67071; case 4: ql_dbg(4096U, vha, 65539, "qla_target(%d): CLEAR_TS received\n", (int )(sess->vha)->vp_idx); tmr_func = 4U; goto ldv_67071; case 2: ql_dbg(4096U, vha, 65540, "qla_target(%d): ABORT_TS received\n", (int )(sess->vha)->vp_idx); tmr_func = 2U; goto ldv_67071; default: ql_dbg(4096U, vha, 65546, "qla_target(%d): Unknown task mgmt fn 0x%x\n", (int )(sess->vha)->vp_idx, fn); mempool_free((void *)mcmd, qla_tgt_mgmt_cmd_mempool); return (-38); } ldv_67071: res = (*((ha->tgt.tgt_ops)->handle_tmr))(mcmd, lun, (int )tmr_func, 0U); if (res != 0) { ql_dbg(4096U, vha, 65547, "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", (int )(sess->vha)->vp_idx, 
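/* tgt_ops->handle_tmr() refused the task-management request: release the
 * mempool entry and report -EFAULT (-14). The switch above maps the FCP TM
 * flags (CLEAR_ACA / TARGET_RESET / LUN_RESET / CLEAR_TS / ABORT_TS, per the
 * debug messages) onto the core's tmr_func codes. */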
res); mempool_free((void *)mcmd, qla_tgt_mgmt_cmd_mempool); return (-14); } else { } return (0); } } static int qlt_handle_task_mgmt(struct scsi_qla_host *vha , void *iocb ) { struct atio_from_isp *a ; struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_sess *sess ; uint32_t lun ; uint32_t unpacked_lun ; int lun_size ; int fn ; u64 tmp ; int tmp___0 ; int tmp___1 ; { a = (struct atio_from_isp *)iocb; ha = vha->hw; tgt = vha->vha_tgt.qla_tgt; lun = (uint32_t )a->u.isp24.fcp_cmnd.lun; lun_size = 8; fn = (int )a->u.isp24.fcp_cmnd.task_mgmt_flags; sess = (*((ha->tgt.tgt_ops)->find_sess_by_s_id))(vha, (uint8_t const *)(& a->u.isp24.fcp_hdr.s_id)); tmp = scsilun_to_int((struct scsi_lun *)(& lun)); unpacked_lun = (uint32_t )tmp; if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { ql_dbg(8192U, vha, 61476, "qla_target(%d): task mgmt fn 0x%x for non-existant session\n", (int )vha->vp_idx, fn); tmp___0 = qlt_sched_sess_work(tgt, 2, (void const *)iocb, 64U); return (tmp___0); } else { } tmp___1 = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); return (tmp___1); } } static int __qlt_abort_task(struct scsi_qla_host *vha , struct imm_ntfy_from_isp *iocb , struct qla_tgt_sess *sess ) { struct atio_from_isp *a ; struct qla_hw_data *ha ; struct qla_tgt_mgmt_cmd *mcmd ; uint32_t lun ; uint32_t unpacked_lun ; int rc ; void *tmp ; u64 tmp___0 ; { a = (struct atio_from_isp *)iocb; ha = vha->hw; tmp = mempool_alloc(qla_tgt_mgmt_cmd_mempool, 32U); mcmd = (struct qla_tgt_mgmt_cmd *)tmp; if ((unsigned long )mcmd == (unsigned long )((struct qla_tgt_mgmt_cmd *)0)) { ql_dbg(8192U, vha, 61535, "qla_target(%d): %s: Allocation of ABORT cmd failed\n", (int )vha->vp_idx, "__qlt_abort_task"); return (-12); } else { } memset((void *)mcmd, 0, 1000UL); mcmd->sess = sess; memcpy((void *)(& mcmd->orig_iocb.imm_ntfy), (void const *)iocb, 64UL); lun = (uint32_t )a->u.isp24.fcp_cmnd.lun; tmp___0 = scsilun_to_int((struct scsi_lun *)(& lun)); unpacked_lun = (uint32_t )tmp___0; mcmd->reset_count = (vha->hw)->chip_reset; rc = (*((ha->tgt.tgt_ops)->handle_tmr))(mcmd, unpacked_lun, 1, (uint32_t )iocb->u.isp2x.seq_id); if (rc != 0) { ql_dbg(8192U, vha, 61536, "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", (int )vha->vp_idx, rc); mempool_free((void *)mcmd, qla_tgt_mgmt_cmd_mempool); return (-14); } else { } return (0); } } static int qlt_abort_task(struct scsi_qla_host *vha , struct imm_ntfy_from_isp *iocb ) { struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; int loop_id ; int tmp ; int tmp___0 ; { ha = vha->hw; loop_id = (int )ha->device_type < 0 ? 
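/* The sign test on the (int) cast above checks the top bit of
 * ha->device_type (likely DT_EXTENDED_IDS upstream): with it set the IOCB
 * carries the wider 'extended' target id, otherwise only the standard one. */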
(int )((struct atio_from_isp *)iocb)->u.isp2x.target.extended : (int )((struct atio_from_isp *)iocb)->u.isp2x.target.id.standard; sess = (*((ha->tgt.tgt_ops)->find_sess_by_loop_id))(vha, (int )((uint16_t const )loop_id)); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { ql_dbg(8192U, vha, 61477, "qla_target(%d): task abort for unexisting session\n", (int )vha->vp_idx); tmp = qlt_sched_sess_work(vha->vha_tgt.qla_tgt, 1, (void const *)iocb, 64U); return (tmp); } else { } tmp___0 = __qlt_abort_task(vha, iocb, sess); return (tmp___0); } } static int qlt_24xx_handle_els(struct scsi_qla_host *vha , struct imm_ntfy_from_isp *iocb ) { int res ; struct qla_tgt *tgt ; { res = 0; ql_dbg(8192U, vha, 61478, "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", (int )vha->vp_idx, (uint8_t *)(& iocb->u.isp24.port_id), (int )iocb->u.isp24.status_subcode); switch ((int )iocb->u.isp24.status_subcode) { case 3: ; case 4: ; case 32: ; case 5: ; case 33: res = qlt_reset(vha, (void *)iocb, 65533); goto ldv_67118; case 80: ; case 82: tgt = vha->vha_tgt.qla_tgt; if ((unsigned int )*((unsigned char *)tgt + 32UL) != 0U) { qlt_send_notify_ack(vha, & tgt->link_reinit_iocb, 0U, 0, 0, 0, 0, 0); tgt->link_reinit_iocb_pending = 0U; } else { } res = 1; goto ldv_67118; default: ql_dbg(8192U, vha, 61537, "qla_target(%d): Unsupported ELS command %x received\n", (int )vha->vp_idx, (int )iocb->u.isp24.status_subcode); res = qlt_reset(vha, (void *)iocb, 65533); goto ldv_67118; } ldv_67118: ; return (res); } } static int qlt_set_data_offset(struct qla_tgt_cmd *cmd , uint32_t offset ) { struct _ddebug descriptor ; long tmp ; { descriptor.modname = "qla2xxx"; descriptor.function = "qlt_set_data_offset"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"; descriptor.format = "Rejecting non zero SRR rel_offs: %u\n"; descriptor.lineno = 3718U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Rejecting non zero SRR rel_offs: %u\n", offset); } else { } return (-1); } } __inline static int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd , uint32_t srr_rel_offs , int *xmit_type ) { int res ; int rel_offs ; { res = 0; rel_offs = (int )(srr_rel_offs - (uint32_t )cmd->offset); ql_dbg(8192U, cmd->vha, 61479, "srr_rel_offs=%d, rel_offs=%d", srr_rel_offs, rel_offs); *xmit_type = 3; if (rel_offs < 0) { ql_dbg(8192U, cmd->vha, 61538, "qla_target(%d): SRR rel_offs (%d) < 0", (int )(cmd->vha)->vp_idx, rel_offs); res = -1; } else if (cmd->bufflen == rel_offs) { *xmit_type = 2; } else if (rel_offs > 0) { res = qlt_set_data_offset(cmd, (uint32_t )rel_offs); } else { } return (res); } } static void qlt_handle_srr(struct scsi_qla_host *vha , struct qla_tgt_srr_ctio *sctio , struct qla_tgt_srr_imm *imm ) { struct imm_ntfy_from_isp *ntfy ; struct qla_hw_data *ha ; struct qla_tgt_cmd *cmd ; struct se_cmd *se_cmd ; unsigned long flags ; int xmit_type ; int resp ; uint32_t offset ; uint16_t srr_ui ; raw_spinlock_t *tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; int tmp___2 ; int tmp___3 ; raw_spinlock_t *tmp___4 ; int tmp___5 ; raw_spinlock_t *tmp___6 ; { ntfy = & imm->imm_ntfy; ha = vha->hw; cmd = sctio->cmd; se_cmd = & cmd->se_cmd; xmit_type = 0; resp = 0; offset = ntfy->u.isp24.srr_rel_offs; srr_ui = ntfy->u.isp24.srr_ui; ql_dbg(8192U, vha, 61480, "SRR cmd %p, 
srr_ui %x\n", cmd, (int )srr_ui); switch ((int )srr_ui) { case 7: tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); qlt_send_notify_ack(vha, ntfy, 0U, 0, 0, 0, 0, 0); spin_unlock_irqrestore(& ha->hardware_lock, flags); xmit_type = 2; resp = 1; goto ldv_67154; case 1: ; if ((unsigned long )cmd->sg == (unsigned long )((struct scatterlist *)0) || cmd->sg_cnt == 0) { ql_dbg(8192U, vha, 61539, "Unable to process SRR_IU_DATA_IN due to missing cmd->sg, state: %d\n", cmd->state); dump_stack(); goto out_reject; } else { } if ((unsigned int )se_cmd->scsi_status != 0U) { ql_dbg(16384U, vha, 57386, "Rejecting SRR_IU_DATA_IN with non GOOD scsi_status\n"); goto out_reject; } else { } cmd->bufflen = (int )se_cmd->data_length; tmp___2 = qlt_has_data(cmd); if (tmp___2 != 0) { tmp___0 = qlt_srr_adjust_data(cmd, offset, & xmit_type); if (tmp___0 != 0) { goto out_reject; } else { } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); qlt_send_notify_ack(vha, ntfy, 0U, 0, 0, 0, 0, 0); spin_unlock_irqrestore(& ha->hardware_lock, flags); resp = 1; } else { ql_dbg(8192U, vha, 61540, "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject", (int )vha->vp_idx, se_cmd->tag, (int )cmd->se_cmd.scsi_status); goto out_reject; } goto ldv_67154; case 5: ; if ((unsigned long )cmd->sg == (unsigned long )((struct scatterlist *)0) || cmd->sg_cnt == 0) { ql_dbg(8192U, vha, 61541, "Unable to process SRR_IU_DATA_OUT due to missing cmd->sg\n"); dump_stack(); goto out_reject; } else { } if ((unsigned int )se_cmd->scsi_status != 0U) { ql_dbg(16384U, vha, 57387, "Rejecting SRR_IU_DATA_OUT with non GOOD scsi_status\n"); goto out_reject; } else { } cmd->bufflen = (int )se_cmd->data_length; tmp___5 = qlt_has_data(cmd); if (tmp___5 != 0) { tmp___3 = qlt_srr_adjust_data(cmd, offset, & xmit_type); if (tmp___3 != 0) { goto out_reject; } else { } tmp___4 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___4); qlt_send_notify_ack(vha, ntfy, 0U, 0, 0, 0, 0, 0); spin_unlock_irqrestore(& ha->hardware_lock, flags); if (xmit_type & 1) { cmd->cmd_flags = cmd->cmd_flags | 256U; qlt_rdy_to_xfer(cmd); } else { } } else { ql_dbg(8192U, vha, 61542, "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject", (int )vha->vp_idx, se_cmd->tag, (int )cmd->se_cmd.scsi_status); goto out_reject; } goto ldv_67154; default: ql_dbg(8192U, vha, 61543, "qla_target(%d): Unknown srr_ui value %x", (int )vha->vp_idx, (int )srr_ui); goto out_reject; } ldv_67154: ; if (resp != 0) { cmd->cmd_flags = cmd->cmd_flags | 128U; qlt_xmit_response(cmd, xmit_type, (int )se_cmd->scsi_status); } else { } return; out_reject: tmp___6 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___6); qlt_send_notify_ack(vha, ntfy, 0U, 0, 0, 1, 9, 0); if (cmd->state == 1) { cmd->state = 2; dump_stack(); } else { cmd->cmd_flags = cmd->cmd_flags | 512U; qlt_send_term_exchange(vha, cmd, & cmd->atio, 1); } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha , struct qla_tgt_srr_imm *imm , int ha_locked ) { struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; { ha = vha->hw; flags = 0UL; if (ha_locked == 0) { tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); } else { } qlt_send_notify_ack(vha, & imm->imm_ntfy, 0U, 0, 0, 1, 9, 0); if (ha_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { 
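/* ha_locked != 0: the caller already holds hardware_lock, so no unlock is
 * needed here; the SRR reject notify-ack was still sent above (reject flag 1,
 * reason code 9 -- likely "unable to perform" in the upstream driver). */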
} kfree((void const *)imm); return; } } static void qlt_handle_srr_work(struct work_struct *work ) { struct qla_tgt *tgt ; struct work_struct const *__mptr ; struct scsi_qla_host *vha ; struct qla_tgt_srr_ctio *sctio ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr___0 ; struct qla_tgt_srr_imm *imm ; struct qla_tgt_srr_imm *i ; struct qla_tgt_srr_imm *ti ; struct qla_tgt_cmd *cmd ; struct se_cmd *se_cmd ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; { __mptr = (struct work_struct const *)work; tgt = (struct qla_tgt *)__mptr + 0xfffffffffffffd10UL; vha = tgt->vha; ql_dbg(8192U, vha, 61481, "Entering SRR work (tgt %p)\n", tgt); restart: tmp = spinlock_check(& tgt->srr_lock); flags = _raw_spin_lock_irqsave(tmp); __mptr___0 = (struct list_head const *)tgt->srr_ctio_list.next; sctio = (struct qla_tgt_srr_ctio *)__mptr___0; goto ldv_67211; ldv_67210: imm = (struct qla_tgt_srr_imm *)0; __mptr___1 = (struct list_head const *)tgt->srr_imm_list.next; i = (struct qla_tgt_srr_imm *)__mptr___1; __mptr___2 = (struct list_head const *)i->srr_list_entry.next; ti = (struct qla_tgt_srr_imm *)__mptr___2; goto ldv_67207; ldv_67206: ; if (i->srr_id == sctio->srr_id) { list_del(& i->srr_list_entry); if ((unsigned long )imm != (unsigned long )((struct qla_tgt_srr_imm *)0)) { ql_dbg(8192U, vha, 61544, "qla_target(%d): There must be only one IMM SRR per CTIO SRR (IMM SRR %p, id %d, CTIO %p\n", (int )vha->vp_idx, i, i->srr_id, sctio); qlt_reject_free_srr_imm(tgt->vha, i, 0); } else { imm = i; } } else { } i = ti; __mptr___3 = (struct list_head const *)ti->srr_list_entry.next; ti = (struct qla_tgt_srr_imm *)__mptr___3; ldv_67207: ; if ((unsigned long )(& i->srr_list_entry) != (unsigned long )(& tgt->srr_imm_list)) { goto ldv_67206; } else { } ql_dbg(8192U, vha, 61482, "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, sctio->srr_id); if ((unsigned long )imm == (unsigned long )((struct qla_tgt_srr_imm *)0)) { ql_dbg(8192U, vha, 61483, "Not found matching IMM for SRR CTIO (id %d)\n", sctio->srr_id); goto ldv_67209; } else { list_del(& sctio->srr_list_entry); } spin_unlock_irqrestore(& tgt->srr_lock, flags); cmd = sctio->cmd; cmd->offset = 0; if ((unsigned int )*((unsigned char *)cmd + 1104UL) != 0U) { kfree((void const *)cmd->sg); cmd->sg = (struct scatterlist *)0; cmd->free_sg = 0U; } else { } se_cmd = & cmd->se_cmd; cmd->sg_cnt = (int )se_cmd->t_data_nents; cmd->sg = se_cmd->t_data_sg; ql_dbg(8192U, vha, 61484, "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d", cmd, & cmd->se_cmd, se_cmd->tag, (unsigned long )se_cmd->t_task_cdb != (unsigned long )((unsigned char *)0U) ? 
(int )*(se_cmd->t_task_cdb) : 0, cmd->sg_cnt, cmd->offset); qlt_handle_srr(vha, sctio, imm); kfree((void const *)imm); kfree((void const *)sctio); goto restart; ldv_67209: __mptr___4 = (struct list_head const *)sctio->srr_list_entry.next; sctio = (struct qla_tgt_srr_ctio *)__mptr___4; ldv_67211: ; if ((unsigned long )(& sctio->srr_list_entry) != (unsigned long )(& tgt->srr_ctio_list)) { goto ldv_67210; } else { } spin_unlock_irqrestore(& tgt->srr_lock, flags); return; } } static void qlt_prepare_srr_imm(struct scsi_qla_host *vha , struct imm_ntfy_from_isp *iocb ) { struct qla_tgt_srr_imm *imm ; struct qla_tgt *tgt ; struct qla_tgt_srr_ctio *sctio ; void *tmp ; int found ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct qla_tgt_srr_ctio *ts ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; { tgt = vha->vha_tgt.qla_tgt; tgt->imm_srr_id = tgt->imm_srr_id + 1; ql_log(1U, vha, 61485, "qla_target(%d): SRR received\n", (int )vha->vp_idx); tmp = kzalloc(88UL, 32U); imm = (struct qla_tgt_srr_imm *)tmp; if ((unsigned long )imm != (unsigned long )((struct qla_tgt_srr_imm *)0)) { memcpy((void *)(& imm->imm_ntfy), (void const *)iocb, 64UL); spin_lock(& tgt->srr_lock); imm->srr_id = tgt->imm_srr_id; list_add_tail(& imm->srr_list_entry, & tgt->srr_imm_list); ql_dbg(8192U, vha, 61486, "IMM NTFY SRR %p added (id %d, ui %x)\n", imm, imm->srr_id, (int )iocb->u.isp24.srr_ui); if (tgt->imm_srr_id == tgt->ctio_srr_id) { found = 0; __mptr = (struct list_head const *)tgt->srr_ctio_list.next; sctio = (struct qla_tgt_srr_ctio *)__mptr; goto ldv_67227; ldv_67226: ; if (sctio->srr_id == imm->srr_id) { found = 1; goto ldv_67225; } else { } __mptr___0 = (struct list_head const *)sctio->srr_list_entry.next; sctio = (struct qla_tgt_srr_ctio *)__mptr___0; ldv_67227: ; if ((unsigned long )(& sctio->srr_list_entry) != (unsigned long )(& tgt->srr_ctio_list)) { goto ldv_67226; } else { } ldv_67225: ; if (found != 0) { ql_dbg(8192U, vha, 61487, "%s", (char *)"Scheduling srr work\n"); schedule_work___0(& tgt->srr_work); } else { ql_dbg(8192U, vha, 61488, "qla_target(%d): imm_srr_id == ctio_srr_id (%d), but there is no corresponding SRR CTIO, deleting IMM SRR %p\n", (int )vha->vp_idx, tgt->ctio_srr_id, imm); list_del(& imm->srr_list_entry); kfree((void const *)imm); spin_unlock(& tgt->srr_lock); goto out_reject; } } else { } spin_unlock(& tgt->srr_lock); } else { ql_dbg(8192U, vha, 61545, "qla_target(%d): Unable to allocate SRR IMM entry, SRR request will be rejected\n", (int )vha->vp_idx); spin_lock(& tgt->srr_lock); __mptr___1 = (struct list_head const *)tgt->srr_ctio_list.next; sctio = (struct qla_tgt_srr_ctio *)__mptr___1; __mptr___2 = (struct list_head const *)sctio->srr_list_entry.next; ts = (struct qla_tgt_srr_ctio *)__mptr___2; goto ldv_67237; ldv_67236: ; if (sctio->srr_id == tgt->imm_srr_id) { ql_dbg(8192U, vha, 61489, "CTIO SRR %p deleted (id %d)\n", sctio, sctio->srr_id); list_del(& sctio->srr_list_entry); qlt_send_term_exchange(vha, sctio->cmd, & (sctio->cmd)->atio, 1); kfree((void const *)sctio); } else { } sctio = ts; __mptr___3 = (struct list_head const *)ts->srr_list_entry.next; ts = (struct qla_tgt_srr_ctio *)__mptr___3; ldv_67237: ; if ((unsigned long )(& sctio->srr_list_entry) != (unsigned long )(& tgt->srr_ctio_list)) { goto ldv_67236; } else { } spin_unlock(& tgt->srr_lock); goto out_reject; } return; out_reject: qlt_send_notify_ack(vha, iocb, 0U, 0, 0, 1, 9, 0); return; } } static void qlt_handle_imm_notify(struct 
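/* qlt_handle_imm_notify: dispatch an IMMEDIATE NOTIFY IOCB on its status
 * field -- LIP reset (0x0e), link reinit (0x0f), port/global logouts and
 * link failure, abort task (0x20), task management (0x36), ELS (0x46) and
 * SRR (0x45) -- and send a NOTIFY ACK afterwards unless one of the handlers
 * takes ownership of the acknowledgement. */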
scsi_qla_host *vha , struct imm_ntfy_from_isp *iocb ) { struct qla_hw_data *ha ; uint32_t add_flags ; int send_notify_ack ; uint16_t status ; int tmp ; struct qla_tgt *tgt ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; { ha = vha->hw; add_flags = 0U; send_notify_ack = 1; status = iocb->u.isp2x.status; switch ((int )status) { case 14: ql_dbg(8192U, vha, 61490, "qla_target(%d): LIP reset (loop %#x), subcode %x\n", (int )vha->vp_idx, (int )iocb->u.isp24.nport_handle, (int )iocb->u.isp24.status_subcode); tmp = qlt_reset(vha, (void *)iocb, 65534); if (tmp == 0) { send_notify_ack = 0; } else { } goto ldv_67248; case 15: tgt = vha->vha_tgt.qla_tgt; ql_dbg(8192U, vha, 61491, "qla_target(%d): LINK REINIT (loop %#x, subcode %x)\n", (int )vha->vp_idx, (int )iocb->u.isp24.nport_handle, (int )iocb->u.isp24.status_subcode); if ((unsigned int )*((unsigned char *)tgt + 32UL) != 0U) { qlt_send_notify_ack(vha, & tgt->link_reinit_iocb, 0U, 0, 0, 0, 0, 0); } else { } memcpy((void *)(& tgt->link_reinit_iocb), (void const *)iocb, 64UL); tgt->link_reinit_iocb_pending = 1U; send_notify_ack = 0; goto ldv_67248; case 41: ql_dbg(8192U, vha, 61492, "qla_target(%d): Port logout (loop %#x, subcode %x)\n", (int )vha->vp_idx, (int )iocb->u.isp24.nport_handle, (int )iocb->u.isp24.status_subcode); tmp___0 = qlt_reset(vha, (void *)iocb, 65533); if (tmp___0 == 0) { send_notify_ack = 0; } else { } goto ldv_67248; case 45: ql_dbg(8192U, vha, 61493, "qla_target(%d): Global TPRLO (%x)\n", (int )vha->vp_idx, (int )status); tmp___1 = qlt_reset(vha, (void *)iocb, 65532); if (tmp___1 == 0) { send_notify_ack = 0; } else { } goto ldv_67248; case 42: ql_dbg(8192U, vha, 61494, "qla_target(%d): Port config changed (%x)\n", (int )vha->vp_idx, (int )status); tmp___2 = qlt_reset(vha, (void *)iocb, 65534); if (tmp___2 == 0) { send_notify_ack = 0; } else { } goto ldv_67248; case 46: ql_dbg(8192U, vha, 61546, "qla_target(%d): Link failure detected\n", (int )vha->vp_idx); tmp___3 = qlt_reset(vha, (void *)iocb, 65532); if (tmp___3 == 0) { send_notify_ack = 0; } else { } goto ldv_67248; case 22: ql_dbg(8192U, vha, 61547, "qla_target(%d): Cannot provide requested capability (IOCB overflowed the immediate notify resource count)\n", (int )vha->vp_idx); goto ldv_67248; case 32: ql_dbg(8192U, vha, 61495, "qla_target(%d): Abort Task (S %08x I %#x -> L %#x)\n", (int )vha->vp_idx, (int )iocb->u.isp2x.seq_id, (int )ha->device_type < 0 ? 
(int )((struct atio_from_isp *)iocb)->u.isp2x.target.extended : (int )((struct atio_from_isp *)iocb)->u.isp2x.target.id.standard, (int )iocb->u.isp2x.lun); tmp___4 = qlt_abort_task(vha, iocb); if (tmp___4 == 0) { send_notify_ack = 0; } else { } goto ldv_67248; case 52: ql_dbg(8192U, vha, 61548, "qla_target(%d): Out of resources, host %ld\n", (int )vha->vp_idx, vha->host_no); goto ldv_67248; case 54: ql_dbg(8192U, vha, 61496, "qla_target(%d): Immediate notify task %x\n", (int )vha->vp_idx, (int )iocb->u.isp2x.task_flags); tmp___5 = qlt_handle_task_mgmt(vha, (void *)iocb); if (tmp___5 == 0) { send_notify_ack = 0; } else { } goto ldv_67248; case 70: tmp___6 = qlt_24xx_handle_els(vha, iocb); if (tmp___6 == 0) { send_notify_ack = 0; } else { } goto ldv_67248; case 69: qlt_prepare_srr_imm(vha, iocb); send_notify_ack = 0; goto ldv_67248; default: ql_dbg(8192U, vha, 61549, "qla_target(%d): Received unknown immediate notify status %x\n", (int )vha->vp_idx, (int )status); goto ldv_67248; } ldv_67248: ; if (send_notify_ack != 0) { qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0); } else { } return; } } static int __qlt_send_busy(struct scsi_qla_host *vha , struct atio_from_isp *atio , uint16_t status ) { struct ctio7_to_24xx *ctio24 ; struct qla_hw_data *ha ; request_t *pkt ; struct qla_tgt_sess *sess ; void *tmp ; __u16 tmp___0 ; { ha = vha->hw; sess = (struct qla_tgt_sess *)0; sess = (*((ha->tgt.tgt_ops)->find_sess_by_s_id))(vha, (uint8_t const *)(& atio->u.isp24.fcp_hdr.s_id)); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { qlt_send_term_exchange(vha, (struct qla_tgt_cmd *)0, atio, 1); return (0); } else { } tmp = qla2x00_alloc_iocbs(vha, (srb_t *)0); pkt = (request_t *)tmp; if ((unsigned long )pkt == (unsigned long )((request_t *)0)) { ql_dbg(134217728U, vha, 12387, "qla_target(%d): %s failed: unable to allocate request packet", (int )vha->vp_idx, "__qlt_send_busy"); return (-12); } else { } pkt->entry_count = 1U; pkt->handle = 4294967295U; ctio24 = (struct ctio7_to_24xx *)pkt; ctio24->entry_type = 18U; ctio24->nport_handle = sess->loop_id; ctio24->timeout = 10U; ctio24->vp_index = (uint8_t )vha->vp_idx; ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; ctio24->exchange_addr = atio->u.isp24.exchange_addr; ctio24->u.status1.flags = (uint16_t )((int )((short )((int )atio->u.isp24.attr << 9)) | -32448); tmp___0 = __fswab16((int )atio->u.isp24.fcp_hdr.ox_id); ctio24->u.status1.ox_id = tmp___0; ctio24->u.status1.scsi_status = status; __asm__ volatile ("sfence": : : "memory"); qla2x00_start_iocbs(vha, vha->req); return (0); } } static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha , struct atio_from_isp *atio , uint16_t status , int qfull ) { struct qla_tgt *tgt ; struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; struct se_session *se_sess ; struct qla_tgt_cmd *cmd ; int tag ; long tmp ; { tgt = vha->vha_tgt.qla_tgt; ha = vha->hw; tmp = ldv__builtin_expect(tgt->tgt_stop != 0, 0L); if (tmp != 0L) { ql_dbg(134217728U, vha, 12298, "New command while device %p is shutting down\n", tgt); return; } else { } if ((vha->hw)->tgt.num_qfull_cmds_alloc + 1U > 8192U) { (vha->hw)->tgt.num_qfull_cmds_dropped = (vha->hw)->tgt.num_qfull_cmds_dropped + 1U; if ((vha->hw)->tgt.num_qfull_cmds_dropped > (vha->hw)->qla_stats.stat_max_qfull_cmds_dropped) { (vha->hw)->qla_stats.stat_max_qfull_cmds_dropped = (vha->hw)->tgt.num_qfull_cmds_dropped; } else { } 
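/* The QFull backlog is capped (8192 entries, checked above): past that point
 * the ATIO is simply dropped, the drop counters are updated, and
 * qlt_chk_exch_leak_thresh_hold() may request a chip reset once roughly 75%
 * of the firmware exchange resources appear to have leaked. */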
ql_dbg(134217728U, vha, 12392, "qla_target(%d): %s: QFull CMD dropped[%d]\n", (int )vha->vp_idx, "qlt_alloc_qfull_cmd", (vha->hw)->tgt.num_qfull_cmds_dropped); qlt_chk_exch_leak_thresh_hold(vha); return; } else { } sess = (*((ha->tgt.tgt_ops)->find_sess_by_s_id))(vha, (uint8_t const *)(& atio->u.isp24.fcp_hdr.s_id)); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { return; } else { } se_sess = sess->se_sess; tag = percpu_ida_alloc(& se_sess->sess_tag_pool, 0); if (tag < 0) { return; } else { } cmd = (struct qla_tgt_cmd *)se_sess->sess_cmd_map + (unsigned long )tag; if ((unsigned long )cmd == (unsigned long )((struct qla_tgt_cmd *)0)) { ql_dbg(134217728U, vha, 12297, "qla_target(%d): %s: Allocation of cmd failed\n", (int )vha->vp_idx, "qlt_alloc_qfull_cmd"); (vha->hw)->tgt.num_qfull_cmds_dropped = (vha->hw)->tgt.num_qfull_cmds_dropped + 1U; if ((vha->hw)->tgt.num_qfull_cmds_dropped > (vha->hw)->qla_stats.stat_max_qfull_cmds_dropped) { (vha->hw)->qla_stats.stat_max_qfull_cmds_dropped = (vha->hw)->tgt.num_qfull_cmds_dropped; } else { } qlt_chk_exch_leak_thresh_hold(vha); return; } else { } memset((void *)cmd, 0, 1296UL); qlt_incr_num_pend_cmds(vha); INIT_LIST_HEAD(& cmd->cmd_list); memcpy((void *)(& cmd->atio), (void const *)atio, 64UL); cmd->tgt = vha->vha_tgt.qla_tgt; cmd->vha = vha; cmd->reset_count = (vha->hw)->chip_reset; cmd->q_full = 1U; if (qfull != 0) { cmd->q_full = 1U; cmd->state = (int )status; } else { cmd->term_exchg = 1U; } list_add_tail(& cmd->cmd_list, & (vha->hw)->tgt.q_full_list); (vha->hw)->tgt.num_qfull_cmds_alloc = (vha->hw)->tgt.num_qfull_cmds_alloc + 1U; if ((vha->hw)->tgt.num_qfull_cmds_alloc > (vha->hw)->qla_stats.stat_max_qfull_cmds_alloc) { (vha->hw)->qla_stats.stat_max_qfull_cmds_alloc = (vha->hw)->tgt.num_qfull_cmds_alloc; } else { } return; } } int qlt_free_qfull_cmds(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; unsigned long flags ; struct qla_tgt_cmd *cmd ; struct qla_tgt_cmd *tcmd ; struct list_head free_list ; int rc ; int tmp ; raw_spinlock_t *tmp___0 ; int tmp___1 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; __u16 tmp___2 ; __u16 tmp___3 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; { ha = vha->hw; rc = 0; tmp = list_empty((struct list_head const *)(& ha->tgt.q_full_list)); if (tmp != 0) { return (0); } else { } INIT_LIST_HEAD(& free_list); tmp___0 = spinlock_check(& (vha->hw)->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); tmp___1 = list_empty((struct list_head const *)(& ha->tgt.q_full_list)); if (tmp___1 != 0) { spin_unlock_irqrestore(& (vha->hw)->hardware_lock, flags); return (0); } else { } __mptr = (struct list_head const *)ha->tgt.q_full_list.next; cmd = (struct qla_tgt_cmd *)__mptr + 0xfffffffffffffb70UL; __mptr___0 = (struct list_head const *)cmd->cmd_list.next; tcmd = (struct qla_tgt_cmd *)__mptr___0 + 0xfffffffffffffb70UL; goto ldv_67306; ldv_67305: ; if ((unsigned int )*((unsigned char *)cmd + 1104UL) != 0U) { rc = __qlt_send_busy(vha, & cmd->atio, (int )((uint16_t )cmd->state)); } else if ((unsigned int )*((unsigned char *)cmd + 1104UL) != 0U) { rc = __qlt_send_term_exchange(vha, (struct qla_tgt_cmd *)0, & cmd->atio); } else { } if (rc == -12) { goto ldv_67303; } else { } if ((unsigned int )*((unsigned char *)cmd + 1104UL) != 0U) { tmp___2 = __fswab16((int )cmd->atio.u.isp24.fcp_hdr.ox_id); ql_dbg(134217728U, vha, 12294, "%s: busy sent for ox_id[%04x]\n", 
"qlt_free_qfull_cmds", (int )tmp___2); } else if ((unsigned int )*((unsigned char *)cmd + 1104UL) != 0U) { tmp___3 = __fswab16((int )cmd->atio.u.isp24.fcp_hdr.ox_id); ql_dbg(134217728U, vha, 12295, "%s: Term exchg sent for ox_id[%04x]\n", "qlt_free_qfull_cmds", (int )tmp___3); } else { ql_dbg(134217728U, vha, 12296, "%s: Unexpected cmd in QFull list %p\n", "qlt_free_qfull_cmds", cmd); } list_del(& cmd->cmd_list); list_add_tail(& cmd->cmd_list, & free_list); (vha->hw)->tgt.num_qfull_cmds_alloc = (vha->hw)->tgt.num_qfull_cmds_alloc - 1U; cmd = tcmd; __mptr___1 = (struct list_head const *)tcmd->cmd_list.next; tcmd = (struct qla_tgt_cmd *)__mptr___1 + 0xfffffffffffffb70UL; ldv_67306: ; if ((unsigned long )(& cmd->cmd_list) != (unsigned long )(& ha->tgt.q_full_list)) { goto ldv_67305; } else { } ldv_67303: spin_unlock_irqrestore(& (vha->hw)->hardware_lock, flags); cmd = (struct qla_tgt_cmd *)0; __mptr___2 = (struct list_head const *)free_list.next; cmd = (struct qla_tgt_cmd *)__mptr___2 + 0xfffffffffffffb70UL; __mptr___3 = (struct list_head const *)cmd->cmd_list.next; tcmd = (struct qla_tgt_cmd *)__mptr___3 + 0xfffffffffffffb70UL; goto ldv_67314; ldv_67313: list_del(& cmd->cmd_list); qlt_free_cmd(cmd); cmd = tcmd; __mptr___4 = (struct list_head const *)tcmd->cmd_list.next; tcmd = (struct qla_tgt_cmd *)__mptr___4 + 0xfffffffffffffb70UL; ldv_67314: ; if ((unsigned long )(& cmd->cmd_list) != (unsigned long )(& free_list)) { goto ldv_67313; } else { } return (rc); } } static void qlt_send_busy(struct scsi_qla_host *vha , struct atio_from_isp *atio , uint16_t status ) { int rc ; { rc = 0; rc = __qlt_send_busy(vha, atio, (int )status); if (rc == -12) { qlt_alloc_qfull_cmd(vha, atio, (int )status, 1); } else { } return; } } static int qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha , struct atio_from_isp *atio ) { struct qla_hw_data *ha ; uint16_t status ; { ha = vha->hw; if (ha->tgt.num_pend_cmds < (uint32_t )((int )((unsigned int )ha->fw_xcb_count / 100U) * 90)) { return (0); } else { } status = (uint16_t )temp_sam_status; qlt_send_busy(vha, atio, (int )status); return (1); } } static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha , struct atio_from_isp *atio ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; int rc ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { ha = vha->hw; tgt = vha->vha_tgt.qla_tgt; tmp = ldv__builtin_expect((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0), 0L); if (tmp != 0L) { ql_dbg(134217728U, vha, 12388, "ATIO pkt, but no tgt (ha %p)", ha); return; } else { } tgt->irq_cmd_count = tgt->irq_cmd_count + 1; switch ((int )atio->u.raw.entry_type) { case 6: tmp___0 = ldv__builtin_expect(atio->u.isp24.exchange_addr == 4294967295U, 0L); if (tmp___0 != 0L) { ql_dbg(134217728U, vha, 12389, "qla_target(%d): ATIO_TYPE7 received with UNKNOWN exchange address, sending QUEUE_FULL\n", (int )vha->vp_idx); qlt_send_busy(vha, atio, 40); goto ldv_67336; } else { } tmp___1 = ldv__builtin_expect((unsigned int )atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0U, 1L); if (tmp___1 != 0L) { rc = qlt_chk_qfull_thresh_hold(vha, atio); if (rc != 0) { tgt->irq_cmd_count = tgt->irq_cmd_count - 1; return; } else { } rc = qlt_handle_cmd_for_atio(vha, atio); } else { rc = qlt_handle_task_mgmt(vha, (void *)atio); } tmp___2 = ldv__builtin_expect(rc != 0, 0L); if (tmp___2 != 0L) { if (rc == -3) { qlt_send_busy(vha, atio, 8); } else if (tgt->tgt_stop != 0) { ql_dbg(16384U, vha, 57433, "qla_target: Unable to send command to target for req, ignoring.\n"); } else { 
ql_dbg(16384U, vha, 57434, "qla_target(%d): Unable to send command to target, sending BUSY status.\n", (int )vha->vp_idx); qlt_send_busy(vha, atio, 8); } } else { } goto ldv_67336; case 13: tmp___3 = ldv__builtin_expect((unsigned int )atio->u.isp2x.entry_status != 0U, 0L); if (tmp___3 != 0L) { ql_dbg(16384U, vha, 57435, "qla_target(%d): Received ATIO packet %x with error status %x\n", (int )vha->vp_idx, (int )atio->u.raw.entry_type, (int )atio->u.isp2x.entry_status); goto ldv_67336; } else { } ql_dbg(16384U, vha, 57390, "%s", (char *)"IMMED_NOTIFY ATIO"); qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); goto ldv_67336; default: ql_dbg(16384U, vha, 57436, "qla_target(%d): Received unknown ATIO atio type %x\n", (int )vha->vp_idx, (int )atio->u.raw.entry_type); goto ldv_67336; } ldv_67336: tgt->irq_cmd_count = tgt->irq_cmd_count - 1; return; } } static void qlt_response_pkt(struct scsi_qla_host *vha , response_t *pkt ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; long tmp ; struct ctio7_from_24xx *entry ; struct atio_from_isp *atio ; int rc ; long tmp___0 ; struct ctio_to_2xxx *entry___0 ; struct ctio_to_2xxx *entry___1 ; struct nack_to_isp *entry___2 ; struct abts_resp_from_24xx_fw *entry___3 ; { ha = vha->hw; tgt = vha->vha_tgt.qla_tgt; tmp = ldv__builtin_expect((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0), 0L); if (tmp != 0L) { ql_dbg(16384U, vha, 57437, "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n", (int )vha->vp_idx, (int )pkt->entry_type, ha); return; } else { } tgt->irq_cmd_count = tgt->irq_cmd_count + 1; switch ((int )pkt->entry_type) { case 122: ; case 18: entry = (struct ctio7_from_24xx *)pkt; qlt_do_ctio_completion(vha, entry->handle, (uint32_t )((int )entry->status | ((int )pkt->entry_status << 16)), (void *)entry); goto ldv_67348; case 22: atio = (struct atio_from_isp *)pkt; if ((unsigned int )atio->u.isp2x.status != 61U) { ql_dbg(16384U, vha, 57438, "qla_target(%d): ATIO with error status %x received\n", (int )vha->vp_idx, (int )atio->u.isp2x.status); goto ldv_67348; } else { } rc = qlt_chk_qfull_thresh_hold(vha, atio); if (rc != 0) { tgt->irq_cmd_count = tgt->irq_cmd_count - 1; return; } else { } rc = qlt_handle_cmd_for_atio(vha, atio); tmp___0 = ldv__builtin_expect(rc != 0, 0L); if (tmp___0 != 0L) { if (rc == -3) { qlt_send_busy(vha, atio, 0); } else if (tgt->tgt_stop != 0) { ql_dbg(16384U, vha, 57439, "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); qlt_send_term_exchange(vha, (struct qla_tgt_cmd *)0, atio, 1); } else { ql_dbg(16384U, vha, 57440, "qla_target(%d): Unable to send command to target, sending BUSY status\n", (int )vha->vp_idx); qlt_send_busy(vha, atio, 0); } } else { } goto ldv_67348; case 23: entry___0 = (struct ctio_to_2xxx *)pkt; qlt_do_ctio_completion(vha, entry___0->handle, (uint32_t )((int )entry___0->status | ((int )pkt->entry_status << 16)), (void *)entry___0); goto ldv_67348; case 31: entry___1 = (struct ctio_to_2xxx *)pkt; qlt_do_ctio_completion(vha, entry___1->handle, (uint32_t )((int )entry___1->status | ((int )pkt->entry_status << 16)), (void *)entry___1); goto ldv_67348; case 13: ql_dbg(16384U, vha, 57397, "%s", (char *)"IMMED_NOTIFY\n"); qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); goto ldv_67348; case 14: ; if (tgt->notify_ack_expected > 0) { entry___2 = (struct nack_to_isp *)pkt; ql_dbg(16384U, vha, 57398, "NOTIFY_ACK seq %08x status %x\n", (int )entry___2->u.isp2x.seq_id, (int )entry___2->u.isp2x.status); tgt->notify_ack_expected = 
tgt->notify_ack_expected - 1; if ((unsigned int )entry___2->u.isp2x.status != 1U) { ql_dbg(16384U, vha, 57441, "qla_target(%d): NOTIFY_ACK failed %x\n", (int )vha->vp_idx, (int )entry___2->u.isp2x.status); } else { } } else { ql_dbg(16384U, vha, 57442, "qla_target(%d): Unexpected NOTIFY_ACK received\n", (int )vha->vp_idx); } goto ldv_67348; case 84: ql_dbg(16384U, vha, 57399, "ABTS_RECV_24XX: instance %d\n", (int )vha->vp_idx); qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); goto ldv_67348; case 85: ; if (tgt->abts_resp_expected > 0) { entry___3 = (struct abts_resp_from_24xx_fw *)pkt; ql_dbg(16384U, vha, 57400, "ABTS_RESP_24XX: compl_status %x\n", (int )entry___3->compl_status); tgt->abts_resp_expected = tgt->abts_resp_expected - 1; if ((unsigned int )entry___3->compl_status != 0U) { if (entry___3->error_subcode1 == 30U && entry___3->error_subcode2 == 0U) { qlt_24xx_retry_term_exchange(vha, entry___3); } else { ql_dbg(16384U, vha, 57443, "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)", (int )vha->vp_idx, (int )entry___3->compl_status, entry___3->error_subcode1, entry___3->error_subcode2); } } else { } } else { ql_dbg(16384U, vha, 57444, "qla_target(%d): Unexpected ABTS_RESP_24XX received\n", (int )vha->vp_idx); } goto ldv_67348; default: ql_dbg(16384U, vha, 57445, "qla_target(%d): Received unknown response pkt type %x\n", (int )vha->vp_idx, (int )pkt->entry_type); goto ldv_67348; } ldv_67348: tgt->irq_cmd_count = tgt->irq_cmd_count - 1; return; } } void qlt_async_event(uint16_t code , struct scsi_qla_host *vha , uint16_t *mailbox ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; int login_code ; long tmp ; { ha = vha->hw; tgt = vha->vha_tgt.qla_tgt; if ((unsigned long )ha->tgt.tgt_ops == (unsigned long )((struct qla_tgt_func_tmpl *)0)) { return; } else { } tmp = ldv__builtin_expect((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0), 0L); if (tmp != 0L) { ql_dbg(16384U, vha, 57402, "ASYNC EVENT %#x, but no tgt (ha %p)\n", (int )code, ha); return; } else { } if (((unsigned int )code == 32816U || (unsigned int )code == 32822U) && (int )ha->device_type & 1) { return; } else { } tgt->irq_cmd_count = tgt->irq_cmd_count + 1; switch ((int )code) { case 32769: ; case 32770: ; case 32771: ; case 32772: ql_dbg(8192U, vha, 61498, "qla_target(%d): System error async event %#x occurred", (int )vha->vp_idx, (int )code); goto ldv_67375; case 32773: set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_67375; case 32785: ql_dbg(8192U, vha, 61499, "qla_target(%d): Async LOOP_UP occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", (int )vha->vp_idx, (int )*mailbox, (int )*(mailbox + 1UL), (int )*(mailbox + 2UL), (int )*(mailbox + 3UL)); if ((unsigned int )*((unsigned char *)tgt + 32UL) != 0U) { qlt_send_notify_ack(vha, & tgt->link_reinit_iocb, 0U, 0, 0, 0, 0, 0); tgt->link_reinit_iocb_pending = 0U; } else { } goto ldv_67375; case 32784: ; case 32786: ; case 32787: ; case 32789: ql_dbg(8192U, vha, 61500, "qla_target(%d): Async event %#x occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", (int )vha->vp_idx, (int )code, (int )*mailbox, (int )*(mailbox + 1UL), (int )*(mailbox + 2UL), (int )*(mailbox + 3UL)); goto ldv_67375; case 32788: ql_dbg(8192U, vha, 61501, "qla_target(%d): Port update async event %#x occurred: updating the ports database (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", (int )vha->vp_idx, (int )code, (int )*mailbox, (int )*(mailbox + 1UL), (int )*(mailbox + 2UL), (int )*(mailbox + 3UL)); login_code = (int )*(mailbox + 2UL); if (login_code == 4) { ql_dbg(8192U, 
vha, 61502, "Async MB 2: Got PLOGI Complete\n"); } else if (login_code == 7) { ql_dbg(8192U, vha, 61503, "Async MB 2: Port Logged Out\n"); } else { } goto ldv_67375; default: ; goto ldv_67375; } ldv_67375: tgt->irq_cmd_count = tgt->irq_cmd_count - 1; return; } } static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha , uint16_t loop_id ) { fc_port_t *fcport ; int rc ; void *tmp ; { tmp = kzalloc(136UL, 208U); fcport = (fc_port_t *)tmp; if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { ql_dbg(8192U, vha, 61551, "qla_target(%d): Allocation of tmp FC port failed", (int )vha->vp_idx); return ((fc_port_t *)0); } else { } fcport->loop_id = loop_id; rc = qla2x00_get_port_database(vha, fcport, 0); if (rc != 0) { ql_dbg(8192U, vha, 61552, "qla_target(%d): Failed to retrieve fcport information -- get_port_database() returned %x (loop_id=0x%04x)", (int )vha->vp_idx, rc, (int )loop_id); kfree((void const *)fcport); return ((fc_port_t *)0); } else { } return (fcport); } } static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha , uint8_t *s_id ) { struct qla_tgt_sess *sess ; fc_port_t *fcport ; int rc ; int global_resets ; uint16_t loop_id ; int tmp ; int tmp___0 ; { sess = (struct qla_tgt_sess *)0; fcport = (fc_port_t *)0; loop_id = 0U; retry: global_resets = atomic_read((atomic_t const *)(& (vha->vha_tgt.qla_tgt)->tgt_global_resets_count)); rc = qla24xx_get_loop_id(vha, (uint8_t const *)s_id, & loop_id); if (rc != 0) { if ((unsigned int )*s_id == 255U && (unsigned int )*(s_id + 1UL) == 252U) { ql_dbg(8192U, vha, 61506, "Unable to find initiator with S_ID %x:%x:%x", (int )*s_id, (int )*(s_id + 1UL), (int )*(s_id + 2UL)); } else { ql_dbg(8192U, vha, 61553, "qla_target(%d): Unable to find initiator with S_ID %x:%x:%x", (int )vha->vp_idx, (int )*s_id, (int )*(s_id + 1UL), (int )*(s_id + 2UL)); } return ((struct qla_tgt_sess *)0); } else { } fcport = qlt_get_port_database(vha, (int )loop_id); if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { return ((struct qla_tgt_sess *)0); } else { } tmp___0 = atomic_read((atomic_t const *)(& (vha->vha_tgt.qla_tgt)->tgt_global_resets_count)); if (tmp___0 != global_resets) { tmp = atomic_read((atomic_t const *)(& (vha->vha_tgt.qla_tgt)->tgt_global_resets_count)); ql_dbg(8192U, vha, 61507, "qla_target(%d): global reset during session discovery (counter was %d, new %d), retrying", (int )vha->vp_idx, global_resets, tmp); goto retry; } else { } sess = qlt_create_sess(vha, fcport, 1); kfree((void const *)fcport); return (sess); } } static void qlt_abort_work(struct qla_tgt *tgt , struct qla_tgt_sess_work_param *prm ) { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; unsigned long flags ; uint32_t be_s_id ; uint8_t s_id[3U] ; int rc ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; { vha = tgt->vha; ha = vha->hw; sess = (struct qla_tgt_sess *)0; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if (tgt->tgt_stop != 0) { goto out_term; } else { } s_id[0] = prm->__annonCompField127.abts.fcp_hdr_le.s_id[2]; s_id[1] = prm->__annonCompField127.abts.fcp_hdr_le.s_id[1]; s_id[2] = prm->__annonCompField127.abts.fcp_hdr_le.s_id[0]; sess = (*((ha->tgt.tgt_ops)->find_sess_by_s_id))(vha, (uint8_t const *)(& be_s_id)); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { spin_unlock_irqrestore(& ha->hardware_lock, flags); mutex_lock_nested(& vha->vha_tgt.tgt_mutex, 0U); sess = qlt_make_local_sess(vha, (uint8_t *)(& s_id)); mutex_unlock(& 
vha->vha_tgt.tgt_mutex); tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { goto out_term; } else { } } else { kref_get___0(& (sess->se_sess)->sess_kref); } if (tgt->tgt_stop != 0) { goto out_term; } else { } rc = __qlt_24xx_handle_abts(vha, & prm->__annonCompField127.abts, sess); if (rc != 0) { goto out_term; } else { } (*((ha->tgt.tgt_ops)->put_sess))(sess); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; out_term: qlt_24xx_send_abts_resp(vha, & prm->__annonCompField127.abts, 4U, 0); if ((unsigned long )sess != (unsigned long )((struct qla_tgt_sess *)0)) { (*((ha->tgt.tgt_ops)->put_sess))(sess); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static void qlt_tmr_work(struct qla_tgt *tgt , struct qla_tgt_sess_work_param *prm ) { struct atio_from_isp *a ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; unsigned long flags ; uint8_t *s_id ; int rc ; uint32_t lun ; uint32_t unpacked_lun ; int lun_size ; int fn ; void *iocb ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; u64 tmp___1 ; { a = & prm->__annonCompField127.tm_iocb2; vha = tgt->vha; ha = vha->hw; sess = (struct qla_tgt_sess *)0; s_id = (uint8_t *)0U; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if (tgt->tgt_stop != 0) { goto out_term; } else { } s_id = (uint8_t *)(& prm->__annonCompField127.tm_iocb2.u.isp24.fcp_hdr.s_id); sess = (*((ha->tgt.tgt_ops)->find_sess_by_s_id))(vha, (uint8_t const *)s_id); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { spin_unlock_irqrestore(& ha->hardware_lock, flags); mutex_lock_nested(& vha->vha_tgt.tgt_mutex, 0U); sess = qlt_make_local_sess(vha, s_id); mutex_unlock(& vha->vha_tgt.tgt_mutex); tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { goto out_term; } else { } } else { kref_get___0(& (sess->se_sess)->sess_kref); } iocb = (void *)a; lun = (uint32_t )a->u.isp24.fcp_cmnd.lun; lun_size = 4; fn = (int )a->u.isp24.fcp_cmnd.task_mgmt_flags; tmp___1 = scsilun_to_int((struct scsi_lun *)(& lun)); unpacked_lun = (uint32_t )tmp___1; rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); if (rc != 0) { goto out_term; } else { } (*((ha->tgt.tgt_ops)->put_sess))(sess); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; out_term: qlt_send_term_exchange(vha, (struct qla_tgt_cmd *)0, & prm->__annonCompField127.tm_iocb2, 1); if ((unsigned long )sess != (unsigned long )((struct qla_tgt_sess *)0)) { (*((ha->tgt.tgt_ops)->put_sess))(sess); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static void qlt_sess_work_fn(struct work_struct *work ) { struct qla_tgt *tgt ; struct work_struct const *__mptr ; struct scsi_qla_host *vha ; unsigned long flags ; raw_spinlock_t *tmp ; struct qla_tgt_sess_work_param *prm ; struct list_head const *__mptr___0 ; long tmp___0 ; raw_spinlock_t *tmp___1 ; int tmp___2 ; { __mptr = (struct work_struct const *)work; tgt = (struct qla_tgt *)__mptr + 0xfffffffffffffe78UL; vha = tgt->vha; ql_dbg(8192U, vha, 61440, "Sess work (tgt %p)", tgt); tmp = spinlock_check(& tgt->sess_work_lock); flags = _raw_spin_lock_irqsave(tmp); goto ldv_67464; ldv_67463: __mptr___0 = (struct list_head const *)tgt->sess_works_list.next; prm = (struct qla_tgt_sess_work_param *)__mptr___0; list_del(& prm->sess_works_list_entry); 
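/* qlt_sess_work_fn(): each queued item is unlinked from tgt->sess_works_list
 * and handled with sess_work_lock dropped; the lock is re-acquired before the
 * item is freed and the list is re-checked. Type 1 dispatches to
 * qlt_abort_work(), type 2 to qlt_tmr_work(), and any other type hits the
 * BUG()-style trap in the default case below. */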
spin_unlock_irqrestore(& tgt->sess_work_lock, flags); switch (prm->type) { case 1: qlt_abort_work(tgt, prm); goto ldv_67456; case 2: qlt_tmr_work(tgt, prm); goto ldv_67456; default: tmp___0 = ldv__builtin_expect(1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"), "i" (5123), "i" (12UL)); ldv_67459: ; goto ldv_67459; } else { } goto ldv_67456; } ldv_67456: tmp___1 = spinlock_check(& tgt->sess_work_lock); flags = _raw_spin_lock_irqsave(tmp___1); kfree((void const *)prm); ldv_67464: tmp___2 = list_empty((struct list_head const *)(& tgt->sess_works_list)); if (tmp___2 == 0) { goto ldv_67463; } else { } spin_unlock_irqrestore(& tgt->sess_work_lock, flags); return; } } int qlt_add_target(struct qla_hw_data *ha , struct scsi_qla_host *base_vha ) { struct qla_tgt *tgt ; long tmp ; void *tmp___0 ; struct lock_class_key __key ; struct lock_class_key __key___0 ; atomic_long_t __constr_expr_0 ; struct lock_class_key __key___1 ; struct lock_class_key __key___2 ; struct lock_class_key __key___3 ; atomic_long_t __constr_expr_1 ; struct lock_class_key __key___4 ; struct lock_class_key __key___5 ; atomic_long_t __constr_expr_2 ; int _min1 ; int _min2 ; { if (ql2x_ini_mode == 2) { return (0); } else { } if ((unsigned int )ha->tgt.atio_q_length == 0U) { ql_log(1U, base_vha, 57456, "This adapter does not support target mode.\n"); return (0); } else { } ql_dbg(16384U, base_vha, 57403, "Registering target for host %ld(%p).\n", base_vha->host_no, ha); tmp = ldv__builtin_expect((unsigned long )base_vha->vha_tgt.qla_tgt != (unsigned long )((struct qla_tgt *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"), "i" (5151), "i" (12UL)); ldv_67471: ; goto ldv_67471; } else { } tmp___0 = kzalloc(856UL, 208U); tgt = (struct qla_tgt *)tmp___0; if ((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0)) { ql_dbg(16384U, base_vha, 57446, "Unable to allocate struct qla_tgt\n"); return (-12); } else { } if (((int )((base_vha->host)->hostt)->supported_mode & 2) == 0) { ((base_vha->host)->hostt)->supported_mode = (unsigned char )((unsigned int )((base_vha->host)->hostt)->supported_mode | 2U); } else { } tgt->ha = ha; tgt->vha = base_vha; __init_waitqueue_head(& tgt->waitQ, "&tgt->waitQ", & __key); INIT_LIST_HEAD(& tgt->sess_list); INIT_LIST_HEAD(& tgt->del_sess_list); __init_work(& tgt->sess_del_work.work, 0); __constr_expr_0.counter = 137438953408L; tgt->sess_del_work.work.data = __constr_expr_0; lockdep_init_map(& tgt->sess_del_work.work.lockdep_map, "(&(&tgt->sess_del_work)->work)", & __key___0, 0); INIT_LIST_HEAD(& tgt->sess_del_work.work.entry); tgt->sess_del_work.work.func = (void (*)(struct work_struct * ))(& qlt_del_sess_work_fn); init_timer_key(& tgt->sess_del_work.timer, 2097152U, "(&(&tgt->sess_del_work)->timer)", & __key___1); tgt->sess_del_work.timer.function = & delayed_work_timer_fn; tgt->sess_del_work.timer.data = (unsigned long )(& 
tgt->sess_del_work); spinlock_check(& tgt->sess_work_lock); __raw_spin_lock_init(& tgt->sess_work_lock.__annonCompField18.rlock, "&(&tgt->sess_work_lock)->rlock", & __key___2); __init_work(& tgt->sess_work, 0); __constr_expr_1.counter = 137438953408L; tgt->sess_work.data = __constr_expr_1; lockdep_init_map(& tgt->sess_work.lockdep_map, "(&tgt->sess_work)", & __key___3, 0); INIT_LIST_HEAD(& tgt->sess_work.entry); tgt->sess_work.func = & qlt_sess_work_fn; INIT_LIST_HEAD(& tgt->sess_works_list); spinlock_check(& tgt->srr_lock); __raw_spin_lock_init(& tgt->srr_lock.__annonCompField18.rlock, "&(&tgt->srr_lock)->rlock", & __key___4); INIT_LIST_HEAD(& tgt->srr_ctio_list); INIT_LIST_HEAD(& tgt->srr_imm_list); __init_work(& tgt->srr_work, 0); __constr_expr_2.counter = 137438953408L; tgt->srr_work.data = __constr_expr_2; lockdep_init_map(& tgt->srr_work.lockdep_map, "(&tgt->srr_work)", & __key___5, 0); INIT_LIST_HEAD(& tgt->srr_work.entry); tgt->srr_work.func = & qlt_handle_srr_work; atomic_set(& tgt->tgt_global_resets_count, 0); base_vha->vha_tgt.qla_tgt = tgt; ql_dbg(16384U, base_vha, 57447, "qla_target(%d): using 64 Bit PCI addressing", (int )base_vha->vp_idx); tgt->tgt_enable_64bit_addr = 1U; _min1 = 1270; _min2 = (int )(base_vha->req)->length + -3 > 0 ? (int )(base_vha->req)->length * 5 + -19 : 0; tgt->sg_tablesize = _min1 < _min2 ? _min1 : _min2; tgt->datasegs_per_cmd = 1; tgt->datasegs_per_cont = 5; if ((unsigned long )base_vha->fc_vport != (unsigned long )((struct fc_vport *)0)) { return (0); } else { } mutex_lock_nested(& qla_tgt_mutex, 0U); list_add_tail(& tgt->tgt_list_entry, & qla_tgt_glist); mutex_unlock(& qla_tgt_mutex); return (0); } } int qlt_remove_target(struct qla_hw_data *ha , struct scsi_qla_host *vha ) { { if ((unsigned long )vha->vha_tgt.qla_tgt == (unsigned long )((struct qla_tgt *)0)) { return (0); } else { } if ((unsigned long )vha->fc_vport != (unsigned long )((struct fc_vport *)0)) { qlt_release(vha->vha_tgt.qla_tgt); return (0); } else { } qlt_init_term_exchange(vha); mutex_lock_nested(& qla_tgt_mutex, 0U); list_del(& (vha->vha_tgt.qla_tgt)->tgt_list_entry); mutex_unlock(& qla_tgt_mutex); ql_dbg(16384U, vha, 57404, "Unregistering target for host %ld(%p)", vha->host_no, ha); qlt_release(vha->vha_tgt.qla_tgt); return (0); } } static void qlt_lport_dump(struct scsi_qla_host *vha , u64 wwpn , unsigned char *b ) { int i ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; struct _ddebug descriptor___2 ; long tmp___2 ; struct _ddebug descriptor___3 ; long tmp___3 ; struct _ddebug descriptor___4 ; long tmp___4 ; struct _ddebug descriptor___5 ; long tmp___5 ; struct _ddebug descriptor___6 ; long tmp___6 ; struct _ddebug descriptor___7 ; long tmp___7 ; { descriptor.modname = "qla2xxx"; descriptor.function = "qlt_lport_dump"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"; descriptor.format = "qla2xxx HW vha->node_name: "; descriptor.lineno = 5230U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "qla2xxx HW vha->node_name: "); } else { } i = 0; goto ldv_67500; ldv_67499: descriptor___0.modname = "qla2xxx"; descriptor___0.function = "qlt_lport_dump"; descriptor___0.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"; descriptor___0.format = "%02x "; descriptor___0.lineno = 5232U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor___0, "%02x ", (int )vha->node_name[i]); } else { } i = i + 1; ldv_67500: ; if (i <= 7) { goto ldv_67499; } else { } descriptor___1.modname = "qla2xxx"; descriptor___1.function = "qlt_lport_dump"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"; descriptor___1.format = "\n"; descriptor___1.lineno = 5233U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___1, "\n"); } else { } descriptor___2.modname = "qla2xxx"; descriptor___2.function = "qlt_lport_dump"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"; descriptor___2.format = "qla2xxx HW vha->port_name: "; descriptor___2.lineno = 5234U; descriptor___2.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor___2, "qla2xxx HW vha->port_name: "); } else { } i = 0; goto ldv_67506; ldv_67505: descriptor___3.modname = "qla2xxx"; descriptor___3.function = "qlt_lport_dump"; descriptor___3.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"; descriptor___3.format = "%02x "; descriptor___3.lineno = 5236U; descriptor___3.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_pr_debug(& descriptor___3, "%02x ", (int )vha->port_name[i]); } else { } i = i + 1; ldv_67506: ; if (i <= 7) { goto ldv_67505; } else { } descriptor___4.modname = "qla2xxx"; descriptor___4.function = "qlt_lport_dump"; descriptor___4.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"; descriptor___4.format = "\n"; descriptor___4.lineno = 5237U; descriptor___4.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___4.flags & 1L, 0L); if (tmp___4 != 0L) { __dynamic_pr_debug(& descriptor___4, "\n"); } else { } descriptor___5.modname = "qla2xxx"; descriptor___5.function = "qlt_lport_dump"; descriptor___5.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"; descriptor___5.format = "qla2xxx passed configfs WWPN: "; descriptor___5.lineno = 5239U; descriptor___5.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___5.flags & 1L, 0L); if (tmp___5 != 0L) { __dynamic_pr_debug(& descriptor___5, "qla2xxx passed configfs WWPN: "); } else { } 
put_unaligned_be64(wwpn, (void *)b); i = 0; goto ldv_67512; ldv_67511: descriptor___6.modname = "qla2xxx"; descriptor___6.function = "qlt_lport_dump"; descriptor___6.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"; descriptor___6.format = "%02x "; descriptor___6.lineno = 5242U; descriptor___6.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___6.flags & 1L, 0L); if (tmp___6 != 0L) { __dynamic_pr_debug(& descriptor___6, "%02x ", (int )*(b + (unsigned long )i)); } else { } i = i + 1; ldv_67512: ; if (i <= 7) { goto ldv_67511; } else { } descriptor___7.modname = "qla2xxx"; descriptor___7.function = "qlt_lport_dump"; descriptor___7.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"; descriptor___7.format = "\n"; descriptor___7.lineno = 5243U; descriptor___7.flags = 0U; tmp___7 = ldv__builtin_expect((long )descriptor___7.flags & 1L, 0L); if (tmp___7 != 0L) { __dynamic_pr_debug(& descriptor___7, "\n"); } else { } return; } } int qlt_lport_register(void *target_lport_ptr , u64 phys_wwpn , u64 npiv_wwpn , u64 npiv_wwnn , int (*callback)(struct scsi_qla_host * , void * , u64 , u64 ) ) { struct qla_tgt *tgt ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct Scsi_Host *host ; unsigned long flags ; int rc ; u8 b[8U] ; struct list_head const *__mptr ; raw_spinlock_t *tmp ; struct _ddebug descriptor ; long tmp___0 ; struct _ddebug descriptor___0 ; long tmp___1 ; struct Scsi_Host *tmp___2 ; int tmp___3 ; struct list_head const *__mptr___0 ; { mutex_lock_nested(& qla_tgt_mutex, 0U); __mptr = (struct list_head const *)qla_tgt_glist.next; tgt = (struct qla_tgt *)__mptr + 0xfffffffffffffcb8UL; goto ldv_67545; ldv_67544: vha = tgt->vha; ha = vha->hw; host = vha->host; if ((unsigned long )host == (unsigned long )((struct Scsi_Host *)0)) { goto ldv_67537; } else { } if (((int )(host->hostt)->supported_mode & 2) == 0) { goto ldv_67537; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if ((npiv_wwpn == 0ULL || npiv_wwnn == 0ULL) && ((int )host->active_mode & 2) != 0) { descriptor.modname = "qla2xxx"; descriptor.function = "qlt_lport_register"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"; descriptor.format = "MODE_TARGET already active on qla2xxx(%d)\n"; descriptor.lineno = 5281U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "MODE_TARGET already active on qla2xxx(%d)\n", host->host_no); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); goto ldv_67537; } else { } if (tgt->tgt_stop != 0) { descriptor___0.modname = "qla2xxx"; descriptor___0.function = "qlt_lport_register"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/6010/dscv_tempdir/dscv/ri/08_1a/drivers/scsi/qla2xxx/qla_target.c"; descriptor___0.format = "MODE_TARGET in shutdown on qla2xxx(%d)\n"; descriptor___0.lineno = 5287U; 
descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___0, "MODE_TARGET in shutdown on qla2xxx(%d)\n", host->host_no); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); goto ldv_67537; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___2 = scsi_host_get(host); if ((unsigned long )tmp___2 == (unsigned long )((struct Scsi_Host *)0)) { ql_dbg(16384U, vha, 57448, "Unable to scsi_host_get() for qla2xxx scsi_host\n"); goto ldv_67537; } else { } qlt_lport_dump(vha, phys_wwpn, (unsigned char *)(& b)); tmp___3 = memcmp((void const *)(& vha->port_name), (void const *)(& b), 8UL); if (tmp___3 != 0) { scsi_host_put(host); goto ldv_67537; } else { } rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); if (rc != 0) { scsi_host_put(host); } else { } mutex_unlock(& qla_tgt_mutex); return (rc); ldv_67537: __mptr___0 = (struct list_head const *)tgt->tgt_list_entry.next; tgt = (struct qla_tgt *)__mptr___0 + 0xfffffffffffffcb8UL; ldv_67545: ; if ((unsigned long )(& tgt->tgt_list_entry) != (unsigned long )(& qla_tgt_glist)) { goto ldv_67544; } else { } mutex_unlock(& qla_tgt_mutex); return (-19); } } static char const __kstrtab_qlt_lport_register[19U] = { 'q', 'l', 't', '_', 'l', 'p', 'o', 'r', 't', '_', 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '\000'}; struct kernel_symbol const __ksymtab_qlt_lport_register ; struct kernel_symbol const __ksymtab_qlt_lport_register = {(unsigned long )(& qlt_lport_register), (char const *)(& __kstrtab_qlt_lport_register)}; void qlt_lport_deregister(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct Scsi_Host *sh ; { ha = vha->hw; sh = vha->host; vha->vha_tgt.target_lport_ptr = (void *)0; ha->tgt.tgt_ops = (struct qla_tgt_func_tmpl *)0; scsi_host_put(sh); return; } } static char const __kstrtab_qlt_lport_deregister[21U] = { 'q', 'l', 't', '_', 'l', 'p', 'o', 'r', 't', '_', 'd', 'e', 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '\000'}; struct kernel_symbol const __ksymtab_qlt_lport_deregister ; struct kernel_symbol const __ksymtab_qlt_lport_deregister = {(unsigned long )(& qlt_lport_deregister), (char const *)(& __kstrtab_qlt_lport_deregister)}; static void qlt_set_mode(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; switch (ql2x_ini_mode) { case 1: ; case 0: (vha->host)->active_mode = 2U; goto ldv_67576; case 2: (vha->host)->active_mode = (unsigned char )((unsigned int )(vha->host)->active_mode | 2U); goto ldv_67576; default: ; goto ldv_67576; } ldv_67576: ; if ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U) { qla_reverse_ini_mode(vha); } else { } return; } } static void qlt_clear_mode(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; switch (ql2x_ini_mode) { case 1: (vha->host)->active_mode = 0U; goto ldv_67584; case 0: (vha->host)->active_mode = 1U; goto ldv_67584; case 2: (vha->host)->active_mode = (unsigned int )(vha->host)->active_mode & 1U; goto ldv_67584; default: ; goto ldv_67584; } ldv_67584: ; if ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U) { qla_reverse_ini_mode(vha); } else { } return; } } void qlt_enable_vha(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; unsigned long flags ; scsi_qla_host_t *base_vha ; void *tmp ; raw_spinlock_t *tmp___0 ; { ha = vha->hw; tgt = vha->vha_tgt.qla_tgt; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; if ((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0)) { ql_dbg(16384U, vha, 
57449, "Unable to locate qla_tgt pointer from struct qla_hw_data\n"); dump_stack(); return; } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); tgt->tgt_stopped = 0; qlt_set_mode(vha); spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((unsigned int )vha->vp_idx != 0U) { qla24xx_disable_vp(vha); qla24xx_enable_vp(vha); } else { set_bit(2L, (unsigned long volatile *)(& base_vha->dpc_flags)); qla2xxx_wake_dpc(base_vha); qla2x00_wait_for_hba_online(base_vha); } return; } } static char const __kstrtab_qlt_enable_vha[15U] = { 'q', 'l', 't', '_', 'e', 'n', 'a', 'b', 'l', 'e', '_', 'v', 'h', 'a', '\000'}; struct kernel_symbol const __ksymtab_qlt_enable_vha ; struct kernel_symbol const __ksymtab_qlt_enable_vha = {(unsigned long )(& qlt_enable_vha), (char const *)(& __kstrtab_qlt_enable_vha)}; static void qlt_disable_vha(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; unsigned long flags ; raw_spinlock_t *tmp ; { ha = vha->hw; tgt = vha->vha_tgt.qla_tgt; if ((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0)) { ql_dbg(16384U, vha, 57450, "Unable to locate qla_tgt pointer from struct qla_hw_data\n"); dump_stack(); return; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); qlt_clear_mode(vha); spin_unlock_irqrestore(& ha->hardware_lock, flags); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); qla2x00_wait_for_hba_online(vha); return; } } void qlt_vport_create(struct scsi_qla_host *vha , struct qla_hw_data *ha ) { bool tmp ; int tmp___0 ; struct lock_class_key __key ; struct lock_class_key __key___0 ; { tmp = qla_tgt_mode_enabled(vha); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } vha->vha_tgt.qla_tgt = (struct qla_tgt *)0; __mutex_init(& vha->vha_tgt.tgt_mutex, "&vha->vha_tgt.tgt_mutex", & __key); __mutex_init(& vha->vha_tgt.tgt_host_action_mutex, "&vha->vha_tgt.tgt_host_action_mutex", & __key___0); qlt_clear_mode(vha); ha->tgt.atio_q_length = 4096U; qlt_add_target(ha, vha); return; } } void qlt_rff_id(struct scsi_qla_host *vha , struct ct_sns_req *ct_req ) { bool tmp ; bool tmp___0 ; bool tmp___1 ; { tmp___1 = qla_tgt_mode_enabled(vha); if ((int )tmp___1) { tmp = qla_ini_mode_enabled(vha); if ((int )tmp) { ct_req->req.rff_id.fc4_feature = 3U; } else { ct_req->req.rff_id.fc4_feature = 1U; } } else { tmp___0 = qla_ini_mode_enabled(vha); if ((int )tmp___0) { ct_req->req.rff_id.fc4_feature = 2U; } else { } } return; } } void qlt_init_atio_q_entries(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; uint16_t cnt ; struct atio_from_isp *pkt ; bool tmp ; int tmp___0 ; { ha = vha->hw; pkt = (struct atio_from_isp *)ha->tgt.atio_ring; tmp = qla_tgt_mode_enabled(vha); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } cnt = 0U; goto ldv_67631; ldv_67630: pkt->u.raw.signature = 3735936685U; pkt = pkt + 1; cnt = (uint16_t )((int )cnt + 1); ldv_67631: ; if ((int )ha->tgt.atio_q_length > (int )cnt) { goto ldv_67630; } else { } return; } } void qlt_24xx_process_atio_queue(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct atio_from_isp *pkt ; int cnt ; int i ; { ha = vha->hw; if (*((unsigned long *)vha + 19UL) == 0UL) { return; } else { } goto ldv_67644; ldv_67643: pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; cnt = (int )pkt->u.raw.entry_count; qlt_24xx_atio_pkt_all_vps(vha, pkt); i = 0; goto ldv_67641; ldv_67640: ha->tgt.atio_ring_index = (uint16_t )((int 
)ha->tgt.atio_ring_index + 1); if ((int )ha->tgt.atio_ring_index == (int )ha->tgt.atio_q_length) { ha->tgt.atio_ring_index = 0U; ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; } else { ha->tgt.atio_ring_ptr = ha->tgt.atio_ring_ptr + 1; } pkt->u.raw.signature = 3735936685U; pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; i = i + 1; ldv_67641: ; if (i < cnt) { goto ldv_67640; } else { } __asm__ volatile ("sfence": : : "memory"); ldv_67644: ; if ((ha->tgt.atio_ring_ptr)->signature != 3735936685U) { goto ldv_67643; } else { } writel((unsigned int )ha->tgt.atio_ring_index, (void volatile *)(vha->hw)->tgt.atio_q_out); return; } } void qlt_24xx_config_rings(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct qla_msix_entry *msix ; struct init_cb_24xx *icb ; { ha = vha->hw; if (ql2x_ini_mode == 2) { return; } else { } writel(0U, (void volatile *)(vha->hw)->tgt.atio_q_in); writel(0U, (void volatile *)(vha->hw)->tgt.atio_q_out); readl((void const volatile *)(vha->hw)->tgt.atio_q_out); if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { msix = ha->msix_entries + 2UL; icb = (struct init_cb_24xx *)ha->init_cb; icb->msix_atio = msix->entry; ql_dbg(1073741824U, vha, 61554, "Registering ICB vector 0x%x for atio que.\n", (int )msix->entry); } else { } return; } } void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha , struct nvram_24xx *nv ) { struct qla_hw_data *ha ; bool tmp ; int tmp___0 ; bool tmp___1 ; { ha = vha->hw; tmp___1 = qla_tgt_mode_enabled(vha); if ((int )tmp___1) { if (ha->tgt.saved_set == 0) { ha->tgt.saved_exchange_count = nv->exchange_count; ha->tgt.saved_firmware_options_1 = nv->firmware_options_1; ha->tgt.saved_firmware_options_2 = nv->firmware_options_2; ha->tgt.saved_firmware_options_3 = nv->firmware_options_3; ha->tgt.saved_set = 1; } else { } nv->exchange_count = 65535U; nv->firmware_options_1 = nv->firmware_options_1 | 16U; tmp = qla_ini_mode_enabled(vha); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { nv->firmware_options_1 = nv->firmware_options_1 | 32U; } else { } nv->firmware_options_1 = nv->firmware_options_1 & 4294959103U; nv->firmware_options_1 = nv->firmware_options_1 & 4294966783U; if (ql2xtgt_tape_enable != 0) { nv->firmware_options_2 = nv->firmware_options_2 | 4096U; } else { nv->firmware_options_2 = nv->firmware_options_2 & 4294963199U; } nv->host_p = nv->host_p & 4294966271U; nv->firmware_options_2 = nv->firmware_options_2 | 16384U; } else { if (ha->tgt.saved_set != 0) { nv->exchange_count = ha->tgt.saved_exchange_count; nv->firmware_options_1 = ha->tgt.saved_firmware_options_1; nv->firmware_options_2 = ha->tgt.saved_firmware_options_2; nv->firmware_options_3 = ha->tgt.saved_firmware_options_3; } else { } return; } nv->firmware_options_3 = nv->firmware_options_3 | 576U; if ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U) { if (*((unsigned long *)vha + 19UL) != 0UL) { ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_classes = 12U; } else { } nv->firmware_options_2 = nv->firmware_options_2 | 256U; } else { if (*((unsigned long *)vha + 19UL) != 0UL) { ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_classes = 8U; } else { } nv->firmware_options_2 = nv->firmware_options_2 & 4294967039U; } return; } } void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha , struct init_cb_24xx *icb ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U) { memcpy((void *)(& icb->node_name), (void const *)(& ha->tgt.tgt_node_name), 8UL); 
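/* Copying ha->tgt.tgt_node_name into the init control block and OR-ing 16384U
 * (bit 14) into firmware_options_1 matches the node-name override path of the
 * original qlt_24xx_config_nvram_stage2(); the constant is assumed to be the
 * BIT_14 "use specified node name" firmware option. */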
icb->firmware_options_1 = icb->firmware_options_1 | 16384U; } else { } return; } } void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha , struct nvram_81xx *nv ) { struct qla_hw_data *ha ; bool tmp ; int tmp___0 ; bool tmp___1 ; { ha = vha->hw; if (ql2x_ini_mode == 2) { return; } else { } tmp___1 = qla_tgt_mode_enabled(vha); if ((int )tmp___1) { if (ha->tgt.saved_set == 0) { ha->tgt.saved_exchange_count = nv->exchange_count; ha->tgt.saved_firmware_options_1 = nv->firmware_options_1; ha->tgt.saved_firmware_options_2 = nv->firmware_options_2; ha->tgt.saved_firmware_options_3 = nv->firmware_options_3; ha->tgt.saved_set = 1; } else { } nv->exchange_count = 65535U; nv->firmware_options_1 = nv->firmware_options_1 | 16U; tmp = qla_ini_mode_enabled(vha); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { nv->firmware_options_1 = nv->firmware_options_1 | 32U; } else { } nv->firmware_options_1 = nv->firmware_options_1 & 4294959103U; nv->firmware_options_1 = nv->firmware_options_1 & 4294966783U; if (ql2xtgt_tape_enable != 0) { nv->firmware_options_2 = nv->firmware_options_2 | 4096U; } else { nv->firmware_options_2 = nv->firmware_options_2 & 4294963199U; } nv->host_p = nv->host_p & 4294966271U; nv->firmware_options_2 = nv->firmware_options_2 | 16384U; } else { if (ha->tgt.saved_set != 0) { nv->exchange_count = ha->tgt.saved_exchange_count; nv->firmware_options_1 = ha->tgt.saved_firmware_options_1; nv->firmware_options_2 = ha->tgt.saved_firmware_options_2; nv->firmware_options_3 = ha->tgt.saved_firmware_options_3; } else { } return; } nv->firmware_options_3 = nv->firmware_options_3 | 576U; if ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U) { if (*((unsigned long *)vha + 19UL) != 0UL) { ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_classes = 12U; } else { } nv->firmware_options_2 = nv->firmware_options_2 | 256U; } else { if (*((unsigned long *)vha + 19UL) != 0UL) { ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_classes = 8U; } else { } nv->firmware_options_2 = nv->firmware_options_2 & 4294967039U; } return; } } void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha , struct init_cb_81xx *icb ) { struct qla_hw_data *ha ; { ha = vha->hw; if (ql2x_ini_mode == 2) { return; } else { } if ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U) { memcpy((void *)(& icb->node_name), (void const *)(& ha->tgt.tgt_node_name), 8UL); icb->firmware_options_1 = icb->firmware_options_1 | 16384U; } else { } return; } } void qlt_83xx_iospace_config(struct qla_hw_data *ha ) { { if (ql2x_ini_mode == 2) { return; } else { } ha->msix_count = (unsigned int )ha->msix_count + 1U; return; } } int qlt_24xx_process_response_error(struct scsi_qla_host *vha , struct sts_entry_24xx *pkt ) { { switch ((int )pkt->entry_type) { case 84: ; case 85: ; case 18: ; case 14: ; case 122: ; return (1); default: ; return (0); } } } void qlt_modify_vp_config(struct scsi_qla_host *vha , struct vp_config_entry_24xx *vpmod ) { bool tmp ; bool tmp___0 ; int tmp___1 ; { tmp = qla_tgt_mode_enabled(vha); if ((int )tmp) { vpmod->options_idx1 = (unsigned int )vpmod->options_idx1 & 223U; } else { } tmp___0 = qla_ini_mode_enabled(vha); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { vpmod->options_idx1 = (unsigned int )vpmod->options_idx1 & 239U; } else { } return; } } void qlt_probe_one_stage1(struct scsi_qla_host *base_vha , struct qla_hw_data *ha ) { struct lock_class_key __key ; struct lock_class_key __key___0 ; { if (ql2x_ini_mode == 2) { return; } else { } if ((unsigned int 
)ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { (base_vha->hw)->tgt.atio_q_in = & (ha->mqiobase)->isp25mq.atio_q_in; (base_vha->hw)->tgt.atio_q_out = & (ha->mqiobase)->isp25mq.atio_q_out; } else { (base_vha->hw)->tgt.atio_q_in = & (ha->iobase)->isp24.atio_q_in; (base_vha->hw)->tgt.atio_q_out = & (ha->iobase)->isp24.atio_q_out; } __mutex_init(& base_vha->vha_tgt.tgt_mutex, "&base_vha->vha_tgt.tgt_mutex", & __key); __mutex_init(& base_vha->vha_tgt.tgt_host_action_mutex, "&base_vha->vha_tgt.tgt_host_action_mutex", & __key___0); qlt_clear_mode(base_vha); return; } } irqreturn_t qla83xx_msix_atio_q(int irq , void *dev_id ) { struct rsp_que *rsp ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; unsigned long flags ; void *tmp ; raw_spinlock_t *tmp___0 ; { rsp = (struct rsp_que *)dev_id; ha = rsp->hw; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); qlt_24xx_process_atio_queue(vha); qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } int qlt_mem_alloc(struct qla_hw_data *ha ) { void *tmp ; void *tmp___0 ; { if (ql2x_ini_mode == 2) { return (0); } else { } tmp = kzalloc(4096UL, 208U); ha->tgt.tgt_vp_map = (struct qla_tgt_vp_map *)tmp; if ((unsigned long )ha->tgt.tgt_vp_map == (unsigned long )((struct qla_tgt_vp_map *)0)) { return (-12); } else { } tmp___0 = dma_alloc_attrs(& (ha->pdev)->dev, (unsigned long )((int )ha->tgt.atio_q_length + 1) * 64UL, & ha->tgt.atio_dma, 208U, (struct dma_attrs *)0); ha->tgt.atio_ring = (struct atio *)tmp___0; if ((unsigned long )ha->tgt.atio_ring == (unsigned long )((struct atio *)0)) { kfree((void const *)ha->tgt.tgt_vp_map); return (-12); } else { } return (0); } } void qlt_mem_free(struct qla_hw_data *ha ) { { if (ql2x_ini_mode == 2) { return; } else { } if ((unsigned long )ha->tgt.atio_ring != (unsigned long )((struct atio *)0)) { dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )ha->tgt.atio_q_length + 1) * 64UL, (void *)ha->tgt.atio_ring, ha->tgt.atio_dma, (struct dma_attrs *)0); } else { } kfree((void const *)ha->tgt.tgt_vp_map); return; } } void qlt_update_vp_map(struct scsi_qla_host *vha , int cmd ) { { if (ql2x_ini_mode == 2) { return; } else { } switch (cmd) { case 1: ((vha->hw)->tgt.tgt_vp_map + (unsigned long )vha->vp_idx)->vha = vha; goto ldv_67717; case 2: ((vha->hw)->tgt.tgt_vp_map + (unsigned long )vha->d_id.b.al_pa)->idx = (uint8_t )vha->vp_idx; goto ldv_67717; case 3: ((vha->hw)->tgt.tgt_vp_map + (unsigned long )vha->vp_idx)->vha = (scsi_qla_host_t *)0; goto ldv_67717; case 4: ((vha->hw)->tgt.tgt_vp_map + (unsigned long )vha->d_id.b.al_pa)->idx = 0U; goto ldv_67717; } ldv_67717: ; return; } } static int qlt_parse_ini_mode(void) { int tmp ; int tmp___0 ; int tmp___1 ; { tmp___1 = strcasecmp((char const *)qlini_mode, "exclusive"); if (tmp___1 == 0) { ql2x_ini_mode = 0; } else { tmp___0 = strcasecmp((char const *)qlini_mode, "disabled"); if (tmp___0 == 0) { ql2x_ini_mode = 1; } else { tmp = strcasecmp((char const *)qlini_mode, "enabled"); if (tmp == 0) { ql2x_ini_mode = 2; } else { return (0); } } } return (1); } } int qlt_init(void) { int ret ; int tmp ; struct lock_class_key __key ; char const *__lock_name ; struct workqueue_struct *tmp___0 ; { tmp = qlt_parse_ini_mode(); if (tmp == 0) { ql_log(0U, (scsi_qla_host_t *)0, 57451, "qlt_parse_ini_mode() failed\n"); return (-22); } else { } if (ql2x_ini_mode == 2) { return (0); } else { } 
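/* qlt_init() allocation order: the qla_tgt_mgmt_cmd slab cache, then a
 * 25-element mempool backed by that cache, then the qla_tgt_wq workqueue.
 * Failures unwind in reverse through the out_cmd_mempool and
 * out_mgmt_cmd_cachep labels; on success the return value is nonzero only when
 * ql2x_ini_mode was parsed as "disabled" (value 1). */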
qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", 1000UL, 8UL, 0UL, (void (*)(void * ))0); if ((unsigned long )qla_tgt_mgmt_cmd_cachep == (unsigned long )((struct kmem_cache *)0)) { ql_log(0U, (scsi_qla_host_t *)0, 57453, "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); return (-12); } else { } qla_tgt_mgmt_cmd_mempool = mempool_create(25, & mempool_alloc_slab, & mempool_free_slab, (void *)qla_tgt_mgmt_cmd_cachep); if ((unsigned long )qla_tgt_mgmt_cmd_mempool == (unsigned long )((mempool_t *)0)) { ql_log(0U, (scsi_qla_host_t *)0, 57454, "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); ret = -12; goto out_mgmt_cmd_cachep; } else { } __lock_name = "\"qla_tgt_wq\""; tmp___0 = __alloc_workqueue_key("qla_tgt_wq", 0U, 0, & __key, __lock_name); qla_tgt_wq = tmp___0; if ((unsigned long )qla_tgt_wq == (unsigned long )((struct workqueue_struct *)0)) { ql_log(0U, (scsi_qla_host_t *)0, 57455, "alloc_workqueue for qla_tgt_wq failed\n"); ret = -12; goto out_cmd_mempool; } else { } return (ql2x_ini_mode == 1); out_cmd_mempool: mempool_destroy(qla_tgt_mgmt_cmd_mempool); out_mgmt_cmd_cachep: kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); return (ret); } } void qlt_exit(void) { { if (ql2x_ini_mode == 2) { return; } else { } ldv_destroy_workqueue_302(qla_tgt_wq); mempool_destroy(qla_tgt_mgmt_cmd_mempool); kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); return; } } void call_and_disable_work_10(struct work_struct *work ) { { if ((ldv_work_10_0 == 2 || ldv_work_10_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_10_0) { ldv__builtin_trap(); ldv_work_10_0 = 1; return; } else { } if ((ldv_work_10_1 == 2 || ldv_work_10_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_10_1) { ldv__builtin_trap(); ldv_work_10_1 = 1; return; } else { } if ((ldv_work_10_2 == 2 || ldv_work_10_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_10_2) { ldv__builtin_trap(); ldv_work_10_2 = 1; return; } else { } if ((ldv_work_10_3 == 2 || ldv_work_10_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_10_3) { ldv__builtin_trap(); ldv_work_10_3 = 1; return; } else { } return; } } void work_init_9(void) { { ldv_work_9_0 = 0; ldv_work_9_1 = 0; ldv_work_9_2 = 0; ldv_work_9_3 = 0; return; } } void invoke_work_8(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_8_0 == 2 || ldv_work_8_0 == 3) { ldv_work_8_0 = 4; qlt_create_sess_from_atio(ldv_work_struct_8_0); ldv_work_8_0 = 1; } else { } goto ldv_67754; case 1: ; if (ldv_work_8_1 == 2 || ldv_work_8_1 == 3) { ldv_work_8_1 = 4; qlt_create_sess_from_atio(ldv_work_struct_8_0); ldv_work_8_1 = 1; } else { } goto ldv_67754; case 2: ; if (ldv_work_8_2 == 2 || ldv_work_8_2 == 3) { ldv_work_8_2 = 4; qlt_create_sess_from_atio(ldv_work_struct_8_0); ldv_work_8_2 = 1; } else { } goto ldv_67754; case 3: ; if (ldv_work_8_3 == 2 || ldv_work_8_3 == 3) { ldv_work_8_3 = 4; qlt_create_sess_from_atio(ldv_work_struct_8_0); ldv_work_8_3 = 1; } else { } goto ldv_67754; default: ldv_stop(); } ldv_67754: ; return; } } void disable_work_7(struct work_struct *work ) { { if ((ldv_work_7_0 == 3 || ldv_work_7_0 == 2) && (unsigned long )ldv_work_struct_7_0 == (unsigned long )work) { ldv_work_7_0 = 1; } else { } if ((ldv_work_7_1 == 3 || ldv_work_7_1 == 2) && (unsigned long )ldv_work_struct_7_1 == (unsigned long )work) { ldv_work_7_1 = 1; } else { } if ((ldv_work_7_2 == 3 || ldv_work_7_2 == 2) && (unsigned long )ldv_work_struct_7_2 == (unsigned long )work) { ldv_work_7_2 = 1; } else { } if 
((ldv_work_7_3 == 3 || ldv_work_7_3 == 2) && (unsigned long )ldv_work_struct_7_3 == (unsigned long )work) { ldv_work_7_3 = 1; } else { } return; } } void activate_pending_timer_29(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_29 == (unsigned long )timer) { if (ldv_timer_state_29 == 2 || pending_flag != 0) { ldv_timer_list_29 = timer; ldv_timer_list_29->data = data; ldv_timer_state_29 = 1; } else { } return; } else { } reg_timer_29(timer); ldv_timer_list_29->data = data; return; } } void invoke_work_10(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_10_0 == 2 || ldv_work_10_0 == 3) { ldv_work_10_0 = 4; ldv__builtin_trap(); ldv_work_10_0 = 1; } else { } goto ldv_67774; case 1: ; if (ldv_work_10_1 == 2 || ldv_work_10_1 == 3) { ldv_work_10_1 = 4; ldv__builtin_trap(); ldv_work_10_1 = 1; } else { } goto ldv_67774; case 2: ; if (ldv_work_10_2 == 2 || ldv_work_10_2 == 3) { ldv_work_10_2 = 4; ldv__builtin_trap(); ldv_work_10_2 = 1; } else { } goto ldv_67774; case 3: ; if (ldv_work_10_3 == 2 || ldv_work_10_3 == 3) { ldv_work_10_3 = 4; ldv__builtin_trap(); ldv_work_10_3 = 1; } else { } goto ldv_67774; default: ldv_stop(); } ldv_67774: ; return; } } void call_and_disable_all_11(int state ) { { if (ldv_work_11_0 == state) { call_and_disable_work_11(ldv_work_struct_11_0); } else { } if (ldv_work_11_1 == state) { call_and_disable_work_11(ldv_work_struct_11_1); } else { } if (ldv_work_11_2 == state) { call_and_disable_work_11(ldv_work_struct_11_2); } else { } if (ldv_work_11_3 == state) { call_and_disable_work_11(ldv_work_struct_11_3); } else { } return; } } int reg_timer_29(struct timer_list *timer ) { { ldv_timer_list_29 = timer; ldv_timer_state_29 = 1; return (0); } } void call_and_disable_work_7(struct work_struct *work ) { { if ((ldv_work_7_0 == 2 || ldv_work_7_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_7_0) { qlt_free_session_done(work); ldv_work_7_0 = 1; return; } else { } if ((ldv_work_7_1 == 2 || ldv_work_7_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_7_1) { qlt_free_session_done(work); ldv_work_7_1 = 1; return; } else { } if ((ldv_work_7_2 == 2 || ldv_work_7_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_7_2) { qlt_free_session_done(work); ldv_work_7_2 = 1; return; } else { } if ((ldv_work_7_3 == 2 || ldv_work_7_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_7_3) { qlt_free_session_done(work); ldv_work_7_3 = 1; return; } else { } return; } } void work_init_8(void) { { ldv_work_8_0 = 0; ldv_work_8_1 = 0; ldv_work_8_2 = 0; ldv_work_8_3 = 0; return; } } void call_and_disable_all_9(int state ) { { if (ldv_work_9_0 == state) { call_and_disable_work_9(ldv_work_struct_9_0); } else { } if (ldv_work_9_1 == state) { call_and_disable_work_9(ldv_work_struct_9_1); } else { } if (ldv_work_9_2 == state) { call_and_disable_work_9(ldv_work_struct_9_2); } else { } if (ldv_work_9_3 == state) { call_and_disable_work_9(ldv_work_struct_9_3); } else { } return; } } void call_and_disable_all_12(int state ) { { if (ldv_work_12_0 == state) { call_and_disable_work_12(ldv_work_struct_12_0); } else { } if (ldv_work_12_1 == state) { call_and_disable_work_12(ldv_work_struct_12_1); } else { } if (ldv_work_12_2 == state) { call_and_disable_work_12(ldv_work_struct_12_2); } else { } if (ldv_work_12_3 == state) { call_and_disable_work_12(ldv_work_struct_12_3); } else { } return; } } void work_init_10(void) { { ldv_work_10_0 = 0; ldv_work_10_1 = 0; 
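/* The ldv_work_N_M flags reset here (and in the other work_init_* functions)
 * each model one work_struct slot of the LDV environment model. Judging from
 * the surrounding harness code, 0 appears to mean unregistered, 1 registered
 * but disabled, 2 and 3 activated/pending, and 4 currently running; this is an
 * inference from the generated code, not documentation from the LDV tools. */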
ldv_work_10_2 = 0; ldv_work_10_3 = 0; return; } } void call_and_disable_work_8(struct work_struct *work ) { { if ((ldv_work_8_0 == 2 || ldv_work_8_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_8_0) { qlt_create_sess_from_atio(work); ldv_work_8_0 = 1; return; } else { } if ((ldv_work_8_1 == 2 || ldv_work_8_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_8_1) { qlt_create_sess_from_atio(work); ldv_work_8_1 = 1; return; } else { } if ((ldv_work_8_2 == 2 || ldv_work_8_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_8_2) { qlt_create_sess_from_atio(work); ldv_work_8_2 = 1; return; } else { } if ((ldv_work_8_3 == 2 || ldv_work_8_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_8_3) { qlt_create_sess_from_atio(work); ldv_work_8_3 = 1; return; } else { } return; } } void invoke_work_11(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_11_0 == 2 || ldv_work_11_0 == 3) { ldv_work_11_0 = 4; qlt_sess_work_fn(ldv_work_struct_11_0); ldv_work_11_0 = 1; } else { } goto ldv_67816; case 1: ; if (ldv_work_11_1 == 2 || ldv_work_11_1 == 3) { ldv_work_11_1 = 4; qlt_sess_work_fn(ldv_work_struct_11_0); ldv_work_11_1 = 1; } else { } goto ldv_67816; case 2: ; if (ldv_work_11_2 == 2 || ldv_work_11_2 == 3) { ldv_work_11_2 = 4; qlt_sess_work_fn(ldv_work_struct_11_0); ldv_work_11_2 = 1; } else { } goto ldv_67816; case 3: ; if (ldv_work_11_3 == 2 || ldv_work_11_3 == 3) { ldv_work_11_3 = 4; qlt_sess_work_fn(ldv_work_struct_11_0); ldv_work_11_3 = 1; } else { } goto ldv_67816; default: ldv_stop(); } ldv_67816: ; return; } } void invoke_work_9(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_9_0 == 2 || ldv_work_9_0 == 3) { ldv_work_9_0 = 4; qlt_do_work(ldv_work_struct_9_0); ldv_work_9_0 = 1; } else { } goto ldv_67827; case 1: ; if (ldv_work_9_1 == 2 || ldv_work_9_1 == 3) { ldv_work_9_1 = 4; qlt_do_work(ldv_work_struct_9_0); ldv_work_9_1 = 1; } else { } goto ldv_67827; case 2: ; if (ldv_work_9_2 == 2 || ldv_work_9_2 == 3) { ldv_work_9_2 = 4; qlt_do_work(ldv_work_struct_9_0); ldv_work_9_2 = 1; } else { } goto ldv_67827; case 3: ; if (ldv_work_9_3 == 2 || ldv_work_9_3 == 3) { ldv_work_9_3 = 4; qlt_do_work(ldv_work_struct_9_0); ldv_work_9_3 = 1; } else { } goto ldv_67827; default: ldv_stop(); } ldv_67827: ; return; } } void disable_work_8(struct work_struct *work ) { { if ((ldv_work_8_0 == 3 || ldv_work_8_0 == 2) && (unsigned long )ldv_work_struct_8_0 == (unsigned long )work) { ldv_work_8_0 = 1; } else { } if ((ldv_work_8_1 == 3 || ldv_work_8_1 == 2) && (unsigned long )ldv_work_struct_8_1 == (unsigned long )work) { ldv_work_8_1 = 1; } else { } if ((ldv_work_8_2 == 3 || ldv_work_8_2 == 2) && (unsigned long )ldv_work_struct_8_2 == (unsigned long )work) { ldv_work_8_2 = 1; } else { } if ((ldv_work_8_3 == 3 || ldv_work_8_3 == 2) && (unsigned long )ldv_work_struct_8_3 == (unsigned long )work) { ldv_work_8_3 = 1; } else { } return; } } void activate_work_9(struct work_struct *work , int state ) { { if (ldv_work_9_0 == 0) { ldv_work_struct_9_0 = work; ldv_work_9_0 = state; return; } else { } if (ldv_work_9_1 == 0) { ldv_work_struct_9_1 = work; ldv_work_9_1 = state; return; } else { } if (ldv_work_9_2 == 0) { ldv_work_struct_9_2 = work; ldv_work_9_2 = state; return; } else { } if (ldv_work_9_3 == 0) { ldv_work_struct_9_3 = work; ldv_work_9_3 = state; return; } else { } return; } } void call_and_disable_all_7(int state ) { { if (ldv_work_7_0 == state) { 
call_and_disable_work_7(ldv_work_struct_7_0); } else { } if (ldv_work_7_1 == state) { call_and_disable_work_7(ldv_work_struct_7_1); } else { } if (ldv_work_7_2 == state) { call_and_disable_work_7(ldv_work_struct_7_2); } else { } if (ldv_work_7_3 == state) { call_and_disable_work_7(ldv_work_struct_7_3); } else { } return; } } void invoke_work_12(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_12_0 == 2 || ldv_work_12_0 == 3) { ldv_work_12_0 = 4; qlt_handle_srr_work(ldv_work_struct_12_0); ldv_work_12_0 = 1; } else { } goto ldv_67848; case 1: ; if (ldv_work_12_1 == 2 || ldv_work_12_1 == 3) { ldv_work_12_1 = 4; qlt_handle_srr_work(ldv_work_struct_12_0); ldv_work_12_1 = 1; } else { } goto ldv_67848; case 2: ; if (ldv_work_12_2 == 2 || ldv_work_12_2 == 3) { ldv_work_12_2 = 4; qlt_handle_srr_work(ldv_work_struct_12_0); ldv_work_12_2 = 1; } else { } goto ldv_67848; case 3: ; if (ldv_work_12_3 == 2 || ldv_work_12_3 == 3) { ldv_work_12_3 = 4; qlt_handle_srr_work(ldv_work_struct_12_0); ldv_work_12_3 = 1; } else { } goto ldv_67848; default: ldv_stop(); } ldv_67848: ; return; } } void call_and_disable_work_12(struct work_struct *work ) { { if ((ldv_work_12_0 == 2 || ldv_work_12_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_12_0) { qlt_handle_srr_work(work); ldv_work_12_0 = 1; return; } else { } if ((ldv_work_12_1 == 2 || ldv_work_12_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_12_1) { qlt_handle_srr_work(work); ldv_work_12_1 = 1; return; } else { } if ((ldv_work_12_2 == 2 || ldv_work_12_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_12_2) { qlt_handle_srr_work(work); ldv_work_12_2 = 1; return; } else { } if ((ldv_work_12_3 == 2 || ldv_work_12_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_12_3) { qlt_handle_srr_work(work); ldv_work_12_3 = 1; return; } else { } return; } } void work_init_11(void) { { ldv_work_11_0 = 0; ldv_work_11_1 = 0; ldv_work_11_2 = 0; ldv_work_11_3 = 0; return; } } void activate_work_11(struct work_struct *work , int state ) { { if (ldv_work_11_0 == 0) { ldv_work_struct_11_0 = work; ldv_work_11_0 = state; return; } else { } if (ldv_work_11_1 == 0) { ldv_work_struct_11_1 = work; ldv_work_11_1 = state; return; } else { } if (ldv_work_11_2 == 0) { ldv_work_struct_11_2 = work; ldv_work_11_2 = state; return; } else { } if (ldv_work_11_3 == 0) { ldv_work_struct_11_3 = work; ldv_work_11_3 = state; return; } else { } return; } } void disable_work_11(struct work_struct *work ) { { if ((ldv_work_11_0 == 3 || ldv_work_11_0 == 2) && (unsigned long )ldv_work_struct_11_0 == (unsigned long )work) { ldv_work_11_0 = 1; } else { } if ((ldv_work_11_1 == 3 || ldv_work_11_1 == 2) && (unsigned long )ldv_work_struct_11_1 == (unsigned long )work) { ldv_work_11_1 = 1; } else { } if ((ldv_work_11_2 == 3 || ldv_work_11_2 == 2) && (unsigned long )ldv_work_struct_11_2 == (unsigned long )work) { ldv_work_11_2 = 1; } else { } if ((ldv_work_11_3 == 3 || ldv_work_11_3 == 2) && (unsigned long )ldv_work_struct_11_3 == (unsigned long )work) { ldv_work_11_3 = 1; } else { } return; } } void disable_work_12(struct work_struct *work ) { { if ((ldv_work_12_0 == 3 || ldv_work_12_0 == 2) && (unsigned long )ldv_work_struct_12_0 == (unsigned long )work) { ldv_work_12_0 = 1; } else { } if ((ldv_work_12_1 == 3 || ldv_work_12_1 == 2) && (unsigned long )ldv_work_struct_12_1 == (unsigned long )work) { ldv_work_12_1 = 1; } else { } if ((ldv_work_12_2 == 3 || ldv_work_12_2 == 2) && (unsigned long 
)ldv_work_struct_12_2 == (unsigned long )work) { ldv_work_12_2 = 1; } else { } if ((ldv_work_12_3 == 3 || ldv_work_12_3 == 2) && (unsigned long )ldv_work_struct_12_3 == (unsigned long )work) { ldv_work_12_3 = 1; } else { } return; } }
void work_init_7(void) { { ldv_work_7_0 = 0; ldv_work_7_1 = 0; ldv_work_7_2 = 0; ldv_work_7_3 = 0; return; } }
void invoke_work_7(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_7_0 == 2 || ldv_work_7_0 == 3) { ldv_work_7_0 = 4; qlt_free_session_done(ldv_work_struct_7_0); ldv_work_7_0 = 1; } else { } goto ldv_67880; case 1: ; if (ldv_work_7_1 == 2 || ldv_work_7_1 == 3) { ldv_work_7_1 = 4; qlt_free_session_done(ldv_work_struct_7_1); ldv_work_7_1 = 1; } else { } goto ldv_67880; case 2: ; if (ldv_work_7_2 == 2 || ldv_work_7_2 == 3) { ldv_work_7_2 = 4; qlt_free_session_done(ldv_work_struct_7_2); ldv_work_7_2 = 1; } else { } goto ldv_67880; case 3: ; if (ldv_work_7_3 == 2 || ldv_work_7_3 == 3) { ldv_work_7_3 = 4; qlt_free_session_done(ldv_work_struct_7_3); ldv_work_7_3 = 1; } else { } goto ldv_67880; default: ldv_stop(); } ldv_67880: ; return; } }
void call_and_disable_all_8(int state ) { { if (ldv_work_8_0 == state) { call_and_disable_work_8(ldv_work_struct_8_0); } else { } if (ldv_work_8_1 == state) { call_and_disable_work_8(ldv_work_struct_8_1); } else { } if (ldv_work_8_2 == state) { call_and_disable_work_8(ldv_work_struct_8_2); } else { } if (ldv_work_8_3 == state) { call_and_disable_work_8(ldv_work_struct_8_3); } else { } return; } }
void call_and_disable_work_9(struct work_struct *work ) { { if ((ldv_work_9_0 == 2 || ldv_work_9_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_0) { qlt_do_work(work); ldv_work_9_0 = 1; return; } else { } if ((ldv_work_9_1 == 2 || ldv_work_9_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_1) { qlt_do_work(work); ldv_work_9_1 = 1; return; } else { } if ((ldv_work_9_2 == 2 || ldv_work_9_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_2) { qlt_do_work(work); ldv_work_9_2 = 1; return; } else { } if ((ldv_work_9_3 == 2 || ldv_work_9_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_3) { qlt_do_work(work); ldv_work_9_3 = 1; return; } else { } return; } }
void activate_work_12(struct work_struct *work , int state ) { { if (ldv_work_12_0 == 0) { ldv_work_struct_12_0 = work; ldv_work_12_0 = state; return; } else { } if (ldv_work_12_1 == 0) { ldv_work_struct_12_1 = work; ldv_work_12_1 = state; return; } else { } if (ldv_work_12_2 == 0) { ldv_work_struct_12_2 = work; ldv_work_12_2 = state; return; } else { } if (ldv_work_12_3 == 0) { ldv_work_struct_12_3 = work; ldv_work_12_3 = state; return; } else { } return; } }
void activate_work_8(struct work_struct *work , int state ) { { if (ldv_work_8_0 == 0) { ldv_work_struct_8_0 = work; ldv_work_8_0 = state; return; } else { } if (ldv_work_8_1 == 0) { ldv_work_struct_8_1 = work; ldv_work_8_1 = state; return; } else { } if (ldv_work_8_2 == 0) { ldv_work_struct_8_2 = work; ldv_work_8_2 = state; return; } else { } if (ldv_work_8_3 == 0) { ldv_work_struct_8_3 = work; ldv_work_8_3 = state; return; } else { } return; } }
void disable_work_9(struct work_struct *work ) { { if ((ldv_work_9_0 == 3 || ldv_work_9_0 == 2) && (unsigned long )ldv_work_struct_9_0 == (unsigned long )work) { ldv_work_9_0 = 1; } else { } if ((ldv_work_9_1 == 3 || ldv_work_9_1 == 2) && (unsigned long )ldv_work_struct_9_1 == (unsigned long )work) { ldv_work_9_1 = 1; } else { } if ((ldv_work_9_2 == 3 ||
ldv_work_9_2 == 2) && (unsigned long )ldv_work_struct_9_2 == (unsigned long )work) { ldv_work_9_2 = 1; } else { } if ((ldv_work_9_3 == 3 || ldv_work_9_3 == 2) && (unsigned long )ldv_work_struct_9_3 == (unsigned long )work) { ldv_work_9_3 = 1; } else { } return; } } void disable_work_10(struct work_struct *work ) { { if ((ldv_work_10_0 == 3 || ldv_work_10_0 == 2) && (unsigned long )ldv_work_struct_10_0 == (unsigned long )work) { ldv_work_10_0 = 1; } else { } if ((ldv_work_10_1 == 3 || ldv_work_10_1 == 2) && (unsigned long )ldv_work_struct_10_1 == (unsigned long )work) { ldv_work_10_1 = 1; } else { } if ((ldv_work_10_2 == 3 || ldv_work_10_2 == 2) && (unsigned long )ldv_work_struct_10_2 == (unsigned long )work) { ldv_work_10_2 = 1; } else { } if ((ldv_work_10_3 == 3 || ldv_work_10_3 == 2) && (unsigned long )ldv_work_struct_10_3 == (unsigned long )work) { ldv_work_10_3 = 1; } else { } return; } } void work_init_12(void) { { ldv_work_12_0 = 0; ldv_work_12_1 = 0; ldv_work_12_2 = 0; ldv_work_12_3 = 0; return; } } void activate_work_10(struct work_struct *work , int state ) { { if (ldv_work_10_0 == 0) { ldv_work_struct_10_0 = work; ldv_work_10_0 = state; return; } else { } if (ldv_work_10_1 == 0) { ldv_work_struct_10_1 = work; ldv_work_10_1 = state; return; } else { } if (ldv_work_10_2 == 0) { ldv_work_struct_10_2 = work; ldv_work_10_2 = state; return; } else { } if (ldv_work_10_3 == 0) { ldv_work_struct_10_3 = work; ldv_work_10_3 = state; return; } else { } return; } } void call_and_disable_work_11(struct work_struct *work ) { { if ((ldv_work_11_0 == 2 || ldv_work_11_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_11_0) { qlt_sess_work_fn(work); ldv_work_11_0 = 1; return; } else { } if ((ldv_work_11_1 == 2 || ldv_work_11_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_11_1) { qlt_sess_work_fn(work); ldv_work_11_1 = 1; return; } else { } if ((ldv_work_11_2 == 2 || ldv_work_11_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_11_2) { qlt_sess_work_fn(work); ldv_work_11_2 = 1; return; } else { } if ((ldv_work_11_3 == 2 || ldv_work_11_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_11_3) { qlt_sess_work_fn(work); ldv_work_11_3 = 1; return; } else { } return; } } void activate_work_7(struct work_struct *work , int state ) { { if (ldv_work_7_0 == 0) { ldv_work_struct_7_0 = work; ldv_work_7_0 = state; return; } else { } if (ldv_work_7_1 == 0) { ldv_work_struct_7_1 = work; ldv_work_7_1 = state; return; } else { } if (ldv_work_7_2 == 0) { ldv_work_struct_7_2 = work; ldv_work_7_2 = state; return; } else { } if (ldv_work_7_3 == 0) { ldv_work_struct_7_3 = work; ldv_work_7_3 = state; return; } else { } return; } } void choose_timer_29(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_29 = 2; return; } } void call_and_disable_all_10(int state ) { { if (ldv_work_10_0 == state) { call_and_disable_work_10(ldv_work_struct_10_0); } else { } if (ldv_work_10_1 == state) { call_and_disable_work_10(ldv_work_struct_10_1); } else { } if (ldv_work_10_2 == state) { call_and_disable_work_10(ldv_work_struct_10_2); } else { } if (ldv_work_10_3 == state) { call_and_disable_work_10(ldv_work_struct_10_3); } else { } return; } } void disable_suitable_timer_29(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_29) { ldv_timer_state_29 = 0; return; } else { } return; } } bool ldv_queue_work_on_295(int ldv_func_arg1 , struct workqueue_struct 
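/* LDV environment model: the ldv_queue_work_on_*(), ldv_queue_delayed_work_on_*(), ldv_flush_workqueue_*() and related wrappers below intercept the corresponding workqueue calls made by the driver. Each wrapper forwards to the real kernel function and then updates the work model (activate_work_7() marks the item as queued; call_and_disable_all_7()/call_and_disable_work_7() run and retire pending handlers on flush/destroy), so the verifier can explore the handlers at the points where the kernel could run them. */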
*ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_296(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_297(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_298(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_299(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_300(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } bool ldv_flush_delayed_work_301(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___14 ldv_func_res ; bool tmp ; { tmp = flush_delayed_work(ldv_func_arg1); ldv_func_res = tmp; call_and_disable_work_7(& ldv_func_arg1->work); return (ldv_func_res); } } void ldv_destroy_workqueue_302(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_work_on_315(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_317(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_316(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_319(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_318(struct workqueue_struct *ldv_func_arg1 ) ; void disable_suitable_timer_30(struct timer_list *timer ) ; void choose_timer_30(struct timer_list *timer ) ; int reg_timer_30(struct timer_list *timer ) ; void activate_pending_timer_30(struct timer_list *timer , unsigned long data , int pending_flag ) ; int ldv_scsi_add_host_with_dma_320(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; static uint32_t const ql27xx_fwdt_default_template[351U] = { 1660944384U, 2751463424U, 2080702464U, 0U, 805306368U, 16777216U, 0U, 3225448116U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 
0U, 0U, 0U, 67174400U, 335544320U, 0U, 33554432U, 1140850688U, 151060480U, 268435456U, 0U, 33554432U, 16842752U, 469762048U, 0U, 33554432U, 6291456U, 0U, 3221225472U, 16842752U, 469762048U, 0U, 33554432U, 6291456U, 0U, 3422552064U, 16842752U, 469762048U, 0U, 33554432U, 274726912U, 0U, 3556769792U, 16842752U, 469762048U, 0U, 33554432U, 1880031232U, 96U, 4026531840U, 65536U, 402653184U, 0U, 33554432U, 7340032U, 68157632U, 65536U, 402653184U, 0U, 33554432U, 275775488U, 68157632U, 65536U, 402653184U, 0U, 33554432U, 1081081856U, 68157632U, 16842752U, 469762048U, 0U, 33554432U, 8126464U, 16777216U, 3221225472U, 65536U, 402653184U, 0U, 33554432U, 8126464U, 67305668U, 65536U, 402653184U, 0U, 33554432U, 8126464U, 67174592U, 16842752U, 469762048U, 0U, 33554432U, 8126464U, 0U, 3221225472U, 65536U, 402653184U, 0U, 33554432U, 8126464U, 69206016U, 184614912U, 402653184U, 0U, 33554432U, 201326592U, 0U, 33619968U, 536870912U, 0U, 33554432U, 1880031232U, 67174652U, 4026531840U, 176U, 33619968U, 536870912U, 0U, 33554432U, 1880031232U, 67174652U, 4026531840U, 4272U, 33619968U, 536870912U, 0U, 33554432U, 1880031232U, 67174652U, 4026531840U, 8368U, 33619968U, 536870912U, 0U, 33554432U, 1880031232U, 67174652U, 4026531840U, 12464U, 33619968U, 536870912U, 0U, 33554432U, 1880031232U, 67174652U, 4026531840U, 16560U, 33619968U, 536870912U, 0U, 33554432U, 1880031232U, 67174652U, 4026531840U, 20656U, 33619968U, 536870912U, 0U, 33554432U, 1880031232U, 67174652U, 4026531840U, 24752U, 33619968U, 536870912U, 0U, 33554432U, 1880031232U, 67174652U, 4026531840U, 28848U, 33619968U, 536870912U, 0U, 33554432U, 1880031232U, 67174652U, 4026531840U, 32944U, 33619968U, 536870912U, 0U, 33554432U, 1880031232U, 67174652U, 4026531840U, 37040U, 33619968U, 536870912U, 0U, 33554432U, 1880031232U, 67174652U, 4026531840U, 41136U, 65536U, 402653184U, 0U, 33554432U, 167772160U, 67174592U, 65536U, 402653184U, 0U, 33554432U, 167772160U, 69206144U, 65536U, 402653184U, 0U, 33554432U, 12451840U, 68157632U, 65536U, 402653184U, 0U, 33554432U, 280887296U, 68157632U, 65536U, 402653184U, 0U, 33554432U, 549322752U, 68157632U, 65536U, 402653184U, 0U, 33554432U, 817758208U, 68157632U, 65536U, 402653184U, 0U, 33554432U, 11534336U, 68157632U, 65536U, 402653184U, 0U, 33554432U, 279969792U, 68157632U, 65536U, 402653184U, 0U, 33554432U, 548405248U, 68157632U, 65536U, 402653184U, 0U, 33554432U, 816840704U, 68157632U, 65536U, 402653184U, 0U, 33554432U, 3145728U, 68157632U, 65536U, 402653184U, 0U, 33554432U, 271581184U, 68157632U, 65536U, 402653184U, 0U, 33554432U, 540016640U, 68157632U, 65536U, 402653184U, 0U, 33554432U, 808452096U, 68157632U, 167837696U, 268435456U, 0U, 33554432U, 100728832U, 469762048U, 0U, 33554432U, 16777216U, 512U, 4280484352U, 100728832U, 469762048U, 0U, 33554432U, 33554432U, 4096U, 0U, 117506048U, 402653184U, 0U, 33554432U, 0U, 16777216U, 117506048U, 402653184U, 0U, 33554432U, 0U, 33554432U, 117506048U, 402653184U, 0U, 33554432U, 0U, 50331648U, 218169344U, 335544320U, 0U, 33554432U, 0U, 4278190080U, 268435456U, 0U, 128U}; __inline static void *qla27xx_isp_reg(struct scsi_qla_host *vha ) { { return ((void *)(& ((vha->hw)->iobase)->isp24)); } } __inline static void qla27xx_insert16(uint16_t value , void *buf , ulong *len ) { { if ((unsigned long )buf != (unsigned long )((void *)0)) { buf = buf + *len; *((__le16 *)buf) = value; } else { } *len = *len + 2UL; return; } } __inline static void qla27xx_insert32(uint32_t value , void *buf , ulong *len ) { { if ((unsigned long )buf != (unsigned long )((void *)0)) { buf = buf + *len; *((__le32 
*)buf) = value; } else { } *len = *len + 4UL; return; } } __inline static void qla27xx_insertbuf(void *mem , ulong size , void *buf , ulong *len ) { { if (((unsigned long )buf != (unsigned long )((void *)0) && (unsigned long )mem != (unsigned long )((void *)0)) && size != 0UL) { buf = buf + *len; memcpy(buf, (void const *)mem, size); } else { } *len = *len + size; return; } } __inline static void qla27xx_read8(void *window , void *buf , ulong *len ) { uint8_t value ; { value = 255U; if ((unsigned long )buf != (unsigned long )((void *)0)) { value = readb((void const volatile *)window); } else { } qla27xx_insert32((uint32_t )value, buf, len); return; } } __inline static void qla27xx_read16(void *window , void *buf , ulong *len ) { uint16_t value ; { value = 65535U; if ((unsigned long )buf != (unsigned long )((void *)0)) { value = readw((void const volatile *)window); } else { } qla27xx_insert32((uint32_t )value, buf, len); return; } } __inline static void qla27xx_read32(void *window , void *buf , ulong *len ) { uint32_t value ; { value = 4294967295U; if ((unsigned long )buf != (unsigned long )((void *)0)) { value = readl((void const volatile *)window); } else { } qla27xx_insert32(value, buf, len); return; } } __inline static void (*qla27xx_read_vector(uint width ))(void * , void * , ulong * ) { { return (width != 1U ? (width == 2U ? & qla27xx_read16 : & qla27xx_read32) : & qla27xx_read8); } } __inline static void qla27xx_read_reg(struct device_reg_24xx *reg , uint offset , void *buf , ulong *len ) { void *window ; { window = (void *)reg + (unsigned long )offset; qla27xx_read32(window, buf, len); return; } } __inline static void qla27xx_write_reg(struct device_reg_24xx *reg , uint offset , uint32_t data , void *buf ) { void *window ; { window = (void *)reg + (unsigned long )offset; if ((unsigned long )buf != (unsigned long )((void *)0)) { writel(data, (void volatile *)window); } else { } return; } } __inline static void qla27xx_read_window(struct device_reg_24xx *reg , uint32_t addr , uint offset , uint count , uint width , void *buf , ulong *len ) { void *window ; void (*readn)(void * , void * , ulong * ) ; void (*tmp)(void * , void * , ulong * ) ; uint tmp___0 ; { window = (void *)reg + (unsigned long )offset; tmp = qla27xx_read_vector(width); readn = tmp; qla27xx_write_reg(reg, 84U, addr, buf); goto ldv_65956; ldv_65955: qla27xx_insert32(addr, buf, len); (*readn)(window, buf, len); window = window + (unsigned long )width; addr = addr + 1U; ldv_65956: tmp___0 = count; count = count - 1U; if (tmp___0 != 0U) { goto ldv_65955; } else { } return; } } __inline static void qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent , void *buf ) { { if ((unsigned long )buf != (unsigned long )((void *)0)) { ent->hdr.driver_flags = (uint8_t )((unsigned int )ent->hdr.driver_flags | 128U); } else { } ql_dbg(98304U, (scsi_qla_host_t *)0, 53265, "Skipping entry %d\n", ent->hdr.entry_type); return; } } static int qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { { ql_dbg(65536U, vha, 53504, "%s: nop [%lx]\n", "qla27xx_fwdt_entry_t0", *len); qla27xx_skip_entry(ent, buf); return (0); } } static int qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { { ql_dbg(65536U, vha, 53759, "%s: end [%lx]\n", "qla27xx_fwdt_entry_t255", *len); qla27xx_skip_entry(ent, buf); return (1); } } static int qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { 
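/* The qla27xx_fwdt_entry_tNNN() handlers below process one firmware-dump template entry type each. They rely on the two-pass convention of the qla27xx_insert*()/qla27xx_read*() helpers above: with buf == NULL only *len is advanced (size-calculation pass); with a real buffer the captured data is written at buf + *len. Windowed register reads go through qla27xx_read_window(), which first selects the window by writing the base address to register offset 84 (0x54). */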
struct device_reg_24xx *reg ; void *tmp ; { tmp = qla27xx_isp_reg(vha); reg = (struct device_reg_24xx *)tmp; ql_dbg(65536U, vha, 53760, "%s: rdio t1 [%lx]\n", "qla27xx_fwdt_entry_t256", *len); qla27xx_read_window(reg, ent->__annonCompField128.t256.base_addr, (uint )ent->__annonCompField128.t256.pci_offset, (uint )ent->__annonCompField128.t256.reg_count, (uint )ent->__annonCompField128.t256.reg_width, buf, len); return (0); } } static int qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { struct device_reg_24xx *reg ; void *tmp ; { tmp = qla27xx_isp_reg(vha); reg = (struct device_reg_24xx *)tmp; ql_dbg(65536U, vha, 53761, "%s: wrio t1 [%lx]\n", "qla27xx_fwdt_entry_t257", *len); qla27xx_write_reg(reg, 84U, ent->__annonCompField128.t257.base_addr, buf); qla27xx_write_reg(reg, (uint )ent->__annonCompField128.t257.pci_offset, ent->__annonCompField128.t257.write_data, buf); return (0); } } static int qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { struct device_reg_24xx *reg ; void *tmp ; { tmp = qla27xx_isp_reg(vha); reg = (struct device_reg_24xx *)tmp; ql_dbg(65536U, vha, 53762, "%s: rdio t2 [%lx]\n", "qla27xx_fwdt_entry_t258", *len); qla27xx_write_reg(reg, (uint )ent->__annonCompField128.t258.banksel_offset, ent->__annonCompField128.t258.bank, buf); qla27xx_read_window(reg, ent->__annonCompField128.t258.base_addr, (uint )ent->__annonCompField128.t258.pci_offset, (uint )ent->__annonCompField128.t258.reg_count, (uint )ent->__annonCompField128.t258.reg_width, buf, len); return (0); } } static int qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { struct device_reg_24xx *reg ; void *tmp ; { tmp = qla27xx_isp_reg(vha); reg = (struct device_reg_24xx *)tmp; ql_dbg(65536U, vha, 53763, "%s: wrio t2 [%lx]\n", "qla27xx_fwdt_entry_t259", *len); qla27xx_write_reg(reg, 84U, ent->__annonCompField128.t259.base_addr, buf); qla27xx_write_reg(reg, (uint )ent->__annonCompField128.t259.banksel_offset, ent->__annonCompField128.t259.bank, buf); qla27xx_write_reg(reg, (uint )ent->__annonCompField128.t259.pci_offset, ent->__annonCompField128.t259.write_data, buf); return (0); } } static int qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { struct device_reg_24xx *reg ; void *tmp ; { tmp = qla27xx_isp_reg(vha); reg = (struct device_reg_24xx *)tmp; ql_dbg(65536U, vha, 53764, "%s: rdpci [%lx]\n", "qla27xx_fwdt_entry_t260", *len); qla27xx_insert32((uint32_t )ent->__annonCompField128.t260.pci_offset, buf, len); qla27xx_read_reg(reg, (uint )ent->__annonCompField128.t260.pci_offset, buf, len); return (0); } } static int qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { struct device_reg_24xx *reg ; void *tmp ; { tmp = qla27xx_isp_reg(vha); reg = (struct device_reg_24xx *)tmp; ql_dbg(65536U, vha, 53765, "%s: wrpci [%lx]\n", "qla27xx_fwdt_entry_t261", *len); qla27xx_write_reg(reg, (uint )ent->__annonCompField128.t261.pci_offset, ent->__annonCompField128.t261.write_data, buf); return (0); } } static int qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { ulong dwords ; ulong start ; ulong end ; { ql_dbg(65536U, vha, 53766, "%s: rdram(%x) [%lx]\n", "qla27xx_fwdt_entry_t262", (int )ent->__annonCompField128.t262.ram_area, *len); start = (ulong 
)ent->__annonCompField128.t262.start_addr; end = (ulong )ent->__annonCompField128.t262.end_addr; if ((unsigned int )ent->__annonCompField128.t262.ram_area == 1U) { } else if ((unsigned int )ent->__annonCompField128.t262.ram_area == 2U) { end = (ulong )(vha->hw)->fw_memory_size; if ((unsigned long )buf != (unsigned long )((void *)0)) { ent->__annonCompField128.t262.end_addr = (uint32_t )end; } else { } } else if ((unsigned int )ent->__annonCompField128.t262.ram_area == 3U) { start = (ulong )(vha->hw)->fw_shared_ram_start; end = (ulong )(vha->hw)->fw_shared_ram_end; if ((unsigned long )buf != (unsigned long )((void *)0)) { ent->__annonCompField128.t262.start_addr = (uint32_t )start; ent->__annonCompField128.t262.end_addr = (uint32_t )end; } else { } } else { ql_dbg(65536U, vha, 53282, "%s: unknown area %x\n", "qla27xx_fwdt_entry_t262", (int )ent->__annonCompField128.t262.ram_area); qla27xx_skip_entry(ent, buf); goto done; } if (end < start || end == 0UL) { ql_dbg(65536U, vha, 53283, "%s: unusable range (start=%x end=%x)\n", "qla27xx_fwdt_entry_t262", ent->__annonCompField128.t262.end_addr, ent->__annonCompField128.t262.start_addr); qla27xx_skip_entry(ent, buf); goto done; } else { } dwords = (end - start) + 1UL; if ((unsigned long )buf != (unsigned long )((void *)0)) { buf = buf + *len; qla24xx_dump_ram(vha->hw, (uint32_t )start, (uint32_t *)buf, (uint32_t )dwords, & buf); } else { } *len = *len + dwords * 4UL; done: ; return (0); } } static int qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { uint count ; uint i ; uint length ; struct req_que *req ; struct rsp_que *rsp ; { count = 0U; ql_dbg(65536U, vha, 53767, "%s: getq(%x) [%lx]\n", "qla27xx_fwdt_entry_t263", (int )ent->__annonCompField128.t263.queue_type, *len); if ((unsigned int )ent->__annonCompField128.t263.queue_type == 1U) { i = 0U; goto ldv_66047; ldv_66046: req = *((vha->hw)->req_q_map + (unsigned long )i); if ((unsigned long )req != (unsigned long )((struct req_que *)0) || (unsigned long )buf == (unsigned long )((void *)0)) { length = (unsigned long )req != (unsigned long )((struct req_que *)0) ? (uint )req->length : 2048U; qla27xx_insert16((int )((uint16_t )i), buf, len); qla27xx_insert16((int )((uint16_t )length), buf, len); qla27xx_insertbuf((unsigned long )req != (unsigned long )((struct req_que *)0) ? (void *)req->ring : (void *)0, (unsigned long )length * 64UL, buf, len); count = count + 1U; } else { } i = i + 1U; ldv_66047: ; if ((uint )(vha->hw)->max_req_queues > i) { goto ldv_66046; } else { } } else if ((unsigned int )ent->__annonCompField128.t263.queue_type == 2U) { i = 0U; goto ldv_66051; ldv_66050: rsp = *((vha->hw)->rsp_q_map + (unsigned long )i); if ((unsigned long )rsp != (unsigned long )((struct rsp_que *)0) || (unsigned long )buf == (unsigned long )((void *)0)) { length = (unsigned long )rsp != (unsigned long )((struct rsp_que *)0) ? (uint )rsp->length : 128U; qla27xx_insert16((int )((uint16_t )i), buf, len); qla27xx_insert16((int )((uint16_t )length), buf, len); qla27xx_insertbuf((unsigned long )rsp != (unsigned long )((struct rsp_que *)0) ? 
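/* t263 (queue dump): for every request/response queue this handler records the queue id, the ring length and the ring contents (length entries of 64 bytes each). During the size-calculation pass (buf == NULL) all slots up to max_req_queues/max_rsp_queues are counted using the default lengths 2048 and 128 seen in this code. */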
(void *)rsp->ring : (void *)0, (unsigned long )length * 64UL, buf, len); count = count + 1U; } else { } i = i + 1U; ldv_66051: ; if ((uint )(vha->hw)->max_rsp_queues > i) { goto ldv_66050; } else { } } else { ql_dbg(65536U, vha, 53286, "%s: unknown queue %x\n", "qla27xx_fwdt_entry_t263", (int )ent->__annonCompField128.t263.queue_type); qla27xx_skip_entry(ent, buf); } if ((unsigned long )buf != (unsigned long )((void *)0)) { ent->__annonCompField128.t263.num_queues = count; } else { } return (0); } } static int qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { { ql_dbg(65536U, vha, 53768, "%s: getfce [%lx]\n", "qla27xx_fwdt_entry_t264", *len); if ((unsigned long )(vha->hw)->fce != (unsigned long )((void *)0)) { if ((unsigned long )buf != (unsigned long )((void *)0)) { ent->__annonCompField128.t264.fce_trace_size = 65536U; ent->__annonCompField128.t264.write_pointer = (vha->hw)->fce_wr; ent->__annonCompField128.t264.base_pointer = (vha->hw)->fce_dma; ent->__annonCompField128.t264.fce_enable_mb0 = (uint32_t )(vha->hw)->fce_mb[0]; ent->__annonCompField128.t264.fce_enable_mb2 = (uint32_t )(vha->hw)->fce_mb[2]; ent->__annonCompField128.t264.fce_enable_mb3 = (uint32_t )(vha->hw)->fce_mb[3]; ent->__annonCompField128.t264.fce_enable_mb4 = (uint32_t )(vha->hw)->fce_mb[4]; ent->__annonCompField128.t264.fce_enable_mb5 = (uint32_t )(vha->hw)->fce_mb[5]; ent->__annonCompField128.t264.fce_enable_mb6 = (uint32_t )(vha->hw)->fce_mb[6]; } else { } qla27xx_insertbuf((vha->hw)->fce, 65536UL, buf, len); } else { ql_dbg(65536U, vha, 53287, "%s: missing fce\n", "qla27xx_fwdt_entry_t264"); qla27xx_skip_entry(ent, buf); } return (0); } } static int qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { struct device_reg_24xx *reg ; void *tmp ; { tmp = qla27xx_isp_reg(vha); reg = (struct device_reg_24xx *)tmp; ql_dbg(65536U, vha, 53769, "%s: pause risc [%lx]\n", "qla27xx_fwdt_entry_t265", *len); if ((unsigned long )buf != (unsigned long )((void *)0)) { qla24xx_pause_risc(reg, vha->hw); } else { } return (0); } } static int qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { { ql_dbg(65536U, vha, 53770, "%s: reset risc [%lx]\n", "qla27xx_fwdt_entry_t266", *len); if ((unsigned long )buf != (unsigned long )((void *)0)) { qla24xx_soft_reset(vha->hw); } else { } return (0); } } static int qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { struct device_reg_24xx *reg ; void *tmp ; { tmp = qla27xx_isp_reg(vha); reg = (struct device_reg_24xx *)tmp; ql_dbg(65536U, vha, 53771, "%s: dis intr [%lx]\n", "qla27xx_fwdt_entry_t267", *len); qla27xx_write_reg(reg, (uint )ent->__annonCompField128.t267.pci_offset, ent->__annonCompField128.t267.data, buf); return (0); } } static int qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { { ql_dbg(65536U, vha, 53772, "%s: gethb(%x) [%lx]\n", "qla27xx_fwdt_entry_t268", (int )ent->__annonCompField128.t268.buf_type, *len); if ((unsigned int )ent->__annonCompField128.t268.buf_type == 1U) { if ((unsigned long )(vha->hw)->eft != (unsigned long )((void *)0)) { if ((unsigned long )buf != (unsigned long )((void *)0)) { ent->__annonCompField128.t268.buf_size = 65536U; ent->__annonCompField128.t268.start_addr = (vha->hw)->eft_dma; } else { } qla27xx_insertbuf((vha->hw)->eft, 65536UL, buf, len); } else { 
ql_dbg(65536U, vha, 53288, "%s: missing eft\n", "qla27xx_fwdt_entry_t268"); qla27xx_skip_entry(ent, buf); } } else { ql_dbg(65536U, vha, 53291, "%s: unknown buffer %x\n", "qla27xx_fwdt_entry_t268", (int )ent->__annonCompField128.t268.buf_type); qla27xx_skip_entry(ent, buf); } return (0); } } static int qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { { ql_dbg(65536U, vha, 53773, "%s: scratch [%lx]\n", "qla27xx_fwdt_entry_t269", *len); qla27xx_insert32(2863311530U, buf, len); qla27xx_insert32(3149642683U, buf, len); qla27xx_insert32(3435973836U, buf, len); qla27xx_insert32(3722304989U, buf, len); qla27xx_insert32((uint32_t )*len + 4U, buf, len); if ((unsigned long )buf != (unsigned long )((void *)0)) { ent->__annonCompField128.t269.scratch_size = 20U; } else { } return (0); } } static int qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { struct device_reg_24xx *reg ; void *tmp ; ulong dwords ; ulong addr ; ulong tmp___0 ; { tmp = qla27xx_isp_reg(vha); reg = (struct device_reg_24xx *)tmp; dwords = (ulong )ent->__annonCompField128.t270.count; addr = (ulong )ent->__annonCompField128.t270.addr; ql_dbg(65536U, vha, 53774, "%s: rdremreg [%lx]\n", "qla27xx_fwdt_entry_t270", *len); qla27xx_write_reg(reg, 84U, 64U, buf); goto ldv_66108; ldv_66107: qla27xx_write_reg(reg, 192U, (uint32_t )addr | 2147483648U, buf); qla27xx_insert32((uint32_t )addr, buf, len); qla27xx_read_reg(reg, 196U, buf, len); addr = addr + 4UL; ldv_66108: tmp___0 = dwords; dwords = dwords - 1UL; if (tmp___0 != 0UL) { goto ldv_66107; } else { } return (0); } } static int qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { struct device_reg_24xx *reg ; void *tmp ; ulong addr ; ulong data ; { tmp = qla27xx_isp_reg(vha); reg = (struct device_reg_24xx *)tmp; addr = (ulong )ent->__annonCompField128.t271.addr; data = (ulong )ent->__annonCompField128.t271.data; ql_dbg(65536U, vha, 53775, "%s: wrremreg [%lx]\n", "qla27xx_fwdt_entry_t271", *len); qla27xx_write_reg(reg, 84U, 64U, buf); qla27xx_write_reg(reg, 196U, (uint32_t )data, buf); qla27xx_write_reg(reg, 192U, (uint32_t )addr, buf); return (0); } } static int qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { ulong dwords ; ulong start ; { dwords = (ulong )ent->__annonCompField128.t272.count; start = (ulong )ent->__annonCompField128.t272.addr; ql_dbg(65536U, vha, 53776, "%s: rdremram [%lx]\n", "qla27xx_fwdt_entry_t272", *len); if ((unsigned long )buf != (unsigned long )((void *)0)) { ql_dbg(65536U, vha, 53292, "%s: @%lx -> (%lx dwords)\n", "qla27xx_fwdt_entry_t272", start, dwords); buf = buf + *len; qla27xx_dump_mpi_ram(vha->hw, (uint32_t )start, (uint32_t *)buf, (uint32_t )dwords, & buf); } else { } *len = *len + dwords * 4UL; return (0); } } static int qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { ulong dwords ; ulong addr ; uint32_t value ; int tmp ; ulong tmp___0 ; { dwords = (ulong )ent->__annonCompField128.t273.count; addr = (ulong )ent->__annonCompField128.t273.addr; ql_dbg(65536U, vha, 53777, "%s: pcicfg [%lx]\n", "qla27xx_fwdt_entry_t273", *len); goto ldv_66140; ldv_66139: value = 4294967295U; tmp = pci_read_config_dword((struct pci_dev const *)(vha->hw)->pdev, (int )addr, & value); if (tmp != 0) { ql_dbg(65536U, vha, 53293, "%s: failed pcicfg read at %lx\n", 
"qla27xx_fwdt_entry_t273", addr); } else { } qla27xx_insert32((uint32_t )addr, buf, len); qla27xx_insert32(value, buf, len); addr = addr + 4UL; ldv_66140: tmp___0 = dwords; dwords = dwords - 1UL; if (tmp___0 != 0UL) { goto ldv_66139; } else { } return (0); } } static int qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { uint count ; uint i ; struct req_que *req ; struct rsp_que *rsp ; { count = 0U; ql_dbg(65536U, vha, 53778, "%s: getqsh(%x) [%lx]\n", "qla27xx_fwdt_entry_t274", (int )ent->__annonCompField128.t274.queue_type, *len); if ((unsigned int )ent->__annonCompField128.t274.queue_type == 1U) { i = 0U; goto ldv_66153; ldv_66152: req = *((vha->hw)->req_q_map + (unsigned long )i); if ((unsigned long )req != (unsigned long )((struct req_que *)0) || (unsigned long )buf == (unsigned long )((void *)0)) { qla27xx_insert16((int )((uint16_t )i), buf, len); qla27xx_insert16(1, buf, len); qla27xx_insert32((unsigned long )req != (unsigned long )((struct req_que *)0) && (unsigned long )req->out_ptr != (unsigned long )((uint16_t *)0U) ? (uint32_t )*(req->out_ptr) : 0U, buf, len); count = count + 1U; } else { } i = i + 1U; ldv_66153: ; if ((uint )(vha->hw)->max_req_queues > i) { goto ldv_66152; } else { } } else if ((unsigned int )ent->__annonCompField128.t274.queue_type == 2U) { i = 0U; goto ldv_66157; ldv_66156: rsp = *((vha->hw)->rsp_q_map + (unsigned long )i); if ((unsigned long )rsp != (unsigned long )((struct rsp_que *)0) || (unsigned long )buf == (unsigned long )((void *)0)) { qla27xx_insert16((int )((uint16_t )i), buf, len); qla27xx_insert16(1, buf, len); qla27xx_insert32((unsigned long )rsp != (unsigned long )((struct rsp_que *)0) && (unsigned long )rsp->in_ptr != (unsigned long )((uint16_t *)0U) ? 
(uint32_t )*(rsp->in_ptr) : 0U, buf, len); count = count + 1U; } else { } i = i + 1U; ldv_66157: ; if ((uint )(vha->hw)->max_rsp_queues > i) { goto ldv_66156; } else { } } else { ql_dbg(65536U, vha, 53295, "%s: unknown queue %x\n", "qla27xx_fwdt_entry_t274", (int )ent->__annonCompField128.t274.queue_type); qla27xx_skip_entry(ent, buf); } if ((unsigned long )buf != (unsigned long )((void *)0)) { ent->__annonCompField128.t274.num_queues = count; } else { } if (count == 0U) { qla27xx_skip_entry(ent, buf); } else { } return (0); } }
static int qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { ulong offset ; { offset = 20UL; ql_dbg(65536U, vha, 53779, "%s: buffer(%x) [%lx]\n", "qla27xx_fwdt_entry_t275", ent->__annonCompField128.t275.length, *len); if (ent->__annonCompField128.t275.length == 0U) { ql_dbg(65536U, vha, 53280, "%s: buffer zero length\n", "qla27xx_fwdt_entry_t275"); qla27xx_skip_entry(ent, buf); goto done; } else { } if ((ulong )ent->__annonCompField128.t275.length + offset > (ulong )ent->hdr.entry_size) { ql_dbg(65536U, vha, 53296, "%s: buffer overflow\n", "qla27xx_fwdt_entry_t275"); qla27xx_skip_entry(ent, buf); goto done; } else { } qla27xx_insertbuf((void *)(& ent->__annonCompField128.t275.buffer), (ulong )ent->__annonCompField128.t275.length, buf, len); done: ; return (0); } }
static int qla27xx_fwdt_entry_other(struct scsi_qla_host *vha , struct qla27xx_fwdt_entry *ent , void *buf , ulong *len ) { { ql_dbg(65536U, vha, 54015, "%s: type %x [%lx]\n", "qla27xx_fwdt_entry_other", ent->hdr.entry_type, *len); qla27xx_skip_entry(ent, buf); return (0); } }
static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[23U] = { {0U, & qla27xx_fwdt_entry_t0}, {255U, & qla27xx_fwdt_entry_t255}, {256U, & qla27xx_fwdt_entry_t256}, {257U, & qla27xx_fwdt_entry_t257}, {258U, & qla27xx_fwdt_entry_t258}, {259U, & qla27xx_fwdt_entry_t259}, {260U, & qla27xx_fwdt_entry_t260}, {261U, & qla27xx_fwdt_entry_t261}, {262U, & qla27xx_fwdt_entry_t262}, {263U, & qla27xx_fwdt_entry_t263}, {264U, & qla27xx_fwdt_entry_t264}, {265U, & qla27xx_fwdt_entry_t265}, {266U, & qla27xx_fwdt_entry_t266}, {267U, & qla27xx_fwdt_entry_t267}, {268U, & qla27xx_fwdt_entry_t268}, {269U, & qla27xx_fwdt_entry_t269}, {270U, & qla27xx_fwdt_entry_t270}, {271U, & qla27xx_fwdt_entry_t271}, {272U, & qla27xx_fwdt_entry_t272}, {273U, & qla27xx_fwdt_entry_t273}, {274U, & qla27xx_fwdt_entry_t274}, {275U, & qla27xx_fwdt_entry_t275}, {4294967295U, & qla27xx_fwdt_entry_other}};
__inline static int (*qla27xx_find_entry(uint type ))(struct scsi_qla_host * , struct qla27xx_fwdt_entry * , void * , ulong * ) { struct qla27xx_fwdt_entry_call *list ; { list = (struct qla27xx_fwdt_entry_call *)(& ql27xx_fwdt_entry_call_list); goto ldv_66192; ldv_66191: list = list + 1; ldv_66192: ; if (list->type < type) { goto ldv_66191; } else { } if (list->type == type) { return (list->call); } else { } return (& qla27xx_fwdt_entry_other); } }
__inline static void *qla27xx_next_entry(void *p ) { struct qla27xx_fwdt_entry *ent ; { ent = (struct qla27xx_fwdt_entry *)p; return (p + (unsigned long )ent->hdr.entry_size); } }
static void qla27xx_walk_template(struct scsi_qla_host *vha , struct qla27xx_fwdt_template *tmp , void *buf , ulong *len ) { struct qla27xx_fwdt_entry *ent ; ulong count ; int (*tmp___0)(struct scsi_qla_host * , struct qla27xx_fwdt_entry * , void * , ulong * ) ; int tmp___1 ; void *tmp___2 ; ulong tmp___3 ; { ent = (struct qla27xx_fwdt_entry *)((void *)tmp + (unsigned long )tmp->entry_offset);
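/* Template walker: starting at tmp->entry_offset it visits tmp->entry_count entries, looks up each handler by entry type via qla27xx_find_entry() and advances by hdr.entry_size. A nonzero handler return (the t255 end entry) stops the walk; a leftover count or a missing end entry is only reported via ql_dbg(). It is called with buf == NULL to compute the dump size and with buf pointing at the dump buffer to fill it. */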
count = (ulong )tmp->entry_count; ql_dbg(65536U, vha, 53274, "%s: entry count %lx\n", "qla27xx_walk_template", count); goto ldv_66209; ldv_66208: tmp___0 = qla27xx_find_entry(ent->hdr.entry_type); tmp___1 = (*tmp___0)(vha, ent, buf, len); if (tmp___1 != 0) { goto ldv_66207; } else { } tmp___2 = qla27xx_next_entry((void *)ent); ent = (struct qla27xx_fwdt_entry *)tmp___2; ldv_66209: tmp___3 = count; count = count - 1UL; if (tmp___3 != 0UL) { goto ldv_66208; } else { } ldv_66207: ; if (count != 0UL) { ql_dbg(65536U, vha, 53272, "%s: residual count (%lx)\n", "qla27xx_walk_template", count); } else { } if (ent->hdr.entry_type != 255U) { ql_dbg(65536U, vha, 53273, "%s: missing end (%lx)\n", "qla27xx_walk_template", count); } else { } ql_dbg(65536U, vha, 53275, "%s: len=%lx\n", "qla27xx_walk_template", *len); if ((unsigned long )buf != (unsigned long )((void *)0)) { ql_log(1U, vha, 53269, "Firmware dump saved to temp buffer (%ld/%p)\n", vha->host_no, (vha->hw)->fw_dump); qla2x00_post_uevent_work(vha, 0U); } else { } return; } } static void qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp ) { { tmp->capture_timestamp = (uint32_t )jiffies; return; } } static void qla27xx_driver_info(struct qla27xx_fwdt_template *tmp ) { uint8_t v[6U] ; int rval ; { v[0] = 0U; v[1] = 0U; v[2] = 0U; v[3] = 0U; v[4] = 0U; v[5] = 0U; rval = 0; rval = sscanf((char const *)(& qla2x00_version_str), "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu", (uint8_t *)(& v), (uint8_t *)(& v) + 1UL, (uint8_t *)(& v) + 2UL, (uint8_t *)(& v) + 3UL, (uint8_t *)(& v) + 4UL, (uint8_t *)(& v) + 5UL); tmp->driver_info[0] = (uint32_t )(((((int )v[3] << 24) | ((int )v[2] << 16)) | ((int )v[1] << 8)) | (int )v[0]); tmp->driver_info[1] = (uint32_t )(((int )v[5] << 8) | (int )v[4]); tmp->driver_info[2] = 305419896U; return; } } static void qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp , struct scsi_qla_host *vha ) { { tmp->firmware_version[0] = (uint32_t )(vha->hw)->fw_major_version; tmp->firmware_version[1] = (uint32_t )(vha->hw)->fw_minor_version; tmp->firmware_version[2] = (uint32_t )(vha->hw)->fw_subminor_version; tmp->firmware_version[3] = (uint32_t )(((int )(vha->hw)->fw_attributes_h << 16) | (int )(vha->hw)->fw_attributes); tmp->firmware_version[4] = (uint32_t )(((int )(vha->hw)->fw_attributes_ext[1] << 16) | (int )(vha->hw)->fw_attributes_ext[0]); return; } } static void ql27xx_edit_template(struct scsi_qla_host *vha , struct qla27xx_fwdt_template *tmp ) { { qla27xx_time_stamp(tmp); qla27xx_driver_info(tmp); qla27xx_firmware_info(tmp, vha); return; } } __inline static uint32_t qla27xx_template_checksum(void *p , ulong size ) { uint32_t *buf ; uint64_t sum ; uint32_t *tmp ; ulong tmp___0 ; { buf = (uint32_t *)p; sum = 0ULL; size = size / 4UL; goto ldv_66233; ldv_66232: tmp = buf; buf = buf + 1; sum = (uint64_t )*tmp + sum; ldv_66233: tmp___0 = size; size = size - 1UL; if (tmp___0 != 0UL) { goto ldv_66232; } else { } sum = (sum & 4294967295ULL) + (sum >> 32); return (~ ((uint32_t )sum)); } } __inline static int qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp ) { uint32_t tmp___0 ; { tmp___0 = qla27xx_template_checksum((void *)tmp, (ulong )tmp->template_size); return (tmp___0 == 0U); } } __inline static int qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp ) { { return (tmp->template_type == 99U); } } static void qla27xx_execute_fwdt_template(struct scsi_qla_host *vha ) { struct qla27xx_fwdt_template *tmp ; ulong len ; void *tmp___0 ; int tmp___1 ; { tmp = (struct qla27xx_fwdt_template 
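/* qla27xx_execute_fwdt_template(): once qla27xx_fwdt_template_valid() has checked the header type (99) and that the checksum computed by qla27xx_template_checksum() over template_size bytes is zero, the template is copied to the start of the fw_dump buffer, stamped with timestamp/driver/firmware info and walked with buf pointing at that copy, so the captured data is appended directly behind the template; the resulting length becomes fw_dump_len. */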
*)(vha->hw)->fw_dump_template; tmp___1 = qla27xx_fwdt_template_valid((void *)tmp); if (tmp___1 != 0) { len = (ulong )tmp->template_size; tmp___0 = memcpy((void *)(vha->hw)->fw_dump, (void const *)tmp, len); tmp = (struct qla27xx_fwdt_template *)tmp___0; ql27xx_edit_template(vha, tmp); qla27xx_walk_template(vha, tmp, (void *)tmp, & len); (vha->hw)->fw_dump_len = (uint32_t )len; (vha->hw)->fw_dumped = 1; } else { } return; } } ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha ) { struct qla27xx_fwdt_template *tmp ; ulong len ; int tmp___0 ; { tmp = (struct qla27xx_fwdt_template *)(vha->hw)->fw_dump_template; len = 0UL; tmp___0 = qla27xx_fwdt_template_valid((void *)tmp); if (tmp___0 != 0) { len = (ulong )tmp->template_size; qla27xx_walk_template(vha, tmp, (void *)0, & len); } else { } return (len); } } ulong qla27xx_fwdt_template_size(void *p ) { struct qla27xx_fwdt_template *tmp ; { tmp = (struct qla27xx_fwdt_template *)p; return ((ulong )tmp->template_size); } } ulong qla27xx_fwdt_template_default_size(void) { { return (1404UL); } } void const *qla27xx_fwdt_template_default(void) { { return ((void const *)(& ql27xx_fwdt_default_template)); } } int qla27xx_fwdt_template_valid(void *p ) { struct qla27xx_fwdt_template *tmp ; int tmp___0 ; int tmp___1 ; { tmp = (struct qla27xx_fwdt_template *)p; tmp___0 = qla27xx_verify_template_header(tmp); if (tmp___0 == 0) { ql_log(1U, (scsi_qla_host_t *)0, 53276, "%s: template type %x\n", "qla27xx_fwdt_template_valid", tmp->template_type); return (0); } else { } tmp___1 = qla27xx_verify_template_checksum(tmp); if (tmp___1 == 0) { ql_log(1U, (scsi_qla_host_t *)0, 53277, "%s: failed template checksum\n", "qla27xx_fwdt_template_valid"); return (0); } else { } return (1); } } void qla27xx_fwdump(scsi_qla_host_t *vha , int hardware_locked ) { ulong flags ; raw_spinlock_t *tmp ; { flags = 0UL; if (hardware_locked == 0) { tmp = spinlock_check(& (vha->hw)->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); } else { } if ((unsigned long )(vha->hw)->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 53278, "fwdump buffer missing.\n"); } else if ((unsigned long )(vha->hw)->fw_dump_template == (unsigned long )((void *)0)) { ql_log(1U, vha, 53279, "fwdump template missing.\n"); } else if ((vha->hw)->fw_dumped != 0) { ql_log(1U, vha, 54016, "Firmware has been previously dumped (%p), -- ignoring request\n", (vha->hw)->fw_dump); } else { qla27xx_execute_fwdt_template(vha); } if (hardware_locked == 0) { spin_unlock_irqrestore(& (vha->hw)->hardware_lock, flags); } else { } return; } } void disable_suitable_timer_30(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_30) { ldv_timer_state_30 = 0; return; } else { } return; } } void choose_timer_30(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_30 = 2; return; } } int reg_timer_30(struct timer_list *timer ) { { ldv_timer_list_30 = timer; ldv_timer_state_30 = 1; return (0); } } void activate_pending_timer_30(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_30 == (unsigned long )timer) { if (ldv_timer_state_30 == 2 || pending_flag != 0) { ldv_timer_list_30 = timer; ldv_timer_list_30->data = data; ldv_timer_state_30 = 1; } else { } return; } else { } reg_timer_30(timer); ldv_timer_list_30->data = data; return; } } bool ldv_queue_work_on_315(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct 
work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_316(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_317(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_7(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_318(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_7(2); return; } } bool ldv_queue_delayed_work_on_319(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_7(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_320(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_88 = 1; ldv_initialize_scsi_host_template_88(); } else { } return (ldv_func_res); } } extern void *memset(void * , int , size_t ) ; bool ldv_is_err(void const *ptr ) { { return ((unsigned long )ptr > 2012UL); } } void *ldv_err_ptr(long error ) { { return ((void *)(2012L - error)); } } long ldv_ptr_err(void const *ptr ) { { return ((long )(2012UL - (unsigned long )ptr)); } } bool ldv_is_err_or_null(void const *ptr ) { bool tmp ; int tmp___0 ; { if ((unsigned long )ptr == (unsigned long )((void const *)0)) { tmp___0 = 1; } else { tmp = ldv_is_err(ptr); if ((int )tmp) { tmp___0 = 1; } else { tmp___0 = 0; } } return ((bool )tmp___0); } } int ldv_module_refcounter = 1; void ldv_module_get(struct module *module ) { { if ((unsigned long )module != (unsigned long )((struct module *)0)) { ldv_module_refcounter = ldv_module_refcounter + 1; } else { } return; } } int ldv_try_module_get(struct module *module ) { int module_get_succeeded ; { if ((unsigned long )module != (unsigned long )((struct module *)0)) { module_get_succeeded = ldv_undef_int(); if (module_get_succeeded == 1) { ldv_module_refcounter = ldv_module_refcounter + 1; return (1); } else { return (0); } } else { } return (0); } } void ldv_module_put(struct module *module ) { { if ((unsigned long )module != (unsigned long )((struct module *)0)) { if (ldv_module_refcounter <= 1) { ldv_error(); } else { } ldv_module_refcounter = ldv_module_refcounter - 1; } else { } return; } } void ldv_module_put_and_exit(void) { { ldv_module_put((struct module *)1); LDV_STOP: ; goto LDV_STOP; } } unsigned int ldv_module_refcount(void) { { return ((unsigned int )(ldv_module_refcounter + -1)); } } void ldv_check_final_state(void) { { if (ldv_module_refcounter != 1) { ldv_error(); } else { } return; } }
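/* LDV model helpers: ldv_err_ptr()/ldv_is_err()/ldv_ptr_err() mirror the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() with 2012 as the error-pointer threshold, and ldv_module_get()/ldv_try_module_get()/ldv_module_put() maintain ldv_module_refcounter, which ldv_check_final_state() requires to be back at its initial value of 1; otherwise ldv_error() is called (presumably reporting a violation via __VERIFIER_error()), so the module reference count appears to be the property checked by this harness. */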