extern void __VERIFIER_error() __attribute__ ((__noreturn__)); /* Generated by CIL v. 1.5.1 */ /* print_CIL_Input is false */ struct kernel_symbol { unsigned long value ; char const *name ; }; struct module; typedef signed char __s8; typedef unsigned char __u8; typedef short __s16; typedef unsigned short __u16; typedef int __s32; typedef unsigned int __u32; typedef unsigned long long __u64; typedef signed char s8; typedef unsigned char u8; typedef short s16; typedef unsigned short u16; typedef int s32; typedef unsigned int u32; typedef long long s64; typedef unsigned long long u64; typedef long __kernel_long_t; typedef unsigned long __kernel_ulong_t; typedef int __kernel_pid_t; typedef unsigned int __kernel_uid32_t; typedef unsigned int __kernel_gid32_t; typedef __kernel_ulong_t __kernel_size_t; typedef __kernel_long_t __kernel_ssize_t; typedef long long __kernel_loff_t; typedef __kernel_long_t __kernel_time_t; typedef __kernel_long_t __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef __u16 __le16; typedef __u16 __be16; typedef __u32 __le32; typedef __u32 __be32; typedef __u32 __wsum; typedef __u32 __kernel_dev_t; typedef __kernel_dev_t dev_t; typedef unsigned short umode_t; typedef __kernel_pid_t pid_t; typedef __kernel_clockid_t clockid_t; typedef _Bool bool; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_loff_t loff_t; typedef __kernel_size_t size_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_time_t time_t; typedef unsigned int uint; typedef unsigned long ulong; typedef __s32 int32_t; typedef __u8 uint8_t; typedef __u16 uint16_t; typedef __u32 uint32_t; typedef __u64 uint64_t; typedef unsigned long sector_t; typedef unsigned long blkcnt_t; typedef u64 dma_addr_t; typedef unsigned int gfp_t; typedef unsigned int fmode_t; typedef unsigned int oom_flags_t; typedef u64 phys_addr_t; typedef phys_addr_t resource_size_t; struct __anonstruct_atomic_t_6 { int counter ; }; typedef struct __anonstruct_atomic_t_6 atomic_t; struct __anonstruct_atomic64_t_7 { long counter ; }; typedef struct __anonstruct_atomic64_t_7 atomic64_t; struct list_head { struct list_head *next ; struct list_head *prev ; }; struct hlist_node; struct hlist_head { struct hlist_node *first ; }; struct hlist_node { struct hlist_node *next ; struct hlist_node **pprev ; }; struct callback_head { struct callback_head *next ; void (*func)(struct callback_head * ) ; }; struct pt_regs { unsigned long r15 ; unsigned long r14 ; unsigned long r13 ; unsigned long r12 ; unsigned long bp ; unsigned long bx ; unsigned long r11 ; unsigned long r10 ; unsigned long r9 ; unsigned long r8 ; unsigned long ax ; unsigned long cx ; unsigned long dx ; unsigned long si ; unsigned long di ; unsigned long orig_ax ; unsigned long ip ; unsigned long cs ; unsigned long flags ; unsigned long sp ; unsigned long ss ; }; typedef void (*ctor_fn_t)(void); struct __anonstruct_ldv_1042_9 { unsigned int a ; unsigned int b ; }; struct __anonstruct_ldv_1057_10 { u16 limit0 ; u16 base0 ; unsigned char base1 ; unsigned char type : 4 ; unsigned char s : 1 ; unsigned char dpl : 2 ; unsigned char p : 1 ; unsigned char limit : 4 ; unsigned char avl : 1 ; unsigned char l : 1 ; unsigned char d : 1 ; unsigned char g : 1 ; unsigned char base2 ; }; union __anonunion_ldv_1058_8 { struct __anonstruct_ldv_1042_9 ldv_1042 ; struct __anonstruct_ldv_1057_10 ldv_1057 ; }; struct desc_struct { union __anonunion_ldv_1058_8 ldv_1058 ; }; typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; struct 
pgprot { pgprotval_t pgprot ; }; typedef struct pgprot pgprot_t; struct __anonstruct_pgd_t_12 { pgdval_t pgd ; }; typedef struct __anonstruct_pgd_t_12 pgd_t; struct page; typedef struct page *pgtable_t; struct file; struct seq_file; struct thread_struct; struct mm_struct; struct task_struct; struct cpumask; struct paravirt_callee_save { void *func ; }; struct pv_irq_ops { struct paravirt_callee_save save_fl ; struct paravirt_callee_save restore_fl ; struct paravirt_callee_save irq_disable ; struct paravirt_callee_save irq_enable ; void (*safe_halt)(void) ; void (*halt)(void) ; void (*adjust_exception_frame)(void) ; }; struct arch_spinlock; typedef u16 __ticket_t; typedef u32 __ticketpair_t; struct __raw_tickets { __ticket_t head ; __ticket_t tail ; }; union __anonunion_ldv_1464_15 { __ticketpair_t head_tail ; struct __raw_tickets tickets ; }; struct arch_spinlock { union __anonunion_ldv_1464_15 ldv_1464 ; }; typedef struct arch_spinlock arch_spinlock_t; struct __anonstruct_ldv_1471_17 { u32 read ; s32 write ; }; union __anonunion_arch_rwlock_t_16 { s64 lock ; struct __anonstruct_ldv_1471_17 ldv_1471 ; }; typedef union __anonunion_arch_rwlock_t_16 arch_rwlock_t; struct file_operations; struct device; struct net_device; struct completion; struct pid; struct bug_entry { int bug_addr_disp ; int file_disp ; unsigned short line ; unsigned short flags ; }; struct timespec; struct compat_timespec; struct __anonstruct_futex_19 { u32 *uaddr ; u32 val ; u32 flags ; u32 bitset ; u64 time ; u32 *uaddr2 ; }; struct __anonstruct_nanosleep_20 { clockid_t clockid ; struct timespec *rmtp ; struct compat_timespec *compat_rmtp ; u64 expires ; }; struct pollfd; struct __anonstruct_poll_21 { struct pollfd *ufds ; int nfds ; int has_timeout ; unsigned long tv_sec ; unsigned long tv_nsec ; }; union __anonunion_ldv_2616_18 { struct __anonstruct_futex_19 futex ; struct __anonstruct_nanosleep_20 nanosleep ; struct __anonstruct_poll_21 poll ; }; struct restart_block { long (*fn)(struct restart_block * ) ; union __anonunion_ldv_2616_18 ldv_2616 ; }; struct exec_domain; struct kernel_vm86_regs { struct pt_regs pt ; unsigned short es ; unsigned short __esh ; unsigned short ds ; unsigned short __dsh ; unsigned short fs ; unsigned short __fsh ; unsigned short gs ; unsigned short __gsh ; }; union __anonunion_ldv_2764_22 { struct pt_regs *regs ; struct kernel_vm86_regs *vm86 ; }; struct math_emu_info { long ___orig_eip ; union __anonunion_ldv_2764_22 ldv_2764 ; }; struct cpumask { unsigned long bits[64U] ; }; typedef struct cpumask cpumask_t; typedef struct cpumask *cpumask_var_t; struct static_key; struct map_segment; struct exec_domain { char const *name ; void (*handler)(int , struct pt_regs * ) ; unsigned char pers_low ; unsigned char pers_high ; unsigned long *signal_map ; unsigned long *signal_invmap ; struct map_segment *err_map ; struct map_segment *socktype_map ; struct map_segment *sockopt_map ; struct map_segment *af_map ; struct module *module ; struct exec_domain *next ; }; struct seq_operations; struct i387_fsave_struct { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u32 status ; }; struct __anonstruct_ldv_5041_27 { u64 rip ; u64 rdp ; }; struct __anonstruct_ldv_5047_28 { u32 fip ; u32 fcs ; u32 foo ; u32 fos ; }; union __anonunion_ldv_5048_26 { struct __anonstruct_ldv_5041_27 ldv_5041 ; struct __anonstruct_ldv_5047_28 ldv_5047 ; }; union __anonunion_ldv_5057_29 { u32 padding1[12U] ; u32 sw_reserved[12U] ; }; struct i387_fxsave_struct { u16 cwd ; u16 swd ; u16 twd ; 
u16 fop ; union __anonunion_ldv_5048_26 ldv_5048 ; u32 mxcsr ; u32 mxcsr_mask ; u32 st_space[32U] ; u32 xmm_space[64U] ; u32 padding[12U] ; union __anonunion_ldv_5057_29 ldv_5057 ; }; struct i387_soft_struct { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u8 ftop ; u8 changed ; u8 lookahead ; u8 no_update ; u8 rm ; u8 alimit ; struct math_emu_info *info ; u32 entry_eip ; }; struct ymmh_struct { u32 ymmh_space[64U] ; }; struct xsave_hdr_struct { u64 xstate_bv ; u64 reserved1[2U] ; u64 reserved2[5U] ; }; struct xsave_struct { struct i387_fxsave_struct i387 ; struct xsave_hdr_struct xsave_hdr ; struct ymmh_struct ymmh ; }; union thread_xstate { struct i387_fsave_struct fsave ; struct i387_fxsave_struct fxsave ; struct i387_soft_struct soft ; struct xsave_struct xsave ; }; struct fpu { unsigned int last_cpu ; unsigned int has_fpu ; union thread_xstate *state ; }; struct kmem_cache; struct perf_event; struct thread_struct { struct desc_struct tls_array[3U] ; unsigned long sp0 ; unsigned long sp ; unsigned long usersp ; unsigned short es ; unsigned short ds ; unsigned short fsindex ; unsigned short gsindex ; unsigned long fs ; unsigned long gs ; struct perf_event *ptrace_bps[4U] ; unsigned long debugreg6 ; unsigned long ptrace_dr7 ; unsigned long cr2 ; unsigned long trap_nr ; unsigned long error_code ; struct fpu fpu ; unsigned long *io_bitmap_ptr ; unsigned long iopl ; unsigned int io_bitmap_max ; }; struct __anonstruct_mm_segment_t_31 { unsigned long seg ; }; typedef struct __anonstruct_mm_segment_t_31 mm_segment_t; typedef atomic64_t atomic_long_t; struct thread_info { struct task_struct *task ; struct exec_domain *exec_domain ; __u32 flags ; __u32 status ; __u32 cpu ; int preempt_count ; mm_segment_t addr_limit ; struct restart_block restart_block ; void *sysenter_return ; unsigned char sig_on_uaccess_error : 1 ; unsigned char uaccess_err : 1 ; }; struct lockdep_map; struct stack_trace { unsigned int nr_entries ; unsigned int max_entries ; unsigned long *entries ; int skip ; }; struct lockdep_subclass_key { char __one_byte ; } __attribute__((__packed__)) ; struct lock_class_key { struct lockdep_subclass_key subkeys[8U] ; }; struct lock_class { struct list_head hash_entry ; struct list_head lock_entry ; struct lockdep_subclass_key *key ; unsigned int subclass ; unsigned int dep_gen_id ; unsigned long usage_mask ; struct stack_trace usage_traces[13U] ; struct list_head locks_after ; struct list_head locks_before ; unsigned int version ; unsigned long ops ; char const *name ; int name_version ; unsigned long contention_point[4U] ; unsigned long contending_point[4U] ; }; struct lockdep_map { struct lock_class_key *key ; struct lock_class *class_cache[2U] ; char const *name ; int cpu ; unsigned long ip ; }; struct held_lock { u64 prev_chain_key ; unsigned long acquire_ip ; struct lockdep_map *instance ; struct lockdep_map *nest_lock ; u64 waittime_stamp ; u64 holdtime_stamp ; unsigned short class_idx : 13 ; unsigned char irq_context : 2 ; unsigned char trylock : 1 ; unsigned char read : 2 ; unsigned char check : 2 ; unsigned char hardirqs_off : 1 ; unsigned short references : 11 ; }; struct raw_spinlock { arch_spinlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct raw_spinlock raw_spinlock_t; struct __anonstruct_ldv_6104_33 { u8 __padding[24U] ; struct lockdep_map dep_map ; }; union __anonunion_ldv_6105_32 { struct raw_spinlock rlock ; struct __anonstruct_ldv_6104_33 ldv_6104 ; }; 
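/* Illustrative sketch only, not part of the CIL-generated driver model: struct list_head
 * (declared above) is an intrusive, circular doubly-linked list node that is embedded in a
 * containing structure rather than pointing at one. The names below (my_node, my_list_init,
 * my_list_add) are hypothetical helpers, not kernel API; they merely mirror what
 * INIT_LIST_HEAD and list_add do with the next/prev pointers. */
struct my_node { int payload ; struct list_head link ; };
static void my_list_init(struct list_head *head )
{
  /* an empty list is a node whose next and prev both point back at itself */
  head->next = head;
  head->prev = head;
}
static void my_list_add(struct list_head *entry , struct list_head *head )
{
  /* splice entry in directly after head */
  entry->next = head->next;
  entry->prev = head;
  head->next->prev = entry;
  head->next = entry;
}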
struct spinlock { union __anonunion_ldv_6105_32 ldv_6105 ; }; typedef struct spinlock spinlock_t; struct __anonstruct_rwlock_t_34 { arch_rwlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_rwlock_t_34 rwlock_t; struct static_key { atomic_t enabled ; }; struct __wait_queue_head { spinlock_t lock ; struct list_head task_list ; }; typedef struct __wait_queue_head wait_queue_head_t; struct seqcount { unsigned int sequence ; }; typedef struct seqcount seqcount_t; struct __anonstruct_seqlock_t_35 { struct seqcount seqcount ; spinlock_t lock ; }; typedef struct __anonstruct_seqlock_t_35 seqlock_t; struct __anonstruct_nodemask_t_36 { unsigned long bits[16U] ; }; typedef struct __anonstruct_nodemask_t_36 nodemask_t; struct mutex { atomic_t count ; spinlock_t wait_lock ; struct list_head wait_list ; struct task_struct *owner ; char const *name ; void *magic ; struct lockdep_map dep_map ; }; struct mutex_waiter { struct list_head list ; struct task_struct *task ; void *magic ; }; struct rw_semaphore; struct rw_semaphore { long count ; raw_spinlock_t wait_lock ; struct list_head wait_list ; struct lockdep_map dep_map ; }; struct completion { unsigned int done ; wait_queue_head_t wait ; }; struct notifier_block; struct timespec { __kernel_time_t tv_sec ; long tv_nsec ; }; union ktime { s64 tv64 ; }; typedef union ktime ktime_t; struct tvec_base; struct timer_list { struct list_head entry ; unsigned long expires ; struct tvec_base *base ; void (*function)(unsigned long ) ; unsigned long data ; int slack ; int start_pid ; void *start_site ; char start_comm[16U] ; struct lockdep_map lockdep_map ; }; struct hrtimer; enum hrtimer_restart; struct workqueue_struct; struct work_struct; struct work_struct { atomic_long_t data ; struct list_head entry ; void (*func)(struct work_struct * ) ; struct lockdep_map lockdep_map ; }; struct delayed_work { struct work_struct work ; struct timer_list timer ; struct workqueue_struct *wq ; int cpu ; }; struct execute_work { struct work_struct work ; }; struct notifier_block { int (*notifier_call)(struct notifier_block * , unsigned long , void * ) ; struct notifier_block *next ; int priority ; }; struct blocking_notifier_head { struct rw_semaphore rwsem ; struct notifier_block *head ; }; struct ctl_table; struct resource { resource_size_t start ; resource_size_t end ; char const *name ; unsigned long flags ; struct resource *parent ; struct resource *sibling ; struct resource *child ; }; struct pci_dev; struct pm_message { int event ; }; typedef struct pm_message pm_message_t; struct dev_pm_ops { int (*prepare)(struct device * ) ; void (*complete)(struct device * ) ; int (*suspend)(struct device * ) ; int (*resume)(struct device * ) ; int (*freeze)(struct device * ) ; int (*thaw)(struct device * ) ; int (*poweroff)(struct device * ) ; int (*restore)(struct device * ) ; int (*suspend_late)(struct device * ) ; int (*resume_early)(struct device * ) ; int (*freeze_late)(struct device * ) ; int (*thaw_early)(struct device * ) ; int (*poweroff_late)(struct device * ) ; int (*restore_early)(struct device * ) ; int (*suspend_noirq)(struct device * ) ; int (*resume_noirq)(struct device * ) ; int (*freeze_noirq)(struct device * ) ; int (*thaw_noirq)(struct device * ) ; int (*poweroff_noirq)(struct device * ) ; int (*restore_noirq)(struct device * ) ; int (*runtime_suspend)(struct device * ) ; int (*runtime_resume)(struct device * ) ; int (*runtime_idle)(struct device * ) ; }; enum rpm_status { 
RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; struct wakeup_source; struct pm_subsys_data { spinlock_t lock ; unsigned int refcount ; struct list_head clock_list ; }; struct dev_pm_qos; struct dev_pm_info { pm_message_t power_state ; unsigned char can_wakeup : 1 ; unsigned char async_suspend : 1 ; bool is_prepared ; bool is_suspended ; bool ignore_children ; bool early_init ; spinlock_t lock ; struct list_head entry ; struct completion completion ; struct wakeup_source *wakeup ; bool wakeup_path ; bool syscore ; struct timer_list suspend_timer ; unsigned long timer_expires ; struct work_struct work ; wait_queue_head_t wait_queue ; atomic_t usage_count ; atomic_t child_count ; unsigned char disable_depth : 3 ; unsigned char idle_notification : 1 ; unsigned char request_pending : 1 ; unsigned char deferred_resume : 1 ; unsigned char run_wake : 1 ; unsigned char runtime_auto : 1 ; unsigned char no_callbacks : 1 ; unsigned char irq_safe : 1 ; unsigned char use_autosuspend : 1 ; unsigned char timer_autosuspends : 1 ; unsigned char memalloc_noio : 1 ; enum rpm_request request ; enum rpm_status runtime_status ; int runtime_error ; int autosuspend_delay ; unsigned long last_busy ; unsigned long active_jiffies ; unsigned long suspended_jiffies ; unsigned long accounting_timestamp ; struct pm_subsys_data *subsys_data ; struct dev_pm_qos *qos ; }; struct dev_pm_domain { struct dev_pm_ops ops ; }; struct pci_bus; struct __anonstruct_mm_context_t_101 { void *ldt ; int size ; unsigned short ia32_compat ; struct mutex lock ; void *vdso ; }; typedef struct __anonstruct_mm_context_t_101 mm_context_t; struct rb_node { unsigned long __rb_parent_color ; struct rb_node *rb_right ; struct rb_node *rb_left ; }; struct rb_root { struct rb_node *rb_node ; }; struct vm_area_struct; struct bio_vec; struct device_node; struct call_single_data { struct list_head list ; void (*func)(void * ) ; void *info ; u16 flags ; }; struct mem_cgroup; struct sock; struct kobject; enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; struct kobj_ns_type_operations { enum kobj_ns_type type ; bool (*current_may_mount)(void) ; void *(*grab_current_ns)(void) ; void const *(*netlink_ns)(struct sock * ) ; void const *(*initial_ns)(void) ; void (*drop_ns)(void * ) ; }; struct user_namespace; struct __anonstruct_kuid_t_128 { uid_t val ; }; typedef struct __anonstruct_kuid_t_128 kuid_t; struct __anonstruct_kgid_t_129 { gid_t val ; }; typedef struct __anonstruct_kgid_t_129 kgid_t; struct kstat { u64 ino ; dev_t dev ; umode_t mode ; unsigned int nlink ; kuid_t uid ; kgid_t gid ; dev_t rdev ; loff_t size ; struct timespec atime ; struct timespec mtime ; struct timespec ctime ; unsigned long blksize ; unsigned long long blocks ; }; struct bin_attribute; struct attribute { char const *name ; umode_t mode ; bool ignore_lockdep ; struct lock_class_key *key ; struct lock_class_key skey ; }; struct attribute_group { char const *name ; umode_t (*is_visible)(struct kobject * , struct attribute * , int ) ; struct attribute **attrs ; struct bin_attribute **bin_attrs ; }; struct bin_attribute { struct attribute attr ; size_t size ; void *private ; ssize_t (*read)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; ssize_t (*write)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; int 
(*mmap)(struct file * , struct kobject * , struct bin_attribute * , struct vm_area_struct * ) ; }; struct sysfs_ops { ssize_t (*show)(struct kobject * , struct attribute * , char * ) ; ssize_t (*store)(struct kobject * , struct attribute * , char const * , size_t ) ; void const *(*namespace)(struct kobject * , struct attribute const * ) ; }; struct sysfs_dirent; struct kref { atomic_t refcount ; }; enum kobject_action { KOBJ_ADD = 0, KOBJ_REMOVE = 1, KOBJ_CHANGE = 2, KOBJ_MOVE = 3, KOBJ_ONLINE = 4, KOBJ_OFFLINE = 5, KOBJ_MAX = 6 } ; struct kset; struct kobj_type; struct kobject { char const *name ; struct list_head entry ; struct kobject *parent ; struct kset *kset ; struct kobj_type *ktype ; struct sysfs_dirent *sd ; struct kref kref ; struct delayed_work release ; unsigned char state_initialized : 1 ; unsigned char state_in_sysfs : 1 ; unsigned char state_add_uevent_sent : 1 ; unsigned char state_remove_uevent_sent : 1 ; unsigned char uevent_suppress : 1 ; }; struct kobj_type { void (*release)(struct kobject * ) ; struct sysfs_ops const *sysfs_ops ; struct attribute **default_attrs ; struct kobj_ns_type_operations const *(*child_ns_type)(struct kobject * ) ; void const *(*namespace)(struct kobject * ) ; }; struct kobj_uevent_env { char *envp[32U] ; int envp_idx ; char buf[2048U] ; int buflen ; }; struct kset_uevent_ops { int (* const filter)(struct kset * , struct kobject * ) ; char const *(* const name)(struct kset * , struct kobject * ) ; int (* const uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ; }; struct kset { struct list_head list ; spinlock_t list_lock ; struct kobject kobj ; struct kset_uevent_ops const *uevent_ops ; }; struct kmem_cache_cpu { void **freelist ; unsigned long tid ; struct page *page ; struct page *partial ; unsigned int stat[26U] ; }; struct kmem_cache_order_objects { unsigned long x ; }; struct memcg_cache_params; struct kmem_cache_node; struct kmem_cache { struct kmem_cache_cpu *cpu_slab ; unsigned long flags ; unsigned long min_partial ; int size ; int object_size ; int offset ; int cpu_partial ; struct kmem_cache_order_objects oo ; struct kmem_cache_order_objects max ; struct kmem_cache_order_objects min ; gfp_t allocflags ; int refcount ; void (*ctor)(void * ) ; int inuse ; int align ; int reserved ; char const *name ; struct list_head list ; struct kobject kobj ; struct memcg_cache_params *memcg_params ; int max_attr_size ; int remote_node_defrag_ratio ; struct kmem_cache_node *node[1024U] ; }; struct __anonstruct_ldv_13198_131 { struct mem_cgroup *memcg ; struct list_head list ; struct kmem_cache *root_cache ; bool dead ; atomic_t nr_pages ; struct work_struct destroy ; }; union __anonunion_ldv_13199_130 { struct kmem_cache *memcg_caches[0U] ; struct __anonstruct_ldv_13198_131 ldv_13198 ; }; struct memcg_cache_params { bool is_root_cache ; union __anonunion_ldv_13199_130 ldv_13199 ; }; struct scsi_qla_host; struct qla_hw_data; struct fc_rport; struct fc_bsg_job; struct Scsi_Host; struct fc_port; struct device_attribute; struct inode; struct scsi_device; struct scsi_target; struct scsi_cmnd; struct fc_vport; struct nsproxy; struct ctl_table_root; struct ctl_table_header; struct ctl_dir; typedef int proc_handler(struct ctl_table * , int , void * , size_t * , loff_t * ); struct ctl_table_poll { atomic_t event ; wait_queue_head_t wait ; }; struct ctl_table { char const *procname ; void *data ; int maxlen ; umode_t mode ; struct ctl_table *child ; proc_handler *proc_handler ; struct ctl_table_poll *poll ; void *extra1 ; void *extra2 ; 
}; struct ctl_node { struct rb_node node ; struct ctl_table_header *header ; }; struct __anonstruct_ldv_14519_133 { struct ctl_table *ctl_table ; int used ; int count ; int nreg ; }; union __anonunion_ldv_14521_132 { struct __anonstruct_ldv_14519_133 ldv_14519 ; struct callback_head rcu ; }; struct ctl_table_set; struct ctl_table_header { union __anonunion_ldv_14521_132 ldv_14521 ; struct completion *unregistering ; struct ctl_table *ctl_table_arg ; struct ctl_table_root *root ; struct ctl_table_set *set ; struct ctl_dir *parent ; struct ctl_node *node ; }; struct ctl_dir { struct ctl_table_header header ; struct rb_root root ; }; struct ctl_table_set { int (*is_seen)(struct ctl_table_set * ) ; struct ctl_dir dir ; }; struct ctl_table_root { struct ctl_table_set default_set ; struct ctl_table_set *(*lookup)(struct ctl_table_root * , struct nsproxy * ) ; int (*permissions)(struct ctl_table_header * , struct ctl_table * ) ; }; struct cred; typedef __u64 Elf64_Addr; typedef __u16 Elf64_Half; typedef __u32 Elf64_Word; typedef __u64 Elf64_Xword; struct elf64_sym { Elf64_Word st_name ; unsigned char st_info ; unsigned char st_other ; Elf64_Half st_shndx ; Elf64_Addr st_value ; Elf64_Xword st_size ; }; typedef struct elf64_sym Elf64_Sym; struct kernel_param; struct kernel_param_ops { unsigned int flags ; int (*set)(char const * , struct kernel_param const * ) ; int (*get)(char * , struct kernel_param const * ) ; void (*free)(void * ) ; }; struct kparam_string; struct kparam_array; union __anonunion_ldv_14937_138 { void *arg ; struct kparam_string const *str ; struct kparam_array const *arr ; }; struct kernel_param { char const *name ; struct kernel_param_ops const *ops ; u16 perm ; s16 level ; union __anonunion_ldv_14937_138 ldv_14937 ; }; struct kparam_string { unsigned int maxlen ; char *string ; }; struct kparam_array { unsigned int max ; unsigned int elemsize ; unsigned int *num ; struct kernel_param_ops const *ops ; void *elem ; }; struct tracepoint; struct tracepoint_func { void *func ; void *data ; }; struct tracepoint { char const *name ; struct static_key key ; void (*regfunc)(void) ; void (*unregfunc)(void) ; struct tracepoint_func *funcs ; }; struct mod_arch_specific { }; struct module_param_attrs; struct module_kobject { struct kobject kobj ; struct module *mod ; struct kobject *drivers_dir ; struct module_param_attrs *mp ; struct completion *kobj_completion ; }; struct module_attribute { struct attribute attr ; ssize_t (*show)(struct module_attribute * , struct module_kobject * , char * ) ; ssize_t (*store)(struct module_attribute * , struct module_kobject * , char const * , size_t ) ; void (*setup)(struct module * , char const * ) ; int (*test)(struct module * ) ; void (*free)(struct module * ) ; }; struct exception_table_entry; enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; struct module_ref { unsigned long incs ; unsigned long decs ; }; struct module_sect_attrs; struct module_notes_attrs; struct ftrace_event_call; struct module { enum module_state state ; struct list_head list ; char name[56U] ; struct module_kobject mkobj ; struct module_attribute *modinfo_attrs ; char const *version ; char const *srcversion ; struct kobject *holders_dir ; struct kernel_symbol const *syms ; unsigned long const *crcs ; unsigned int num_syms ; struct kernel_param *kp ; unsigned int num_kp ; unsigned int num_gpl_syms ; struct kernel_symbol const *gpl_syms ; unsigned long const *gpl_crcs ; struct kernel_symbol const 
*unused_syms ; unsigned long const *unused_crcs ; unsigned int num_unused_syms ; unsigned int num_unused_gpl_syms ; struct kernel_symbol const *unused_gpl_syms ; unsigned long const *unused_gpl_crcs ; bool sig_ok ; struct kernel_symbol const *gpl_future_syms ; unsigned long const *gpl_future_crcs ; unsigned int num_gpl_future_syms ; unsigned int num_exentries ; struct exception_table_entry *extable ; int (*init)(void) ; void *module_init ; void *module_core ; unsigned int init_size ; unsigned int core_size ; unsigned int init_text_size ; unsigned int core_text_size ; unsigned int init_ro_size ; unsigned int core_ro_size ; struct mod_arch_specific arch ; unsigned int taints ; unsigned int num_bugs ; struct list_head bug_list ; struct bug_entry *bug_table ; Elf64_Sym *symtab ; Elf64_Sym *core_symtab ; unsigned int num_symtab ; unsigned int core_num_syms ; char *strtab ; char *core_strtab ; struct module_sect_attrs *sect_attrs ; struct module_notes_attrs *notes_attrs ; char *args ; void *percpu ; unsigned int percpu_size ; unsigned int num_tracepoints ; struct tracepoint * const *tracepoints_ptrs ; unsigned int num_trace_bprintk_fmt ; char const **trace_bprintk_fmt_start ; struct ftrace_event_call **trace_events ; unsigned int num_trace_events ; struct list_head source_list ; struct list_head target_list ; struct task_struct *waiter ; void (*exit)(void) ; struct module_ref *refptr ; ctor_fn_t (**ctors)(void) ; unsigned int num_ctors ; }; typedef unsigned long kernel_ulong_t; struct pci_device_id { __u32 vendor ; __u32 device ; __u32 subvendor ; __u32 subdevice ; __u32 class ; __u32 class_mask ; kernel_ulong_t driver_data ; }; struct acpi_device_id { __u8 id[9U] ; kernel_ulong_t driver_data ; }; struct of_device_id { char name[32U] ; char type[32U] ; char compatible[128U] ; void const *data ; }; struct klist_node; struct klist { spinlock_t k_lock ; struct list_head k_list ; void (*get)(struct klist_node * ) ; void (*put)(struct klist_node * ) ; }; struct klist_node { void *n_klist ; struct list_head n_node ; struct kref n_ref ; }; struct dma_map_ops; struct dev_archdata { struct dma_map_ops *dma_ops ; void *iommu ; }; struct device_private; struct device_driver; struct driver_private; struct class; struct subsys_private; struct bus_type; struct iommu_ops; struct iommu_group; struct bus_attribute { struct attribute attr ; ssize_t (*show)(struct bus_type * , char * ) ; ssize_t (*store)(struct bus_type * , char const * , size_t ) ; }; struct driver_attribute; struct bus_type { char const *name ; char const *dev_name ; struct device *dev_root ; struct bus_attribute *bus_attrs ; struct device_attribute *dev_attrs ; struct driver_attribute *drv_attrs ; struct attribute_group const **bus_groups ; struct attribute_group const **dev_groups ; struct attribute_group const **drv_groups ; int (*match)(struct device * , struct device_driver * ) ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*online)(struct device * ) ; int (*offline)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct dev_pm_ops const *pm ; struct iommu_ops *iommu_ops ; struct subsys_private *p ; struct lock_class_key lock_key ; }; struct device_type; struct device_driver { char const *name ; struct bus_type *bus ; struct module *owner ; char const *mod_name ; bool suppress_bind_attrs ; struct of_device_id const *of_match_table ; struct acpi_device_id const 
*acpi_match_table ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct attribute_group const **groups ; struct dev_pm_ops const *pm ; struct driver_private *p ; }; struct driver_attribute { struct attribute attr ; ssize_t (*show)(struct device_driver * , char * ) ; ssize_t (*store)(struct device_driver * , char const * , size_t ) ; }; struct class_attribute; struct class { char const *name ; struct module *owner ; struct class_attribute *class_attrs ; struct device_attribute *dev_attrs ; struct attribute_group const **dev_groups ; struct bin_attribute *dev_bin_attrs ; struct kobject *dev_kobj ; int (*dev_uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * ) ; void (*class_release)(struct class * ) ; void (*dev_release)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct kobj_ns_type_operations const *ns_type ; void const *(*namespace)(struct device * ) ; struct dev_pm_ops const *pm ; struct subsys_private *p ; }; struct class_attribute { struct attribute attr ; ssize_t (*show)(struct class * , struct class_attribute * , char * ) ; ssize_t (*store)(struct class * , struct class_attribute * , char const * , size_t ) ; void const *(*namespace)(struct class * , struct class_attribute const * ) ; }; struct device_type { char const *name ; struct attribute_group const **groups ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * , kuid_t * , kgid_t * ) ; void (*release)(struct device * ) ; struct dev_pm_ops const *pm ; }; struct device_attribute { struct attribute attr ; ssize_t (*show)(struct device * , struct device_attribute * , char * ) ; ssize_t (*store)(struct device * , struct device_attribute * , char const * , size_t ) ; }; struct device_dma_parameters { unsigned int max_segment_size ; unsigned long segment_boundary_mask ; }; struct acpi_dev_node { void *handle ; }; struct dma_coherent_mem; struct device { struct device *parent ; struct device_private *p ; struct kobject kobj ; char const *init_name ; struct device_type const *type ; struct mutex mutex ; struct bus_type *bus ; struct device_driver *driver ; void *platform_data ; struct dev_pm_info power ; struct dev_pm_domain *pm_domain ; int numa_node ; u64 *dma_mask ; u64 coherent_dma_mask ; struct device_dma_parameters *dma_parms ; struct list_head dma_pools ; struct dma_coherent_mem *dma_mem ; struct dev_archdata archdata ; struct device_node *of_node ; struct acpi_dev_node acpi_node ; dev_t devt ; u32 id ; spinlock_t devres_lock ; struct list_head devres_head ; struct klist_node knode_class ; struct class *class ; struct attribute_group const **groups ; void (*release)(struct device * ) ; struct iommu_group *iommu_group ; bool offline_disabled ; bool offline ; }; struct wakeup_source { char const *name ; struct list_head entry ; spinlock_t lock ; struct timer_list timer ; unsigned long timer_expires ; ktime_t total_time ; ktime_t max_time ; ktime_t last_time ; ktime_t start_prevent_time ; ktime_t prevent_sleep_time ; unsigned long event_count ; unsigned long active_count ; unsigned long relax_count ; unsigned long expire_count ; unsigned long wakeup_count ; bool active ; bool autosleep_enabled ; }; enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; typedef enum irqreturn irqreturn_t; struct hotplug_slot; struct 
pci_slot { struct pci_bus *bus ; struct list_head list ; struct hotplug_slot *hotplug ; unsigned char number ; struct kobject kobj ; }; typedef int pci_power_t; typedef unsigned int pci_channel_state_t; enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ; typedef unsigned short pci_dev_flags_t; typedef unsigned short pci_bus_flags_t; struct pcie_link_state; struct pci_vpd; struct pci_sriov; struct pci_ats; struct proc_dir_entry; struct pci_driver; union __anonunion_ldv_16690_142 { struct pci_sriov *sriov ; struct pci_dev *physfn ; }; struct pci_dev { struct list_head bus_list ; struct pci_bus *bus ; struct pci_bus *subordinate ; void *sysdata ; struct proc_dir_entry *procent ; struct pci_slot *slot ; unsigned int devfn ; unsigned short vendor ; unsigned short device ; unsigned short subsystem_vendor ; unsigned short subsystem_device ; unsigned int class ; u8 revision ; u8 hdr_type ; u8 pcie_cap ; u8 msi_cap ; u8 msix_cap ; unsigned char pcie_mpss : 3 ; u8 rom_base_reg ; u8 pin ; u16 pcie_flags_reg ; struct pci_driver *driver ; u64 dma_mask ; struct device_dma_parameters dma_parms ; pci_power_t current_state ; u8 pm_cap ; unsigned char pme_support : 5 ; unsigned char pme_interrupt : 1 ; unsigned char pme_poll : 1 ; unsigned char d1_support : 1 ; unsigned char d2_support : 1 ; unsigned char no_d1d2 : 1 ; unsigned char no_d3cold : 1 ; unsigned char d3cold_allowed : 1 ; unsigned char mmio_always_on : 1 ; unsigned char wakeup_prepared : 1 ; unsigned char runtime_d3cold : 1 ; unsigned int d3_delay ; unsigned int d3cold_delay ; struct pcie_link_state *link_state ; pci_channel_state_t error_state ; struct device dev ; int cfg_size ; unsigned int irq ; struct resource resource[17U] ; bool match_driver ; unsigned char transparent : 1 ; unsigned char multifunction : 1 ; unsigned char is_added : 1 ; unsigned char is_busmaster : 1 ; unsigned char no_msi : 1 ; unsigned char block_cfg_access : 1 ; unsigned char broken_parity_status : 1 ; unsigned char irq_reroute_variant : 2 ; unsigned char msi_enabled : 1 ; unsigned char msix_enabled : 1 ; unsigned char ari_enabled : 1 ; unsigned char is_managed : 1 ; unsigned char is_pcie : 1 ; unsigned char needs_freset : 1 ; unsigned char state_saved : 1 ; unsigned char is_physfn : 1 ; unsigned char is_virtfn : 1 ; unsigned char reset_fn : 1 ; unsigned char is_hotplug_bridge : 1 ; unsigned char __aer_firmware_first_valid : 1 ; unsigned char __aer_firmware_first : 1 ; unsigned char broken_intx_masking : 1 ; unsigned char io_window_1k : 1 ; pci_dev_flags_t dev_flags ; atomic_t enable_cnt ; u32 saved_config_space[16U] ; struct hlist_head saved_cap_space ; struct bin_attribute *rom_attr ; int rom_attr_enabled ; struct bin_attribute *res_attr[17U] ; struct bin_attribute *res_attr_wc[17U] ; struct list_head msi_list ; struct kset *msi_kset ; struct pci_vpd *vpd ; union __anonunion_ldv_16690_142 ldv_16690 ; struct pci_ats *ats ; phys_addr_t rom ; size_t romlen ; }; struct pci_ops; struct msi_chip; struct pci_bus { struct list_head node ; struct pci_bus *parent ; struct list_head children ; struct list_head devices ; struct pci_dev *self ; struct list_head slots ; struct resource *resource[4U] ; struct list_head resources ; struct resource busn_res ; struct pci_ops *ops ; struct msi_chip *msi ; void *sysdata ; struct proc_dir_entry *procdir ; unsigned char number ; unsigned char primary ; unsigned char max_bus_speed ; unsigned char cur_bus_speed ; char name[48U] ; unsigned short bridge_ctl ; pci_bus_flags_t 
bus_flags ; struct device *bridge ; struct device dev ; struct bin_attribute *legacy_io ; struct bin_attribute *legacy_mem ; unsigned char is_added : 1 ; }; struct pci_ops { int (*read)(struct pci_bus * , unsigned int , int , int , u32 * ) ; int (*write)(struct pci_bus * , unsigned int , int , int , u32 ) ; }; struct pci_dynids { spinlock_t lock ; struct list_head list ; }; typedef unsigned int pci_ers_result_t; struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev * , enum pci_channel_state ) ; pci_ers_result_t (*mmio_enabled)(struct pci_dev * ) ; pci_ers_result_t (*link_reset)(struct pci_dev * ) ; pci_ers_result_t (*slot_reset)(struct pci_dev * ) ; void (*resume)(struct pci_dev * ) ; }; struct pci_driver { struct list_head node ; char const *name ; struct pci_device_id const *id_table ; int (*probe)(struct pci_dev * , struct pci_device_id const * ) ; void (*remove)(struct pci_dev * ) ; int (*suspend)(struct pci_dev * , pm_message_t ) ; int (*suspend_late)(struct pci_dev * , pm_message_t ) ; int (*resume_early)(struct pci_dev * ) ; int (*resume)(struct pci_dev * ) ; void (*shutdown)(struct pci_dev * ) ; int (*sriov_configure)(struct pci_dev * , int ) ; struct pci_error_handlers const *err_handler ; struct device_driver driver ; struct pci_dynids dynids ; }; struct scatterlist { unsigned long sg_magic ; unsigned long page_link ; unsigned int offset ; unsigned int length ; dma_addr_t dma_address ; unsigned int dma_length ; }; struct dma_pool; struct arch_uprobe_task { unsigned long saved_scratch_register ; unsigned int saved_trap_nr ; unsigned int saved_tf ; }; enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; struct return_instance; struct uprobe; struct uprobe_task { enum uprobe_task_state state ; struct arch_uprobe_task autask ; struct return_instance *return_instances ; unsigned int depth ; struct uprobe *active_uprobe ; unsigned long xol_vaddr ; unsigned long vaddr ; }; struct xol_area { wait_queue_head_t wq ; atomic_t slot_count ; unsigned long *bitmap ; struct page *page ; unsigned long vaddr ; }; struct uprobes_state { struct xol_area *xol_area ; }; struct address_space; union __anonunion_ldv_17779_144 { unsigned long index ; void *freelist ; bool pfmemalloc ; }; struct __anonstruct_ldv_17789_148 { unsigned short inuse ; unsigned short objects : 15 ; unsigned char frozen : 1 ; }; union __anonunion_ldv_17791_147 { atomic_t _mapcount ; struct __anonstruct_ldv_17789_148 ldv_17789 ; int units ; }; struct __anonstruct_ldv_17793_146 { union __anonunion_ldv_17791_147 ldv_17791 ; atomic_t _count ; }; union __anonunion_ldv_17794_145 { unsigned long counters ; struct __anonstruct_ldv_17793_146 ldv_17793 ; }; struct __anonstruct_ldv_17795_143 { union __anonunion_ldv_17779_144 ldv_17779 ; union __anonunion_ldv_17794_145 ldv_17794 ; }; struct __anonstruct_ldv_17802_150 { struct page *next ; int pages ; int pobjects ; }; struct slab; union __anonunion_ldv_17806_149 { struct list_head lru ; struct __anonstruct_ldv_17802_150 ldv_17802 ; struct list_head list ; struct slab *slab_page ; }; union __anonunion_ldv_17811_151 { unsigned long private ; struct kmem_cache *slab_cache ; struct page *first_page ; }; struct page { unsigned long flags ; struct address_space *mapping ; struct __anonstruct_ldv_17795_143 ldv_17795 ; union __anonunion_ldv_17806_149 ldv_17806 ; union __anonunion_ldv_17811_151 ldv_17811 ; unsigned long debug_flags ; }; struct page_frag { struct page *page ; __u32 offset ; __u32 size ; }; struct 
__anonstruct_linear_153 { struct rb_node rb ; unsigned long rb_subtree_last ; }; union __anonunion_shared_152 { struct __anonstruct_linear_153 linear ; struct list_head nonlinear ; }; struct anon_vma; struct vm_operations_struct; struct mempolicy; struct vm_area_struct { unsigned long vm_start ; unsigned long vm_end ; struct vm_area_struct *vm_next ; struct vm_area_struct *vm_prev ; struct rb_node vm_rb ; unsigned long rb_subtree_gap ; struct mm_struct *vm_mm ; pgprot_t vm_page_prot ; unsigned long vm_flags ; union __anonunion_shared_152 shared ; struct list_head anon_vma_chain ; struct anon_vma *anon_vma ; struct vm_operations_struct const *vm_ops ; unsigned long vm_pgoff ; struct file *vm_file ; void *vm_private_data ; struct mempolicy *vm_policy ; }; struct core_thread { struct task_struct *task ; struct core_thread *next ; }; struct core_state { atomic_t nr_threads ; struct core_thread dumper ; struct completion startup ; }; struct mm_rss_stat { atomic_long_t count[3U] ; }; struct kioctx_table; struct linux_binfmt; struct mmu_notifier_mm; struct mm_struct { struct vm_area_struct *mmap ; struct rb_root mm_rb ; struct vm_area_struct *mmap_cache ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; unsigned long mmap_base ; unsigned long mmap_legacy_base ; unsigned long task_size ; unsigned long highest_vm_end ; pgd_t *pgd ; atomic_t mm_users ; atomic_t mm_count ; int map_count ; spinlock_t page_table_lock ; struct rw_semaphore mmap_sem ; struct list_head mmlist ; unsigned long hiwater_rss ; unsigned long hiwater_vm ; unsigned long total_vm ; unsigned long locked_vm ; unsigned long pinned_vm ; unsigned long shared_vm ; unsigned long exec_vm ; unsigned long stack_vm ; unsigned long def_flags ; unsigned long nr_ptes ; unsigned long start_code ; unsigned long end_code ; unsigned long start_data ; unsigned long end_data ; unsigned long start_brk ; unsigned long brk ; unsigned long start_stack ; unsigned long arg_start ; unsigned long arg_end ; unsigned long env_start ; unsigned long env_end ; unsigned long saved_auxv[44U] ; struct mm_rss_stat rss_stat ; struct linux_binfmt *binfmt ; cpumask_var_t cpu_vm_mask_var ; mm_context_t context ; unsigned long flags ; struct core_state *core_state ; spinlock_t ioctx_lock ; struct kioctx_table *ioctx_table ; struct task_struct *owner ; struct file *exe_file ; struct mmu_notifier_mm *mmu_notifier_mm ; pgtable_t pmd_huge_pte ; struct cpumask cpumask_allocation ; unsigned long numa_next_scan ; unsigned long numa_next_reset ; unsigned long numa_scan_offset ; int numa_scan_seq ; int first_nid ; struct uprobes_state uprobes_state ; }; struct shrink_control { gfp_t gfp_mask ; unsigned long nr_to_scan ; nodemask_t nodes_to_scan ; int nid ; }; struct shrinker { unsigned long (*count_objects)(struct shrinker * , struct shrink_control * ) ; unsigned long (*scan_objects)(struct shrinker * , struct shrink_control * ) ; int seeks ; long batch ; unsigned long flags ; struct list_head list ; atomic_long_t *nr_deferred ; }; struct file_ra_state; struct user_struct; struct writeback_control; struct vm_fault { unsigned int flags ; unsigned long pgoff ; void *virtual_address ; struct page *page ; }; struct vm_operations_struct { void (*open)(struct vm_area_struct * ) ; void (*close)(struct vm_area_struct * ) ; int (*fault)(struct vm_area_struct * , struct vm_fault * ) ; int (*page_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*access)(struct vm_area_struct * , unsigned long , void * , int , 
int ) ; int (*set_policy)(struct vm_area_struct * , struct mempolicy * ) ; struct mempolicy *(*get_policy)(struct vm_area_struct * , unsigned long ) ; int (*migrate)(struct vm_area_struct * , nodemask_t const * , nodemask_t const * , unsigned long ) ; int (*remap_pages)(struct vm_area_struct * , unsigned long , unsigned long , unsigned long ) ; }; struct pci_sysdata { int domain ; int node ; void *acpi ; void *iommu ; }; struct dma_attrs { unsigned long flags[1U] ; }; enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; struct sg_table { struct scatterlist *sgl ; unsigned int nents ; unsigned int orig_nents ; }; struct dma_map_ops { void *(*alloc)(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ) ; void (*free)(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ) ; int (*mmap)(struct device * , struct vm_area_struct * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; int (*get_sgtable)(struct device * , struct sg_table * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; dma_addr_t (*map_page)(struct device * , struct page * , unsigned long , size_t , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_page)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ) ; int (*map_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*sync_single_for_cpu)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_single_for_device)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_sg_for_cpu)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; void (*sync_sg_for_device)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; int (*mapping_error)(struct device * , dma_addr_t ) ; int (*dma_supported)(struct device * , u64 ) ; int (*set_dma_mask)(struct device * , u64 ) ; int is_phys ; }; struct kernel_cap_struct { __u32 cap[2U] ; }; typedef struct kernel_cap_struct kernel_cap_t; struct dentry; typedef unsigned long cputime_t; struct sem_undo_list; struct sysv_sem { struct sem_undo_list *undo_list ; }; struct __anonstruct_sigset_t_155 { unsigned long sig[1U] ; }; typedef struct __anonstruct_sigset_t_155 sigset_t; struct siginfo; typedef void __signalfn_t(int ); typedef __signalfn_t *__sighandler_t; typedef void __restorefn_t(void); typedef __restorefn_t *__sigrestore_t; union sigval { int sival_int ; void *sival_ptr ; }; typedef union sigval sigval_t; struct __anonstruct__kill_157 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; }; struct __anonstruct__timer_158 { __kernel_timer_t _tid ; int _overrun ; char _pad[0U] ; sigval_t _sigval ; int _sys_private ; }; struct __anonstruct__rt_159 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; sigval_t _sigval ; }; struct __anonstruct__sigchld_160 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; int _status ; __kernel_clock_t _utime ; __kernel_clock_t _stime ; }; struct __anonstruct__sigfault_161 { void *_addr ; short _addr_lsb ; }; struct __anonstruct__sigpoll_162 { long _band ; int _fd ; }; struct __anonstruct__sigsys_163 { void *_call_addr ; int _syscall ; unsigned int _arch ; }; union __anonunion__sifields_156 { int _pad[28U] ; struct __anonstruct__kill_157 _kill ; struct __anonstruct__timer_158 _timer ; struct __anonstruct__rt_159 _rt ; struct 
__anonstruct__sigchld_160 _sigchld ; struct __anonstruct__sigfault_161 _sigfault ; struct __anonstruct__sigpoll_162 _sigpoll ; struct __anonstruct__sigsys_163 _sigsys ; }; struct siginfo { int si_signo ; int si_errno ; int si_code ; union __anonunion__sifields_156 _sifields ; }; typedef struct siginfo siginfo_t; struct sigpending { struct list_head list ; sigset_t signal ; }; struct sigaction { __sighandler_t sa_handler ; unsigned long sa_flags ; __sigrestore_t sa_restorer ; sigset_t sa_mask ; }; struct k_sigaction { struct sigaction sa ; }; enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; struct pid_namespace; struct upid { int nr ; struct pid_namespace *ns ; struct hlist_node pid_chain ; }; struct pid { atomic_t count ; unsigned int level ; struct hlist_head tasks[3U] ; struct callback_head rcu ; struct upid numbers[1U] ; }; struct pid_link { struct hlist_node node ; struct pid *pid ; }; struct percpu_counter { raw_spinlock_t lock ; s64 count ; struct list_head list ; s32 *counters ; }; struct seccomp_filter; struct seccomp { int mode ; struct seccomp_filter *filter ; }; struct plist_head { struct list_head node_list ; }; struct plist_node { int prio ; struct list_head prio_list ; struct list_head node_list ; }; struct rt_mutex_waiter; struct rlimit { unsigned long rlim_cur ; unsigned long rlim_max ; }; struct timerqueue_node { struct rb_node node ; ktime_t expires ; }; struct timerqueue_head { struct rb_root head ; struct timerqueue_node *next ; }; struct hrtimer_clock_base; struct hrtimer_cpu_base; enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; struct hrtimer { struct timerqueue_node node ; ktime_t _softexpires ; enum hrtimer_restart (*function)(struct hrtimer * ) ; struct hrtimer_clock_base *base ; unsigned long state ; int start_pid ; void *start_site ; char start_comm[16U] ; }; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base ; int index ; clockid_t clockid ; struct timerqueue_head active ; ktime_t resolution ; ktime_t (*get_time)(void) ; ktime_t softirq_time ; ktime_t offset ; }; struct hrtimer_cpu_base { raw_spinlock_t lock ; unsigned int active_bases ; unsigned int clock_was_set ; ktime_t expires_next ; int hres_active ; int hang_detected ; unsigned long nr_events ; unsigned long nr_retries ; unsigned long nr_hangs ; ktime_t max_hang_time ; struct hrtimer_clock_base clock_base[4U] ; }; struct task_io_accounting { u64 rchar ; u64 wchar ; u64 syscr ; u64 syscw ; u64 read_bytes ; u64 write_bytes ; u64 cancelled_write_bytes ; }; struct latency_record { unsigned long backtrace[12U] ; unsigned int count ; unsigned long time ; unsigned long max ; }; typedef int32_t key_serial_t; typedef uint32_t key_perm_t; struct key; struct signal_struct; struct key_type; struct keyring_list; union __anonunion_ldv_23135_166 { struct list_head graveyard_link ; struct rb_node serial_node ; }; struct key_user; union __anonunion_ldv_23144_167 { time_t expiry ; time_t revoked_at ; }; union __anonunion_type_data_168 { struct list_head link ; unsigned long x[2U] ; void *p[2U] ; int reject_error ; }; union __anonunion_payload_169 { unsigned long value ; void *rcudata ; void *data ; struct keyring_list *subscriptions ; }; struct key { atomic_t usage ; key_serial_t serial ; union __anonunion_ldv_23135_166 ldv_23135 ; struct key_type *type ; struct rw_semaphore sem ; struct key_user *user ; void *security ; union __anonunion_ldv_23144_167 ldv_23144 ; time_t last_used_at ; kuid_t uid ; kgid_t gid ; key_perm_t perm ; unsigned short quotalen 
; unsigned short datalen ; unsigned long flags ; char *description ; union __anonunion_type_data_168 type_data ; union __anonunion_payload_169 payload ; }; struct audit_context; struct group_info { atomic_t usage ; int ngroups ; int nblocks ; kgid_t small_block[32U] ; kgid_t *blocks[0U] ; }; struct cred { atomic_t usage ; atomic_t subscribers ; void *put_addr ; unsigned int magic ; kuid_t uid ; kgid_t gid ; kuid_t suid ; kgid_t sgid ; kuid_t euid ; kgid_t egid ; kuid_t fsuid ; kgid_t fsgid ; unsigned int securebits ; kernel_cap_t cap_inheritable ; kernel_cap_t cap_permitted ; kernel_cap_t cap_effective ; kernel_cap_t cap_bset ; unsigned char jit_keyring ; struct key *session_keyring ; struct key *process_keyring ; struct key *thread_keyring ; struct key *request_key_auth ; void *security ; struct user_struct *user ; struct user_namespace *user_ns ; struct group_info *group_info ; struct callback_head rcu ; }; struct llist_node; struct llist_node { struct llist_node *next ; }; struct futex_pi_state; struct robust_list_head; struct bio_list; struct fs_struct; struct perf_event_context; struct blk_plug; struct cfs_rq; struct task_group; struct sighand_struct { atomic_t count ; struct k_sigaction action[64U] ; spinlock_t siglock ; wait_queue_head_t signalfd_wqh ; }; struct pacct_struct { int ac_flag ; long ac_exitcode ; unsigned long ac_mem ; cputime_t ac_utime ; cputime_t ac_stime ; unsigned long ac_minflt ; unsigned long ac_majflt ; }; struct cpu_itimer { cputime_t expires ; cputime_t incr ; u32 error ; u32 incr_error ; }; struct cputime { cputime_t utime ; cputime_t stime ; }; struct task_cputime { cputime_t utime ; cputime_t stime ; unsigned long long sum_exec_runtime ; }; struct thread_group_cputimer { struct task_cputime cputime ; int running ; raw_spinlock_t lock ; }; struct autogroup; struct tty_struct; struct taskstats; struct tty_audit_buf; struct signal_struct { atomic_t sigcnt ; atomic_t live ; int nr_threads ; wait_queue_head_t wait_chldexit ; struct task_struct *curr_target ; struct sigpending shared_pending ; int group_exit_code ; int notify_count ; struct task_struct *group_exit_task ; int group_stop_count ; unsigned int flags ; unsigned char is_child_subreaper : 1 ; unsigned char has_child_subreaper : 1 ; int posix_timer_id ; struct list_head posix_timers ; struct hrtimer real_timer ; struct pid *leader_pid ; ktime_t it_real_incr ; struct cpu_itimer it[2U] ; struct thread_group_cputimer cputimer ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct pid *tty_old_pgrp ; int leader ; struct tty_struct *tty ; struct autogroup *autogroup ; cputime_t utime ; cputime_t stime ; cputime_t cutime ; cputime_t cstime ; cputime_t gtime ; cputime_t cgtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; unsigned long cnvcsw ; unsigned long cnivcsw ; unsigned long min_flt ; unsigned long maj_flt ; unsigned long cmin_flt ; unsigned long cmaj_flt ; unsigned long inblock ; unsigned long oublock ; unsigned long cinblock ; unsigned long coublock ; unsigned long maxrss ; unsigned long cmaxrss ; struct task_io_accounting ioac ; unsigned long long sum_sched_runtime ; struct rlimit rlim[16U] ; struct pacct_struct pacct ; struct taskstats *stats ; unsigned int audit_tty ; unsigned int audit_tty_log_passwd ; struct tty_audit_buf *tty_audit_buf ; struct rw_semaphore group_rwsem ; oom_flags_t oom_flags ; short oom_score_adj ; short oom_score_adj_min ; struct mutex cred_guard_mutex ; }; struct user_struct { atomic_t __count ; atomic_t processes ; 
atomic_t files ; atomic_t sigpending ; atomic_t inotify_watches ; atomic_t inotify_devs ; atomic_t fanotify_listeners ; atomic_long_t epoll_watches ; unsigned long mq_bytes ; unsigned long locked_shm ; struct key *uid_keyring ; struct key *session_keyring ; struct hlist_node uidhash_node ; kuid_t uid ; atomic_long_t locked_vm ; }; struct backing_dev_info; struct reclaim_state; struct sched_info { unsigned long pcount ; unsigned long long run_delay ; unsigned long long last_arrival ; unsigned long long last_queued ; }; struct task_delay_info { spinlock_t lock ; unsigned int flags ; struct timespec blkio_start ; struct timespec blkio_end ; u64 blkio_delay ; u64 swapin_delay ; u32 blkio_count ; u32 swapin_count ; struct timespec freepages_start ; struct timespec freepages_end ; u64 freepages_delay ; u32 freepages_count ; }; struct io_context; struct pipe_inode_info; struct uts_namespace; struct load_weight { unsigned long weight ; unsigned long inv_weight ; }; struct sched_avg { u32 runnable_avg_sum ; u32 runnable_avg_period ; u64 last_runnable_update ; s64 decay_count ; unsigned long load_avg_contrib ; }; struct sched_statistics { u64 wait_start ; u64 wait_max ; u64 wait_count ; u64 wait_sum ; u64 iowait_count ; u64 iowait_sum ; u64 sleep_start ; u64 sleep_max ; s64 sum_sleep_runtime ; u64 block_start ; u64 block_max ; u64 exec_max ; u64 slice_max ; u64 nr_migrations_cold ; u64 nr_failed_migrations_affine ; u64 nr_failed_migrations_running ; u64 nr_failed_migrations_hot ; u64 nr_forced_migrations ; u64 nr_wakeups ; u64 nr_wakeups_sync ; u64 nr_wakeups_migrate ; u64 nr_wakeups_local ; u64 nr_wakeups_remote ; u64 nr_wakeups_affine ; u64 nr_wakeups_affine_attempts ; u64 nr_wakeups_passive ; u64 nr_wakeups_idle ; }; struct sched_entity { struct load_weight load ; struct rb_node run_node ; struct list_head group_node ; unsigned int on_rq ; u64 exec_start ; u64 sum_exec_runtime ; u64 vruntime ; u64 prev_sum_exec_runtime ; u64 nr_migrations ; struct sched_statistics statistics ; struct sched_entity *parent ; struct cfs_rq *cfs_rq ; struct cfs_rq *my_q ; struct sched_avg avg ; }; struct rt_rq; struct sched_rt_entity { struct list_head run_list ; unsigned long timeout ; unsigned long watchdog_stamp ; unsigned int time_slice ; struct sched_rt_entity *back ; struct sched_rt_entity *parent ; struct rt_rq *rt_rq ; struct rt_rq *my_q ; }; struct memcg_batch_info { int do_batch ; struct mem_cgroup *memcg ; unsigned long nr_pages ; unsigned long memsw_nr_pages ; }; struct memcg_oom_info { unsigned char may_oom : 1 ; unsigned char in_memcg_oom : 1 ; unsigned char oom_locked : 1 ; int wakeups ; struct mem_cgroup *wait_on_memcg ; }; struct sched_class; struct files_struct; struct css_set; struct compat_robust_list_head; struct task_struct { long volatile state ; void *stack ; atomic_t usage ; unsigned int flags ; unsigned int ptrace ; struct llist_node wake_entry ; int on_cpu ; struct task_struct *last_wakee ; unsigned long wakee_flips ; unsigned long wakee_flip_decay_ts ; int on_rq ; int prio ; int static_prio ; int normal_prio ; unsigned int rt_priority ; struct sched_class const *sched_class ; struct sched_entity se ; struct sched_rt_entity rt ; struct task_group *sched_task_group ; struct hlist_head preempt_notifiers ; unsigned char fpu_counter ; unsigned int policy ; int nr_cpus_allowed ; cpumask_t cpus_allowed ; struct sched_info sched_info ; struct list_head tasks ; struct plist_node pushable_tasks ; struct mm_struct *mm ; struct mm_struct *active_mm ; unsigned char brk_randomized : 1 ; int exit_state ; 
int exit_code ; int exit_signal ; int pdeath_signal ; unsigned int jobctl ; unsigned int personality ; unsigned char did_exec : 1 ; unsigned char in_execve : 1 ; unsigned char in_iowait : 1 ; unsigned char no_new_privs : 1 ; unsigned char sched_reset_on_fork : 1 ; unsigned char sched_contributes_to_load : 1 ; pid_t pid ; pid_t tgid ; unsigned long stack_canary ; struct task_struct *real_parent ; struct task_struct *parent ; struct list_head children ; struct list_head sibling ; struct task_struct *group_leader ; struct list_head ptraced ; struct list_head ptrace_entry ; struct pid_link pids[3U] ; struct list_head thread_group ; struct completion *vfork_done ; int *set_child_tid ; int *clear_child_tid ; cputime_t utime ; cputime_t stime ; cputime_t utimescaled ; cputime_t stimescaled ; cputime_t gtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; struct timespec start_time ; struct timespec real_start_time ; unsigned long min_flt ; unsigned long maj_flt ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct cred const *real_cred ; struct cred const *cred ; char comm[16U] ; int link_count ; int total_link_count ; struct sysv_sem sysvsem ; unsigned long last_switch_count ; struct thread_struct thread ; struct fs_struct *fs ; struct files_struct *files ; struct nsproxy *nsproxy ; struct signal_struct *signal ; struct sighand_struct *sighand ; sigset_t blocked ; sigset_t real_blocked ; sigset_t saved_sigmask ; struct sigpending pending ; unsigned long sas_ss_sp ; size_t sas_ss_size ; int (*notifier)(void * ) ; void *notifier_data ; sigset_t *notifier_mask ; struct callback_head *task_works ; struct audit_context *audit_context ; kuid_t loginuid ; unsigned int sessionid ; struct seccomp seccomp ; u32 parent_exec_id ; u32 self_exec_id ; spinlock_t alloc_lock ; raw_spinlock_t pi_lock ; struct plist_head pi_waiters ; struct rt_mutex_waiter *pi_blocked_on ; struct mutex_waiter *blocked_on ; unsigned int irq_events ; unsigned long hardirq_enable_ip ; unsigned long hardirq_disable_ip ; unsigned int hardirq_enable_event ; unsigned int hardirq_disable_event ; int hardirqs_enabled ; int hardirq_context ; unsigned long softirq_disable_ip ; unsigned long softirq_enable_ip ; unsigned int softirq_disable_event ; unsigned int softirq_enable_event ; int softirqs_enabled ; int softirq_context ; u64 curr_chain_key ; int lockdep_depth ; unsigned int lockdep_recursion ; struct held_lock held_locks[48U] ; gfp_t lockdep_reclaim_gfp ; void *journal_info ; struct bio_list *bio_list ; struct blk_plug *plug ; struct reclaim_state *reclaim_state ; struct backing_dev_info *backing_dev_info ; struct io_context *io_context ; unsigned long ptrace_message ; siginfo_t *last_siginfo ; struct task_io_accounting ioac ; u64 acct_rss_mem1 ; u64 acct_vm_mem1 ; cputime_t acct_timexpd ; nodemask_t mems_allowed ; seqcount_t mems_allowed_seq ; int cpuset_mem_spread_rotor ; int cpuset_slab_spread_rotor ; struct css_set *cgroups ; struct list_head cg_list ; struct robust_list_head *robust_list ; struct compat_robust_list_head *compat_robust_list ; struct list_head pi_state_list ; struct futex_pi_state *pi_state_cache ; struct perf_event_context *perf_event_ctxp[2U] ; struct mutex perf_event_mutex ; struct list_head perf_event_list ; struct mempolicy *mempolicy ; short il_next ; short pref_node_fork ; int numa_scan_seq ; int numa_migrate_seq ; unsigned int numa_scan_period ; u64 node_stamp ; struct callback_head numa_work ; struct callback_head rcu ; struct pipe_inode_info 
*splice_pipe ; struct page_frag task_frag ; struct task_delay_info *delays ; int make_it_fail ; int nr_dirtied ; int nr_dirtied_pause ; unsigned long dirty_paused_when ; int latency_record_count ; struct latency_record latency_record[32U] ; unsigned long timer_slack_ns ; unsigned long default_timer_slack_ns ; unsigned long trace ; unsigned long trace_recursion ; struct memcg_batch_info memcg_batch ; unsigned int memcg_kmem_skip_account ; struct memcg_oom_info memcg_oom ; struct uprobe_task *utask ; unsigned int sequential_io ; unsigned int sequential_io_avg ; }; typedef void *mempool_alloc_t(gfp_t , void * ); typedef void mempool_free_t(void * , void * ); struct mempool_s { spinlock_t lock ; int min_nr ; int curr_nr ; void **elements ; void *pool_data ; mempool_alloc_t *alloc ; mempool_free_t *free ; wait_queue_head_t wait ; }; typedef struct mempool_s mempool_t; struct exception_table_entry { int insn ; int fixup ; }; struct firmware { size_t size ; u8 const *data ; struct page **pages ; void *priv ; }; struct scsi_lun { __u8 scsi_lun[8U] ; }; struct path; struct seq_file { char *buf ; size_t size ; size_t from ; size_t count ; loff_t index ; loff_t read_pos ; u64 version ; struct mutex lock ; struct seq_operations const *op ; int poll_event ; struct user_namespace *user_ns ; void *private ; }; struct seq_operations { void *(*start)(struct seq_file * , loff_t * ) ; void (*stop)(struct seq_file * , void * ) ; void *(*next)(struct seq_file * , void * , loff_t * ) ; int (*show)(struct seq_file * , void * ) ; }; struct request_queue; struct block_device; struct scsi_host_cmd_pool; struct scsi_transport_template; enum blk_eh_timer_return; struct scsi_host_template { struct module *module ; char const *name ; int (*detect)(struct scsi_host_template * ) ; int (*release)(struct Scsi_Host * ) ; char const *(*info)(struct Scsi_Host * ) ; int (*ioctl)(struct scsi_device * , int , void * ) ; int (*compat_ioctl)(struct scsi_device * , int , void * ) ; int (*queuecommand)(struct Scsi_Host * , struct scsi_cmnd * ) ; int (*transfer_response)(struct scsi_cmnd * , void (*)(struct scsi_cmnd * ) ) ; int (*eh_abort_handler)(struct scsi_cmnd * ) ; int (*eh_device_reset_handler)(struct scsi_cmnd * ) ; int (*eh_target_reset_handler)(struct scsi_cmnd * ) ; int (*eh_bus_reset_handler)(struct scsi_cmnd * ) ; int (*eh_host_reset_handler)(struct scsi_cmnd * ) ; int (*slave_alloc)(struct scsi_device * ) ; int (*slave_configure)(struct scsi_device * ) ; void (*slave_destroy)(struct scsi_device * ) ; int (*target_alloc)(struct scsi_target * ) ; void (*target_destroy)(struct scsi_target * ) ; int (*scan_finished)(struct Scsi_Host * , unsigned long ) ; void (*scan_start)(struct Scsi_Host * ) ; int (*change_queue_depth)(struct scsi_device * , int , int ) ; int (*change_queue_type)(struct scsi_device * , int ) ; int (*bios_param)(struct scsi_device * , struct block_device * , sector_t , int * ) ; void (*unlock_native_capacity)(struct scsi_device * ) ; int (*show_info)(struct seq_file * , struct Scsi_Host * ) ; int (*write_info)(struct Scsi_Host * , char * , int ) ; enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd * ) ; int (*host_reset)(struct Scsi_Host * , int ) ; char const *proc_name ; struct proc_dir_entry *proc_dir ; int can_queue ; int this_id ; unsigned short sg_tablesize ; unsigned short sg_prot_tablesize ; unsigned short max_sectors ; unsigned long dma_boundary ; short cmd_per_lun ; unsigned char present ; unsigned char supported_mode : 2 ; unsigned char unchecked_isa_dma : 1 ; unsigned char use_clustering 
: 1 ; unsigned char emulated : 1 ; unsigned char skip_settle_delay : 1 ; unsigned char ordered_tag : 1 ; unsigned int max_host_blocked ; struct device_attribute **shost_attrs ; struct device_attribute **sdev_attrs ; struct list_head legacy_hosts ; u64 vendor_id ; }; enum scsi_host_state { SHOST_CREATED = 1, SHOST_RUNNING = 2, SHOST_CANCEL = 3, SHOST_DEL = 4, SHOST_RECOVERY = 5, SHOST_CANCEL_RECOVERY = 6, SHOST_DEL_RECOVERY = 7 } ; struct blk_queue_tag; struct Scsi_Host { struct list_head __devices ; struct list_head __targets ; struct scsi_host_cmd_pool *cmd_pool ; spinlock_t free_list_lock ; struct list_head free_list ; struct list_head starved_list ; spinlock_t default_lock ; spinlock_t *host_lock ; struct mutex scan_mutex ; struct list_head eh_cmd_q ; struct task_struct *ehandler ; struct completion *eh_action ; wait_queue_head_t host_wait ; struct scsi_host_template *hostt ; struct scsi_transport_template *transportt ; struct blk_queue_tag *bqt ; unsigned int host_busy ; unsigned int host_failed ; unsigned int host_eh_scheduled ; unsigned int host_no ; int resetting ; unsigned long last_reset ; unsigned int max_id ; unsigned int max_lun ; unsigned int max_channel ; unsigned int unique_id ; unsigned short max_cmd_len ; int this_id ; int can_queue ; short cmd_per_lun ; unsigned short sg_tablesize ; unsigned short sg_prot_tablesize ; unsigned short max_sectors ; unsigned long dma_boundary ; unsigned long cmd_serial_number ; unsigned char active_mode : 2 ; unsigned char unchecked_isa_dma : 1 ; unsigned char use_clustering : 1 ; unsigned char use_blk_tcq : 1 ; unsigned char host_self_blocked : 1 ; unsigned char reverse_ordering : 1 ; unsigned char ordered_tag : 1 ; unsigned char tmf_in_progress : 1 ; unsigned char async_scan : 1 ; unsigned char eh_noresume : 1 ; char work_q_name[20U] ; struct workqueue_struct *work_q ; unsigned int host_blocked ; unsigned int max_host_blocked ; unsigned int prot_capabilities ; unsigned char prot_guard_type ; struct request_queue *uspace_req_q ; unsigned long base ; unsigned long io_port ; unsigned char n_io_port ; unsigned char dma_channel ; unsigned int irq ; enum scsi_host_state shost_state ; struct device shost_gendev ; struct device shost_dev ; struct list_head sht_legacy_list ; void *shost_data ; struct device *dma_dev ; unsigned long hostdata[0U] ; }; struct hlist_bl_node; struct hlist_bl_head { struct hlist_bl_node *first ; }; struct hlist_bl_node { struct hlist_bl_node *next ; struct hlist_bl_node **pprev ; }; struct __anonstruct_ldv_27182_176 { spinlock_t lock ; unsigned int count ; }; union __anonunion_ldv_27183_175 { struct __anonstruct_ldv_27182_176 ldv_27182 ; }; struct lockref { union __anonunion_ldv_27183_175 ldv_27183 ; }; struct nameidata; struct vfsmount; struct __anonstruct_ldv_27203_178 { u32 hash ; u32 len ; }; union __anonunion_ldv_27205_177 { struct __anonstruct_ldv_27203_178 ldv_27203 ; u64 hash_len ; }; struct qstr { union __anonunion_ldv_27205_177 ldv_27205 ; unsigned char const *name ; }; struct dentry_operations; struct super_block; union __anonunion_d_u_179 { struct list_head d_child ; struct callback_head d_rcu ; }; struct dentry { unsigned int d_flags ; seqcount_t d_seq ; struct hlist_bl_node d_hash ; struct dentry *d_parent ; struct qstr d_name ; struct inode *d_inode ; unsigned char d_iname[32U] ; struct lockref d_lockref ; struct dentry_operations const *d_op ; struct super_block *d_sb ; unsigned long d_time ; void *d_fsdata ; struct list_head d_lru ; union __anonunion_d_u_179 d_u ; struct list_head d_subdirs ; struct 
hlist_node d_alias ; }; struct dentry_operations { int (*d_revalidate)(struct dentry * , unsigned int ) ; int (*d_weak_revalidate)(struct dentry * , unsigned int ) ; int (*d_hash)(struct dentry const * , struct qstr * ) ; int (*d_compare)(struct dentry const * , struct dentry const * , unsigned int , char const * , struct qstr const * ) ; int (*d_delete)(struct dentry const * ) ; void (*d_release)(struct dentry * ) ; void (*d_prune)(struct dentry * ) ; void (*d_iput)(struct dentry * , struct inode * ) ; char *(*d_dname)(struct dentry * , char * , int ) ; struct vfsmount *(*d_automount)(struct path * ) ; int (*d_manage)(struct dentry * , bool ) ; }; struct path { struct vfsmount *mnt ; struct dentry *dentry ; }; struct list_lru_node { spinlock_t lock ; struct list_head list ; long nr_items ; }; struct list_lru { struct list_lru_node *node ; nodemask_t active_nodes ; }; struct radix_tree_node; struct radix_tree_root { unsigned int height ; gfp_t gfp_mask ; struct radix_tree_node *rnode ; }; struct semaphore { raw_spinlock_t lock ; unsigned int count ; struct list_head wait_list ; }; struct fiemap_extent { __u64 fe_logical ; __u64 fe_physical ; __u64 fe_length ; __u64 fe_reserved64[2U] ; __u32 fe_flags ; __u32 fe_reserved[3U] ; }; enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; struct bio_set; struct bio; struct bio_integrity_payload; struct cgroup_subsys_state; typedef void bio_end_io_t(struct bio * , int ); struct bio_vec { struct page *bv_page ; unsigned int bv_len ; unsigned int bv_offset ; }; struct bio { sector_t bi_sector ; struct bio *bi_next ; struct block_device *bi_bdev ; unsigned long bi_flags ; unsigned long bi_rw ; unsigned short bi_vcnt ; unsigned short bi_idx ; unsigned int bi_phys_segments ; unsigned int bi_size ; unsigned int bi_seg_front_size ; unsigned int bi_seg_back_size ; bio_end_io_t *bi_end_io ; void *bi_private ; struct io_context *bi_ioc ; struct cgroup_subsys_state *bi_css ; struct bio_integrity_payload *bi_integrity ; unsigned int bi_max_vecs ; atomic_t bi_cnt ; struct bio_vec *bi_io_vec ; struct bio_set *bi_pool ; struct bio_vec bi_inline_vecs[0U] ; }; struct export_operations; struct hd_geometry; struct iovec; struct kiocb; struct poll_table_struct; struct kstatfs; struct swap_info_struct; struct iattr { unsigned int ia_valid ; umode_t ia_mode ; kuid_t ia_uid ; kgid_t ia_gid ; loff_t ia_size ; struct timespec ia_atime ; struct timespec ia_mtime ; struct timespec ia_ctime ; struct file *ia_file ; }; struct fs_disk_quota { __s8 d_version ; __s8 d_flags ; __u16 d_fieldmask ; __u32 d_id ; __u64 d_blk_hardlimit ; __u64 d_blk_softlimit ; __u64 d_ino_hardlimit ; __u64 d_ino_softlimit ; __u64 d_bcount ; __u64 d_icount ; __s32 d_itimer ; __s32 d_btimer ; __u16 d_iwarns ; __u16 d_bwarns ; __s32 d_padding2 ; __u64 d_rtb_hardlimit ; __u64 d_rtb_softlimit ; __u64 d_rtbcount ; __s32 d_rtbtimer ; __u16 d_rtbwarns ; __s16 d_padding3 ; char d_padding4[8U] ; }; struct fs_qfilestat { __u64 qfs_ino ; __u64 qfs_nblks ; __u32 qfs_nextents ; }; typedef struct fs_qfilestat fs_qfilestat_t; struct fs_quota_stat { __s8 qs_version ; __u16 qs_flags ; __s8 qs_pad ; fs_qfilestat_t qs_uquota ; fs_qfilestat_t qs_gquota ; __u32 qs_incoredqs ; __s32 qs_btimelimit ; __s32 qs_itimelimit ; __s32 qs_rtbtimelimit ; __u16 qs_bwarnlimit ; __u16 qs_iwarnlimit ; }; struct fs_qfilestatv { __u64 qfs_ino ; __u64 qfs_nblks ; __u32 qfs_nextents ; __u32 qfs_pad ; }; struct fs_quota_statv { __s8 qs_version ; __u8 qs_pad1 ; __u16 qs_flags ; __u32 qs_incoredqs ; struct 
fs_qfilestatv qs_uquota ; struct fs_qfilestatv qs_gquota ; struct fs_qfilestatv qs_pquota ; __s32 qs_btimelimit ; __s32 qs_itimelimit ; __s32 qs_rtbtimelimit ; __u16 qs_bwarnlimit ; __u16 qs_iwarnlimit ; __u64 qs_pad2[8U] ; }; struct dquot; typedef __kernel_uid32_t projid_t; struct __anonstruct_kprojid_t_180 { projid_t val ; }; typedef struct __anonstruct_kprojid_t_180 kprojid_t; struct if_dqinfo { __u64 dqi_bgrace ; __u64 dqi_igrace ; __u32 dqi_flags ; __u32 dqi_valid ; }; enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; typedef long long qsize_t; union __anonunion_ldv_28019_181 { kuid_t uid ; kgid_t gid ; kprojid_t projid ; }; struct kqid { union __anonunion_ldv_28019_181 ldv_28019 ; enum quota_type type ; }; struct mem_dqblk { qsize_t dqb_bhardlimit ; qsize_t dqb_bsoftlimit ; qsize_t dqb_curspace ; qsize_t dqb_rsvspace ; qsize_t dqb_ihardlimit ; qsize_t dqb_isoftlimit ; qsize_t dqb_curinodes ; time_t dqb_btime ; time_t dqb_itime ; }; struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format ; int dqi_fmt_id ; struct list_head dqi_dirty_list ; unsigned long dqi_flags ; unsigned int dqi_bgrace ; unsigned int dqi_igrace ; qsize_t dqi_maxblimit ; qsize_t dqi_maxilimit ; void *dqi_priv ; }; struct dquot { struct hlist_node dq_hash ; struct list_head dq_inuse ; struct list_head dq_free ; struct list_head dq_dirty ; struct mutex dq_lock ; atomic_t dq_count ; wait_queue_head_t dq_wait_unused ; struct super_block *dq_sb ; struct kqid dq_id ; loff_t dq_off ; unsigned long dq_flags ; struct mem_dqblk dq_dqb ; }; struct quota_format_ops { int (*check_quota_file)(struct super_block * , int ) ; int (*read_file_info)(struct super_block * , int ) ; int (*write_file_info)(struct super_block * , int ) ; int (*free_file_info)(struct super_block * , int ) ; int (*read_dqblk)(struct dquot * ) ; int (*commit_dqblk)(struct dquot * ) ; int (*release_dqblk)(struct dquot * ) ; }; struct dquot_operations { int (*write_dquot)(struct dquot * ) ; struct dquot *(*alloc_dquot)(struct super_block * , int ) ; void (*destroy_dquot)(struct dquot * ) ; int (*acquire_dquot)(struct dquot * ) ; int (*release_dquot)(struct dquot * ) ; int (*mark_dirty)(struct dquot * ) ; int (*write_info)(struct super_block * , int ) ; qsize_t *(*get_reserved_space)(struct inode * ) ; }; struct quotactl_ops { int (*quota_on)(struct super_block * , int , int , struct path * ) ; int (*quota_on_meta)(struct super_block * , int , int ) ; int (*quota_off)(struct super_block * , int ) ; int (*quota_sync)(struct super_block * , int ) ; int (*get_info)(struct super_block * , int , struct if_dqinfo * ) ; int (*set_info)(struct super_block * , int , struct if_dqinfo * ) ; int (*get_dqblk)(struct super_block * , struct kqid , struct fs_disk_quota * ) ; int (*set_dqblk)(struct super_block * , struct kqid , struct fs_disk_quota * ) ; int (*get_xstate)(struct super_block * , struct fs_quota_stat * ) ; int (*set_xstate)(struct super_block * , unsigned int , int ) ; int (*get_xstatev)(struct super_block * , struct fs_quota_statv * ) ; }; struct quota_format_type { int qf_fmt_id ; struct quota_format_ops const *qf_ops ; struct module *qf_owner ; struct quota_format_type *qf_next ; }; struct quota_info { unsigned int flags ; struct mutex dqio_mutex ; struct mutex dqonoff_mutex ; struct rw_semaphore dqptr_sem ; struct inode *files[2U] ; struct mem_dqinfo info[2U] ; struct quota_format_ops const *ops[2U] ; }; union __anonunion_arg_183 { char *buf ; void *data ; }; struct __anonstruct_read_descriptor_t_182 { size_t written ; 
size_t count ; union __anonunion_arg_183 arg ; int error ; }; typedef struct __anonstruct_read_descriptor_t_182 read_descriptor_t; struct address_space_operations { int (*writepage)(struct page * , struct writeback_control * ) ; int (*readpage)(struct file * , struct page * ) ; int (*writepages)(struct address_space * , struct writeback_control * ) ; int (*set_page_dirty)(struct page * ) ; int (*readpages)(struct file * , struct address_space * , struct list_head * , unsigned int ) ; int (*write_begin)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page ** , void ** ) ; int (*write_end)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page * , void * ) ; sector_t (*bmap)(struct address_space * , sector_t ) ; void (*invalidatepage)(struct page * , unsigned int , unsigned int ) ; int (*releasepage)(struct page * , gfp_t ) ; void (*freepage)(struct page * ) ; ssize_t (*direct_IO)(int , struct kiocb * , struct iovec const * , loff_t , unsigned long ) ; int (*get_xip_mem)(struct address_space * , unsigned long , int , void ** , unsigned long * ) ; int (*migratepage)(struct address_space * , struct page * , struct page * , enum migrate_mode ) ; int (*launder_page)(struct page * ) ; int (*is_partially_uptodate)(struct page * , read_descriptor_t * , unsigned long ) ; void (*is_dirty_writeback)(struct page * , bool * , bool * ) ; int (*error_remove_page)(struct address_space * , struct page * ) ; int (*swap_activate)(struct swap_info_struct * , struct file * , sector_t * ) ; void (*swap_deactivate)(struct file * ) ; }; struct address_space { struct inode *host ; struct radix_tree_root page_tree ; spinlock_t tree_lock ; unsigned int i_mmap_writable ; struct rb_root i_mmap ; struct list_head i_mmap_nonlinear ; struct mutex i_mmap_mutex ; unsigned long nrpages ; unsigned long writeback_index ; struct address_space_operations const *a_ops ; unsigned long flags ; struct backing_dev_info *backing_dev_info ; spinlock_t private_lock ; struct list_head private_list ; void *private_data ; }; struct hd_struct; struct gendisk; struct block_device { dev_t bd_dev ; int bd_openers ; struct inode *bd_inode ; struct super_block *bd_super ; struct mutex bd_mutex ; struct list_head bd_inodes ; void *bd_claiming ; void *bd_holder ; int bd_holders ; bool bd_write_holder ; struct list_head bd_holder_disks ; struct block_device *bd_contains ; unsigned int bd_block_size ; struct hd_struct *bd_part ; unsigned int bd_part_count ; int bd_invalidated ; struct gendisk *bd_disk ; struct request_queue *bd_queue ; struct list_head bd_list ; unsigned long bd_private ; int bd_fsfreeze_count ; struct mutex bd_fsfreeze_mutex ; }; struct posix_acl; struct inode_operations; union __anonunion_ldv_28462_184 { unsigned int const i_nlink ; unsigned int __i_nlink ; }; union __anonunion_ldv_28482_185 { struct hlist_head i_dentry ; struct callback_head i_rcu ; }; struct file_lock; struct cdev; union __anonunion_ldv_28498_186 { struct pipe_inode_info *i_pipe ; struct block_device *i_bdev ; struct cdev *i_cdev ; }; struct inode { umode_t i_mode ; unsigned short i_opflags ; kuid_t i_uid ; kgid_t i_gid ; unsigned int i_flags ; struct posix_acl *i_acl ; struct posix_acl *i_default_acl ; struct inode_operations const *i_op ; struct super_block *i_sb ; struct address_space *i_mapping ; void *i_security ; unsigned long i_ino ; union __anonunion_ldv_28462_184 ldv_28462 ; dev_t i_rdev ; loff_t i_size ; struct timespec i_atime ; struct timespec i_mtime ; struct timespec 
i_ctime ; spinlock_t i_lock ; unsigned short i_bytes ; unsigned int i_blkbits ; blkcnt_t i_blocks ; unsigned long i_state ; struct mutex i_mutex ; unsigned long dirtied_when ; struct hlist_node i_hash ; struct list_head i_wb_list ; struct list_head i_lru ; struct list_head i_sb_list ; union __anonunion_ldv_28482_185 ldv_28482 ; u64 i_version ; atomic_t i_count ; atomic_t i_dio_count ; atomic_t i_writecount ; struct file_operations const *i_fop ; struct file_lock *i_flock ; struct address_space i_data ; struct dquot *i_dquot[2U] ; struct list_head i_devices ; union __anonunion_ldv_28498_186 ldv_28498 ; __u32 i_generation ; __u32 i_fsnotify_mask ; struct hlist_head i_fsnotify_marks ; atomic_t i_readcount ; void *i_private ; }; struct fown_struct { rwlock_t lock ; struct pid *pid ; enum pid_type pid_type ; kuid_t uid ; kuid_t euid ; int signum ; }; struct file_ra_state { unsigned long start ; unsigned int size ; unsigned int async_size ; unsigned int ra_pages ; unsigned int mmap_miss ; loff_t prev_pos ; }; union __anonunion_f_u_187 { struct list_head fu_list ; struct llist_node fu_llist ; struct callback_head fu_rcuhead ; }; struct file { union __anonunion_f_u_187 f_u ; struct path f_path ; struct inode *f_inode ; struct file_operations const *f_op ; spinlock_t f_lock ; int f_sb_list_cpu ; atomic_long_t f_count ; unsigned int f_flags ; fmode_t f_mode ; loff_t f_pos ; struct fown_struct f_owner ; struct cred const *f_cred ; struct file_ra_state f_ra ; u64 f_version ; void *f_security ; void *private_data ; struct list_head f_ep_links ; struct list_head f_tfile_llink ; struct address_space *f_mapping ; unsigned long f_mnt_write_state ; }; typedef struct files_struct *fl_owner_t; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock * , struct file_lock * ) ; void (*fl_release_private)(struct file_lock * ) ; }; struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock * , struct file_lock * ) ; unsigned long (*lm_owner_key)(struct file_lock * ) ; void (*lm_notify)(struct file_lock * ) ; int (*lm_grant)(struct file_lock * , struct file_lock * , int ) ; void (*lm_break)(struct file_lock * ) ; int (*lm_change)(struct file_lock ** , int ) ; }; struct net; struct nlm_lockowner; struct nfs_lock_info { u32 state ; struct nlm_lockowner *owner ; struct list_head list ; }; struct nfs4_lock_state; struct nfs4_lock_info { struct nfs4_lock_state *owner ; }; struct fasync_struct; struct __anonstruct_afs_189 { struct list_head link ; int state ; }; union __anonunion_fl_u_188 { struct nfs_lock_info nfs_fl ; struct nfs4_lock_info nfs4_fl ; struct __anonstruct_afs_189 afs ; }; struct file_lock { struct file_lock *fl_next ; struct hlist_node fl_link ; struct list_head fl_block ; fl_owner_t fl_owner ; unsigned int fl_flags ; unsigned char fl_type ; unsigned int fl_pid ; int fl_link_cpu ; struct pid *fl_nspid ; wait_queue_head_t fl_wait ; struct file *fl_file ; loff_t fl_start ; loff_t fl_end ; struct fasync_struct *fl_fasync ; unsigned long fl_break_time ; unsigned long fl_downgrade_time ; struct file_lock_operations const *fl_ops ; struct lock_manager_operations const *fl_lmops ; union __anonunion_fl_u_188 fl_u ; }; struct fasync_struct { spinlock_t fa_lock ; int magic ; int fa_fd ; struct fasync_struct *fa_next ; struct file *fa_file ; struct callback_head fa_rcu ; }; struct sb_writers { struct percpu_counter counter[3U] ; wait_queue_head_t wait ; int frozen ; wait_queue_head_t wait_unfrozen ; struct lockdep_map lock_map[3U] ; }; struct file_system_type; struct super_operations; 
struct xattr_handler; struct mtd_info; struct super_block { struct list_head s_list ; dev_t s_dev ; unsigned char s_blocksize_bits ; unsigned long s_blocksize ; loff_t s_maxbytes ; struct file_system_type *s_type ; struct super_operations const *s_op ; struct dquot_operations const *dq_op ; struct quotactl_ops const *s_qcop ; struct export_operations const *s_export_op ; unsigned long s_flags ; unsigned long s_magic ; struct dentry *s_root ; struct rw_semaphore s_umount ; int s_count ; atomic_t s_active ; void *s_security ; struct xattr_handler const **s_xattr ; struct list_head s_inodes ; struct hlist_bl_head s_anon ; struct list_head *s_files ; struct list_head s_mounts ; struct block_device *s_bdev ; struct backing_dev_info *s_bdi ; struct mtd_info *s_mtd ; struct hlist_node s_instances ; struct quota_info s_dquot ; struct sb_writers s_writers ; char s_id[32U] ; u8 s_uuid[16U] ; void *s_fs_info ; unsigned int s_max_links ; fmode_t s_mode ; u32 s_time_gran ; struct mutex s_vfs_rename_mutex ; char *s_subtype ; char *s_options ; struct dentry_operations const *s_d_op ; int cleancache_poolid ; struct shrinker s_shrink ; atomic_long_t s_remove_count ; int s_readonly_remount ; struct workqueue_struct *s_dio_done_wq ; struct list_lru s_dentry_lru ; struct list_lru s_inode_lru ; }; struct fiemap_extent_info { unsigned int fi_flags ; unsigned int fi_extents_mapped ; unsigned int fi_extents_max ; struct fiemap_extent *fi_extents_start ; }; struct dir_context { int (*actor)(void * , char const * , int , loff_t , u64 , unsigned int ) ; loff_t pos ; }; struct block_device_operations; struct file_operations { struct module *owner ; loff_t (*llseek)(struct file * , loff_t , int ) ; ssize_t (*read)(struct file * , char * , size_t , loff_t * ) ; ssize_t (*write)(struct file * , char const * , size_t , loff_t * ) ; ssize_t (*aio_read)(struct kiocb * , struct iovec const * , unsigned long , loff_t ) ; ssize_t (*aio_write)(struct kiocb * , struct iovec const * , unsigned long , loff_t ) ; int (*iterate)(struct file * , struct dir_context * ) ; unsigned int (*poll)(struct file * , struct poll_table_struct * ) ; long (*unlocked_ioctl)(struct file * , unsigned int , unsigned long ) ; long (*compat_ioctl)(struct file * , unsigned int , unsigned long ) ; int (*mmap)(struct file * , struct vm_area_struct * ) ; int (*open)(struct inode * , struct file * ) ; int (*flush)(struct file * , fl_owner_t ) ; int (*release)(struct inode * , struct file * ) ; int (*fsync)(struct file * , loff_t , loff_t , int ) ; int (*aio_fsync)(struct kiocb * , int ) ; int (*fasync)(int , struct file * , int ) ; int (*lock)(struct file * , int , struct file_lock * ) ; ssize_t (*sendpage)(struct file * , struct page * , int , size_t , loff_t * , int ) ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; int (*check_flags)(int ) ; int (*flock)(struct file * , int , struct file_lock * ) ; ssize_t (*splice_write)(struct pipe_inode_info * , struct file * , loff_t * , size_t , unsigned int ) ; ssize_t (*splice_read)(struct file * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*setlease)(struct file * , long , struct file_lock ** ) ; long (*fallocate)(struct file * , int , loff_t , loff_t ) ; int (*show_fdinfo)(struct seq_file * , struct file * ) ; }; struct inode_operations { struct dentry *(*lookup)(struct inode * , struct dentry * , unsigned int ) ; void *(*follow_link)(struct dentry * , struct nameidata * ) ; int (*permission)(struct inode * , int ) ; 
struct posix_acl *(*get_acl)(struct inode * , int ) ; int (*readlink)(struct dentry * , char * , int ) ; void (*put_link)(struct dentry * , struct nameidata * , void * ) ; int (*create)(struct inode * , struct dentry * , umode_t , bool ) ; int (*link)(struct dentry * , struct inode * , struct dentry * ) ; int (*unlink)(struct inode * , struct dentry * ) ; int (*symlink)(struct inode * , struct dentry * , char const * ) ; int (*mkdir)(struct inode * , struct dentry * , umode_t ) ; int (*rmdir)(struct inode * , struct dentry * ) ; int (*mknod)(struct inode * , struct dentry * , umode_t , dev_t ) ; int (*rename)(struct inode * , struct dentry * , struct inode * , struct dentry * ) ; int (*setattr)(struct dentry * , struct iattr * ) ; int (*getattr)(struct vfsmount * , struct dentry * , struct kstat * ) ; int (*setxattr)(struct dentry * , char const * , void const * , size_t , int ) ; ssize_t (*getxattr)(struct dentry * , char const * , void * , size_t ) ; ssize_t (*listxattr)(struct dentry * , char * , size_t ) ; int (*removexattr)(struct dentry * , char const * ) ; int (*fiemap)(struct inode * , struct fiemap_extent_info * , u64 , u64 ) ; int (*update_time)(struct inode * , struct timespec * , int ) ; int (*atomic_open)(struct inode * , struct dentry * , struct file * , unsigned int , umode_t , int * ) ; int (*tmpfile)(struct inode * , struct dentry * , umode_t ) ; }; struct super_operations { struct inode *(*alloc_inode)(struct super_block * ) ; void (*destroy_inode)(struct inode * ) ; void (*dirty_inode)(struct inode * , int ) ; int (*write_inode)(struct inode * , struct writeback_control * ) ; int (*drop_inode)(struct inode * ) ; void (*evict_inode)(struct inode * ) ; void (*put_super)(struct super_block * ) ; int (*sync_fs)(struct super_block * , int ) ; int (*freeze_fs)(struct super_block * ) ; int (*unfreeze_fs)(struct super_block * ) ; int (*statfs)(struct dentry * , struct kstatfs * ) ; int (*remount_fs)(struct super_block * , int * , char * ) ; void (*umount_begin)(struct super_block * ) ; int (*show_options)(struct seq_file * , struct dentry * ) ; int (*show_devname)(struct seq_file * , struct dentry * ) ; int (*show_path)(struct seq_file * , struct dentry * ) ; int (*show_stats)(struct seq_file * , struct dentry * ) ; ssize_t (*quota_read)(struct super_block * , int , char * , size_t , loff_t ) ; ssize_t (*quota_write)(struct super_block * , int , char const * , size_t , loff_t ) ; int (*bdev_try_to_free_page)(struct super_block * , struct page * , gfp_t ) ; long (*nr_cached_objects)(struct super_block * , int ) ; long (*free_cached_objects)(struct super_block * , long , int ) ; }; struct file_system_type { char const *name ; int fs_flags ; struct dentry *(*mount)(struct file_system_type * , int , char const * , void * ) ; void (*kill_sb)(struct super_block * ) ; struct module *owner ; struct file_system_type *next ; struct hlist_head fs_supers ; struct lock_class_key s_lock_key ; struct lock_class_key s_umount_key ; struct lock_class_key s_vfs_rename_key ; struct lock_class_key s_writers_key[3U] ; struct lock_class_key i_lock_key ; struct lock_class_key i_mutex_key ; struct lock_class_key i_mutex_dir_key ; }; struct disk_stats { unsigned long sectors[2U] ; unsigned long ios[2U] ; unsigned long merges[2U] ; unsigned long ticks[2U] ; unsigned long io_ticks ; unsigned long time_in_queue ; }; struct partition_meta_info { char uuid[37U] ; u8 volname[64U] ; }; struct hd_struct { sector_t start_sect ; sector_t nr_sects ; seqcount_t nr_sects_seq ; sector_t alignment_offset ; unsigned int 
discard_alignment ; struct device __dev ; struct kobject *holder_dir ; int policy ; int partno ; struct partition_meta_info *info ; int make_it_fail ; unsigned long stamp ; atomic_t in_flight[2U] ; struct disk_stats *dkstats ; atomic_t ref ; struct callback_head callback_head ; }; struct disk_part_tbl { struct callback_head callback_head ; int len ; struct hd_struct *last_lookup ; struct hd_struct *part[] ; }; struct disk_events; struct timer_rand_state; struct blk_integrity; struct gendisk { int major ; int first_minor ; int minors ; char disk_name[32U] ; char *(*devnode)(struct gendisk * , umode_t * ) ; unsigned int events ; unsigned int async_events ; struct disk_part_tbl *part_tbl ; struct hd_struct part0 ; struct block_device_operations const *fops ; struct request_queue *queue ; void *private_data ; int flags ; struct device *driverfs_dev ; struct kobject *slave_dir ; struct timer_rand_state *random ; atomic_t sync_io ; struct disk_events *ev ; struct blk_integrity *integrity ; int node_id ; }; struct fprop_local_percpu { struct percpu_counter events ; unsigned int period ; raw_spinlock_t lock ; }; enum writeback_sync_modes { WB_SYNC_NONE = 0, WB_SYNC_ALL = 1 } ; struct writeback_control { long nr_to_write ; long pages_skipped ; loff_t range_start ; loff_t range_end ; enum writeback_sync_modes sync_mode ; unsigned char for_kupdate : 1 ; unsigned char for_background : 1 ; unsigned char tagged_writepages : 1 ; unsigned char for_reclaim : 1 ; unsigned char range_cyclic : 1 ; unsigned char for_sync : 1 ; }; struct bdi_writeback; typedef int congested_fn(void * , int ); struct bdi_writeback { struct backing_dev_info *bdi ; unsigned int nr ; unsigned long last_old_flush ; struct delayed_work dwork ; struct list_head b_dirty ; struct list_head b_io ; struct list_head b_more_io ; spinlock_t list_lock ; }; struct backing_dev_info { struct list_head bdi_list ; unsigned long ra_pages ; unsigned long state ; unsigned int capabilities ; congested_fn *congested_fn ; void *congested_data ; char *name ; struct percpu_counter bdi_stat[4U] ; unsigned long bw_time_stamp ; unsigned long dirtied_stamp ; unsigned long written_stamp ; unsigned long write_bandwidth ; unsigned long avg_write_bandwidth ; unsigned long dirty_ratelimit ; unsigned long balanced_dirty_ratelimit ; struct fprop_local_percpu completions ; int dirty_exceeded ; unsigned int min_ratio ; unsigned int max_ratio ; unsigned int max_prop_frac ; struct bdi_writeback wb ; spinlock_t wb_lock ; struct list_head work_list ; struct device *dev ; struct timer_list laptop_mode_wb_timer ; struct dentry *debug_dir ; struct dentry *debug_stats ; }; union __anonunion_ldv_31959_190 { struct list_head q_node ; struct kmem_cache *__rcu_icq_cache ; }; union __anonunion_ldv_31963_191 { struct hlist_node ioc_node ; struct callback_head __rcu_head ; }; struct io_cq { struct request_queue *q ; struct io_context *ioc ; union __anonunion_ldv_31959_190 ldv_31959 ; union __anonunion_ldv_31963_191 ldv_31963 ; unsigned int flags ; }; struct io_context { atomic_long_t refcount ; atomic_t active_ref ; atomic_t nr_tasks ; spinlock_t lock ; unsigned short ioprio ; int nr_batch_requests ; unsigned long last_waited ; struct radix_tree_root icq_tree ; struct io_cq *icq_hint ; struct hlist_head icq_list ; struct work_struct release_work ; }; struct bio_integrity_payload { struct bio *bip_bio ; sector_t bip_sector ; void *bip_buf ; bio_end_io_t *bip_end_io ; unsigned int bip_size ; unsigned short bip_slab ; unsigned short bip_vcnt ; unsigned short bip_idx ; unsigned char 
bip_owns_buf : 1 ; struct work_struct bip_work ; struct bio_vec *bip_vec ; struct bio_vec bip_inline_vecs[0U] ; }; struct bio_list { struct bio *head ; struct bio *tail ; }; struct bio_set { struct kmem_cache *bio_slab ; unsigned int front_pad ; mempool_t *bio_pool ; mempool_t *bvec_pool ; mempool_t *bio_integrity_pool ; mempool_t *bvec_integrity_pool ; spinlock_t rescue_lock ; struct bio_list rescue_list ; struct work_struct rescue_work ; struct workqueue_struct *rescue_workqueue ; }; struct bsg_class_device { struct device *class_dev ; struct device *parent ; int minor ; struct request_queue *queue ; struct kref ref ; void (*release)(struct device * ) ; }; struct elevator_queue; struct request; struct bsg_job; struct blkcg_gq; typedef void rq_end_io_fn(struct request * , int ); struct request_list { struct request_queue *q ; struct blkcg_gq *blkg ; int count[2U] ; int starved[2U] ; mempool_t *rq_pool ; wait_queue_head_t wait[2U] ; unsigned int flags ; }; enum rq_cmd_type_bits { REQ_TYPE_FS = 1, REQ_TYPE_BLOCK_PC = 2, REQ_TYPE_SENSE = 3, REQ_TYPE_PM_SUSPEND = 4, REQ_TYPE_PM_RESUME = 5, REQ_TYPE_PM_SHUTDOWN = 6, REQ_TYPE_SPECIAL = 7, REQ_TYPE_ATA_TASKFILE = 8, REQ_TYPE_ATA_PC = 9 } ; union __anonunion_ldv_32429_192 { struct rb_node rb_node ; void *completion_data ; }; struct __anonstruct_elv_194 { struct io_cq *icq ; void *priv[2U] ; }; struct __anonstruct_flush_195 { unsigned int seq ; struct list_head list ; rq_end_io_fn *saved_end_io ; }; union __anonunion_ldv_32440_193 { struct __anonstruct_elv_194 elv ; struct __anonstruct_flush_195 flush ; }; struct request { struct list_head queuelist ; struct call_single_data csd ; struct request_queue *q ; unsigned int cmd_flags ; enum rq_cmd_type_bits cmd_type ; unsigned long atomic_flags ; int cpu ; unsigned int __data_len ; sector_t __sector ; struct bio *bio ; struct bio *biotail ; struct hlist_node hash ; union __anonunion_ldv_32429_192 ldv_32429 ; union __anonunion_ldv_32440_193 ldv_32440 ; struct gendisk *rq_disk ; struct hd_struct *part ; unsigned long start_time ; struct request_list *rl ; unsigned long long start_time_ns ; unsigned long long io_start_time_ns ; unsigned short nr_phys_segments ; unsigned short nr_integrity_segments ; unsigned short ioprio ; int ref_count ; void *special ; char *buffer ; int tag ; int errors ; unsigned char __cmd[16U] ; unsigned char *cmd ; unsigned short cmd_len ; unsigned int extra_len ; unsigned int sense_len ; unsigned int resid_len ; void *sense ; unsigned long deadline ; struct list_head timeout_list ; unsigned int timeout ; int retries ; rq_end_io_fn *end_io ; void *end_io_data ; struct request *next_rq ; }; struct elevator_type; typedef int elevator_merge_fn(struct request_queue * , struct request ** , struct bio * ); typedef void elevator_merge_req_fn(struct request_queue * , struct request * , struct request * ); typedef void elevator_merged_fn(struct request_queue * , struct request * , int ); typedef int elevator_allow_merge_fn(struct request_queue * , struct request * , struct bio * ); typedef void elevator_bio_merged_fn(struct request_queue * , struct request * , struct bio * ); typedef int elevator_dispatch_fn(struct request_queue * , int ); typedef void elevator_add_req_fn(struct request_queue * , struct request * ); typedef struct request *elevator_request_list_fn(struct request_queue * , struct request * ); typedef void elevator_completed_req_fn(struct request_queue * , struct request * ); typedef int elevator_may_queue_fn(struct request_queue * , int ); typedef void 
elevator_init_icq_fn(struct io_cq * ); typedef void elevator_exit_icq_fn(struct io_cq * ); typedef int elevator_set_req_fn(struct request_queue * , struct request * , struct bio * , gfp_t ); typedef void elevator_put_req_fn(struct request * ); typedef void elevator_activate_req_fn(struct request_queue * , struct request * ); typedef void elevator_deactivate_req_fn(struct request_queue * , struct request * ); typedef int elevator_init_fn(struct request_queue * , struct elevator_type * ); typedef void elevator_exit_fn(struct elevator_queue * ); struct elevator_ops { elevator_merge_fn *elevator_merge_fn ; elevator_merged_fn *elevator_merged_fn ; elevator_merge_req_fn *elevator_merge_req_fn ; elevator_allow_merge_fn *elevator_allow_merge_fn ; elevator_bio_merged_fn *elevator_bio_merged_fn ; elevator_dispatch_fn *elevator_dispatch_fn ; elevator_add_req_fn *elevator_add_req_fn ; elevator_activate_req_fn *elevator_activate_req_fn ; elevator_deactivate_req_fn *elevator_deactivate_req_fn ; elevator_completed_req_fn *elevator_completed_req_fn ; elevator_request_list_fn *elevator_former_req_fn ; elevator_request_list_fn *elevator_latter_req_fn ; elevator_init_icq_fn *elevator_init_icq_fn ; elevator_exit_icq_fn *elevator_exit_icq_fn ; elevator_set_req_fn *elevator_set_req_fn ; elevator_put_req_fn *elevator_put_req_fn ; elevator_may_queue_fn *elevator_may_queue_fn ; elevator_init_fn *elevator_init_fn ; elevator_exit_fn *elevator_exit_fn ; }; struct elv_fs_entry { struct attribute attr ; ssize_t (*show)(struct elevator_queue * , char * ) ; ssize_t (*store)(struct elevator_queue * , char const * , size_t ) ; }; struct elevator_type { struct kmem_cache *icq_cache ; struct elevator_ops ops ; size_t icq_size ; size_t icq_align ; struct elv_fs_entry *elevator_attrs ; char elevator_name[16U] ; struct module *elevator_owner ; char icq_cache_name[21U] ; struct list_head list ; }; struct elevator_queue { struct elevator_type *type ; void *elevator_data ; struct kobject kobj ; struct mutex sysfs_lock ; unsigned char registered : 1 ; struct hlist_head hash[64U] ; }; typedef void request_fn_proc(struct request_queue * ); typedef void make_request_fn(struct request_queue * , struct bio * ); typedef int prep_rq_fn(struct request_queue * , struct request * ); typedef void unprep_rq_fn(struct request_queue * , struct request * ); struct bvec_merge_data { struct block_device *bi_bdev ; sector_t bi_sector ; unsigned int bi_size ; unsigned long bi_rw ; }; typedef int merge_bvec_fn(struct request_queue * , struct bvec_merge_data * , struct bio_vec * ); typedef void softirq_done_fn(struct request * ); typedef int dma_drain_needed_fn(struct request * ); typedef int lld_busy_fn(struct request_queue * ); typedef int bsg_job_fn(struct bsg_job * ); enum blk_eh_timer_return { BLK_EH_NOT_HANDLED = 0, BLK_EH_HANDLED = 1, BLK_EH_RESET_TIMER = 2 } ; typedef enum blk_eh_timer_return rq_timed_out_fn(struct request * ); struct blk_queue_tag { struct request **tag_index ; unsigned long *tag_map ; int busy ; int max_depth ; int real_max_depth ; atomic_t refcnt ; }; struct queue_limits { unsigned long bounce_pfn ; unsigned long seg_boundary_mask ; unsigned int max_hw_sectors ; unsigned int max_sectors ; unsigned int max_segment_size ; unsigned int physical_block_size ; unsigned int alignment_offset ; unsigned int io_min ; unsigned int io_opt ; unsigned int max_discard_sectors ; unsigned int max_write_same_sectors ; unsigned int discard_granularity ; unsigned int discard_alignment ; unsigned short logical_block_size ; unsigned short 
max_segments ; unsigned short max_integrity_segments ; unsigned char misaligned ; unsigned char discard_misaligned ; unsigned char cluster ; unsigned char discard_zeroes_data ; }; struct throtl_data; struct request_queue { struct list_head queue_head ; struct request *last_merge ; struct elevator_queue *elevator ; int nr_rqs[2U] ; int nr_rqs_elvpriv ; struct request_list root_rl ; request_fn_proc *request_fn ; make_request_fn *make_request_fn ; prep_rq_fn *prep_rq_fn ; unprep_rq_fn *unprep_rq_fn ; merge_bvec_fn *merge_bvec_fn ; softirq_done_fn *softirq_done_fn ; rq_timed_out_fn *rq_timed_out_fn ; dma_drain_needed_fn *dma_drain_needed ; lld_busy_fn *lld_busy_fn ; sector_t end_sector ; struct request *boundary_rq ; struct delayed_work delay_work ; struct backing_dev_info backing_dev_info ; void *queuedata ; unsigned long queue_flags ; int id ; gfp_t bounce_gfp ; spinlock_t __queue_lock ; spinlock_t *queue_lock ; struct kobject kobj ; struct device *dev ; int rpm_status ; unsigned int nr_pending ; unsigned long nr_requests ; unsigned int nr_congestion_on ; unsigned int nr_congestion_off ; unsigned int nr_batching ; unsigned int dma_drain_size ; void *dma_drain_buffer ; unsigned int dma_pad_mask ; unsigned int dma_alignment ; struct blk_queue_tag *queue_tags ; struct list_head tag_busy_list ; unsigned int nr_sorted ; unsigned int in_flight[2U] ; unsigned int request_fn_active ; unsigned int rq_timeout ; struct timer_list timeout ; struct list_head timeout_list ; struct list_head icq_list ; unsigned long blkcg_pols[1U] ; struct blkcg_gq *root_blkg ; struct list_head blkg_list ; struct queue_limits limits ; unsigned int sg_timeout ; unsigned int sg_reserved_size ; int node ; unsigned int flush_flags ; unsigned char flush_not_queueable : 1 ; unsigned char flush_queue_delayed : 1 ; unsigned char flush_pending_idx : 1 ; unsigned char flush_running_idx : 1 ; unsigned long flush_pending_since ; struct list_head flush_queue[2U] ; struct list_head flush_data_in_flight ; struct request flush_rq ; struct mutex sysfs_lock ; int bypass_depth ; bsg_job_fn *bsg_job_fn ; int bsg_job_size ; struct bsg_class_device bsg_dev ; struct list_head all_q_node ; struct throtl_data *td ; struct callback_head callback_head ; }; struct blk_plug { unsigned long magic ; struct list_head list ; struct list_head cb_list ; }; struct blk_integrity_exchg { void *prot_buf ; void *data_buf ; sector_t sector ; unsigned int data_size ; unsigned short sector_size ; char const *disk_name ; }; typedef void integrity_gen_fn(struct blk_integrity_exchg * ); typedef int integrity_vrfy_fn(struct blk_integrity_exchg * ); typedef void integrity_set_tag_fn(void * , void * , unsigned int ); typedef void integrity_get_tag_fn(void * , void * , unsigned int ); struct blk_integrity { integrity_gen_fn *generate_fn ; integrity_vrfy_fn *verify_fn ; integrity_set_tag_fn *set_tag_fn ; integrity_get_tag_fn *get_tag_fn ; unsigned short flags ; unsigned short tuple_size ; unsigned short sector_size ; unsigned short tag_size ; char const *name ; struct kobject kobj ; }; struct block_device_operations { int (*open)(struct block_device * , fmode_t ) ; void (*release)(struct gendisk * , fmode_t ) ; int (*ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; int (*direct_access)(struct block_device * , sector_t , void ** , unsigned long * ) ; unsigned int (*check_events)(struct gendisk * , unsigned int ) ; int (*media_changed)(struct gendisk * ) ; void 
(*unlock_native_capacity)(struct gendisk * ) ; int (*revalidate_disk)(struct gendisk * ) ; int (*getgeo)(struct block_device * , struct hd_geometry * ) ; void (*swap_slot_free_notify)(struct block_device * , unsigned long ) ; struct module *owner ; }; struct scsi_sense_hdr; enum scsi_device_state { SDEV_CREATED = 1, SDEV_RUNNING = 2, SDEV_CANCEL = 3, SDEV_DEL = 4, SDEV_QUIESCE = 5, SDEV_OFFLINE = 6, SDEV_TRANSPORT_OFFLINE = 7, SDEV_BLOCK = 8, SDEV_CREATED_BLOCK = 9 } ; struct scsi_dh_data; struct scsi_device { struct Scsi_Host *host ; struct request_queue *request_queue ; struct list_head siblings ; struct list_head same_target_siblings ; unsigned int device_busy ; spinlock_t list_lock ; struct list_head cmd_list ; struct list_head starved_entry ; struct scsi_cmnd *current_cmnd ; unsigned short queue_depth ; unsigned short max_queue_depth ; unsigned short last_queue_full_depth ; unsigned short last_queue_full_count ; unsigned long last_queue_full_time ; unsigned long queue_ramp_up_period ; unsigned long last_queue_ramp_up ; unsigned int id ; unsigned int lun ; unsigned int channel ; unsigned int manufacturer ; unsigned int sector_size ; void *hostdata ; char type ; char scsi_level ; char inq_periph_qual ; unsigned char inquiry_len ; unsigned char *inquiry ; char const *vendor ; char const *model ; char const *rev ; unsigned char current_tag ; struct scsi_target *sdev_target ; unsigned int sdev_bflags ; unsigned int eh_timeout ; unsigned char writeable : 1 ; unsigned char removable : 1 ; unsigned char changed : 1 ; unsigned char busy : 1 ; unsigned char lockable : 1 ; unsigned char locked : 1 ; unsigned char borken : 1 ; unsigned char disconnect : 1 ; unsigned char soft_reset : 1 ; unsigned char sdtr : 1 ; unsigned char wdtr : 1 ; unsigned char ppr : 1 ; unsigned char tagged_supported : 1 ; unsigned char simple_tags : 1 ; unsigned char ordered_tags : 1 ; unsigned char was_reset : 1 ; unsigned char expecting_cc_ua : 1 ; unsigned char use_10_for_rw : 1 ; unsigned char use_10_for_ms : 1 ; unsigned char no_report_opcodes : 1 ; unsigned char no_write_same : 1 ; unsigned char use_16_for_rw : 1 ; unsigned char skip_ms_page_8 : 1 ; unsigned char skip_ms_page_3f : 1 ; unsigned char skip_vpd_pages : 1 ; unsigned char use_192_bytes_for_3f : 1 ; unsigned char no_start_on_add : 1 ; unsigned char allow_restart : 1 ; unsigned char manage_start_stop : 1 ; unsigned char start_stop_pwr_cond : 1 ; unsigned char no_uld_attach : 1 ; unsigned char select_no_atn : 1 ; unsigned char fix_capacity : 1 ; unsigned char guess_capacity : 1 ; unsigned char retry_hwerror : 1 ; unsigned char last_sector_bug : 1 ; unsigned char no_read_disc_info : 1 ; unsigned char no_read_capacity_16 : 1 ; unsigned char try_rc_10_first : 1 ; unsigned char is_visible : 1 ; unsigned char wce_default_on : 1 ; unsigned char no_dif : 1 ; atomic_t disk_events_disable_depth ; unsigned long supported_events[1U] ; unsigned long pending_events[1U] ; struct list_head event_list ; struct work_struct event_work ; unsigned int device_blocked ; unsigned int max_device_blocked ; atomic_t iorequest_cnt ; atomic_t iodone_cnt ; atomic_t ioerr_cnt ; struct device sdev_gendev ; struct device sdev_dev ; struct execute_work ew ; struct work_struct requeue_work ; struct scsi_dh_data *scsi_dh_data ; enum scsi_device_state sdev_state ; unsigned long sdev_data[0U] ; }; struct scsi_dh_devlist { char *vendor ; char *model ; }; struct scsi_device_handler { struct list_head list ; struct module *module ; char const *name ; struct scsi_dh_devlist const *devlist ; int 
(*check_sense)(struct scsi_device * , struct scsi_sense_hdr * ) ; int (*attach)(struct scsi_device * ) ; void (*detach)(struct scsi_device * ) ; int (*activate)(struct scsi_device * , void (*)(void * , int ) , void * ) ; int (*prep_fn)(struct scsi_device * , struct request * ) ; int (*set_params)(struct scsi_device * , char const * ) ; bool (*match)(struct scsi_device * ) ; }; struct scsi_dh_data { struct scsi_device_handler *scsi_dh ; struct scsi_device *sdev ; struct kref kref ; char buf[0U] ; }; enum scsi_target_state { STARGET_CREATED = 1, STARGET_RUNNING = 2, STARGET_DEL = 3 } ; struct scsi_target { struct scsi_device *starget_sdev_user ; struct list_head siblings ; struct list_head devices ; struct device dev ; unsigned int reap_ref ; unsigned int channel ; unsigned int id ; unsigned char create : 1 ; unsigned char single_lun : 1 ; unsigned char pdt_1f_for_no_lun : 1 ; unsigned char no_report_luns : 1 ; unsigned char expecting_lun_change : 1 ; unsigned int target_busy ; unsigned int can_queue ; unsigned int target_blocked ; unsigned int max_target_blocked ; char scsi_level ; struct execute_work ew ; enum scsi_target_state state ; void *hostdata ; unsigned long starget_data[0U] ; }; struct scsi_data_buffer { struct sg_table table ; unsigned int length ; int resid ; }; struct scsi_pointer { char *ptr ; int this_residual ; struct scatterlist *buffer ; int buffers_residual ; dma_addr_t dma_handle ; int volatile Status ; int volatile Message ; int volatile have_data_in ; int volatile sent_command ; int volatile phase ; }; struct scsi_cmnd { struct scsi_device *device ; struct list_head list ; struct list_head eh_entry ; int eh_eflags ; unsigned long serial_number ; unsigned long jiffies_at_alloc ; int retries ; int allowed ; unsigned char prot_op ; unsigned char prot_type ; unsigned short cmd_len ; enum dma_data_direction sc_data_direction ; unsigned char *cmnd ; struct scsi_data_buffer sdb ; struct scsi_data_buffer *prot_sdb ; unsigned int underflow ; unsigned int transfersize ; struct request *request ; unsigned char *sense_buffer ; void (*scsi_done)(struct scsi_cmnd * ) ; struct scsi_pointer SCp ; unsigned char *host_scribble ; int result ; unsigned char tag ; }; struct iovec { void *iov_base ; __kernel_size_t iov_len ; }; typedef unsigned short __kernel_sa_family_t; typedef __kernel_sa_family_t sa_family_t; struct sockaddr { sa_family_t sa_family ; char sa_data[14U] ; }; struct msghdr { void *msg_name ; int msg_namelen ; struct iovec *msg_iov ; __kernel_size_t msg_iovlen ; void *msg_control ; __kernel_size_t msg_controllen ; unsigned int msg_flags ; }; enum ldv_25421 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ; typedef enum ldv_25421 socket_state; struct socket_wq { wait_queue_head_t wait ; struct fasync_struct *fasync_list ; struct callback_head rcu ; }; struct proto_ops; struct socket { socket_state state ; short type ; unsigned long flags ; struct socket_wq *wq ; struct file *file ; struct sock *sk ; struct proto_ops const *ops ; }; struct proto_ops { int family ; struct module *owner ; int (*release)(struct socket * ) ; int (*bind)(struct socket * , struct sockaddr * , int ) ; int (*connect)(struct socket * , struct sockaddr * , int , int ) ; int (*socketpair)(struct socket * , struct socket * ) ; int (*accept)(struct socket * , struct socket * , int ) ; int (*getname)(struct socket * , struct sockaddr * , int * , int ) ; unsigned int (*poll)(struct file * , struct socket * , struct poll_table_struct * ) ; int (*ioctl)(struct 
socket * , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*listen)(struct socket * , int ) ; int (*shutdown)(struct socket * , int ) ; int (*setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct socket * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct socket * , int , int , char * , int * ) ; int (*sendmsg)(struct kiocb * , struct socket * , struct msghdr * , size_t ) ; int (*recvmsg)(struct kiocb * , struct socket * , struct msghdr * , size_t , int ) ; int (*mmap)(struct file * , struct socket * , struct vm_area_struct * ) ; ssize_t (*sendpage)(struct socket * , struct page * , int , size_t , int ) ; ssize_t (*splice_read)(struct socket * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; void (*set_peek_off)(struct sock * , int ) ; }; struct in6_addr; struct sk_buff; typedef s32 dma_cookie_t; typedef u64 netdev_features_t; struct nf_conntrack { atomic_t use ; }; struct nf_bridge_info { atomic_t use ; unsigned int mask ; struct net_device *physindev ; struct net_device *physoutdev ; unsigned long data[4U] ; }; struct sk_buff_head { struct sk_buff *next ; struct sk_buff *prev ; __u32 qlen ; spinlock_t lock ; }; typedef unsigned int sk_buff_data_t; struct sec_path; struct __anonstruct_ldv_35549_201 { __u16 csum_start ; __u16 csum_offset ; }; union __anonunion_ldv_35550_200 { __wsum csum ; struct __anonstruct_ldv_35549_201 ldv_35549 ; }; union __anonunion_ldv_35587_202 { unsigned int napi_id ; dma_cookie_t dma_cookie ; }; union __anonunion_ldv_35593_203 { __u32 mark ; __u32 dropcount ; __u32 reserved_tailroom ; }; struct sk_buff { struct sk_buff *next ; struct sk_buff *prev ; ktime_t tstamp ; struct sock *sk ; struct net_device *dev ; char cb[48U] ; unsigned long _skb_refdst ; struct sec_path *sp ; unsigned int len ; unsigned int data_len ; __u16 mac_len ; __u16 hdr_len ; union __anonunion_ldv_35550_200 ldv_35550 ; __u32 priority ; unsigned char local_df : 1 ; unsigned char cloned : 1 ; unsigned char ip_summed : 2 ; unsigned char nohdr : 1 ; unsigned char nfctinfo : 3 ; unsigned char pkt_type : 3 ; unsigned char fclone : 2 ; unsigned char ipvs_property : 1 ; unsigned char peeked : 1 ; unsigned char nf_trace : 1 ; __be16 protocol ; void (*destructor)(struct sk_buff * ) ; struct nf_conntrack *nfct ; struct sk_buff *nfct_reasm ; struct nf_bridge_info *nf_bridge ; int skb_iif ; __u32 rxhash ; __be16 vlan_proto ; __u16 vlan_tci ; __u16 tc_index ; __u16 tc_verd ; __u16 queue_mapping ; unsigned char ndisc_nodetype : 2 ; unsigned char pfmemalloc : 1 ; unsigned char ooo_okay : 1 ; unsigned char l4_rxhash : 1 ; unsigned char wifi_acked_valid : 1 ; unsigned char wifi_acked : 1 ; unsigned char no_fcs : 1 ; unsigned char head_frag : 1 ; unsigned char encapsulation : 1 ; union __anonunion_ldv_35587_202 ldv_35587 ; __u32 secmark ; union __anonunion_ldv_35593_203 ldv_35593 ; __be16 inner_protocol ; __u16 inner_transport_header ; __u16 inner_network_header ; __u16 inner_mac_header ; __u16 transport_header ; __u16 network_header ; __u16 mac_header ; sk_buff_data_t tail ; sk_buff_data_t end ; unsigned char *head ; unsigned char *data ; unsigned int truesize ; atomic_t users ; }; struct dst_entry; struct rtable; struct xfrm_policy; struct xfrm_state; struct request_sock; struct mnt_namespace; struct ipc_namespace; struct nsproxy { atomic_t count ; struct uts_namespace *uts_ns ; struct ipc_namespace 
*ipc_ns ; struct mnt_namespace *mnt_ns ; struct pid_namespace *pid_ns_for_children ; struct net *net_ns ; }; struct nlmsghdr { __u32 nlmsg_len ; __u16 nlmsg_type ; __u16 nlmsg_flags ; __u32 nlmsg_seq ; __u32 nlmsg_pid ; }; struct nlattr { __u16 nla_len ; __u16 nla_type ; }; struct netlink_callback { struct sk_buff *skb ; struct nlmsghdr const *nlh ; int (*dump)(struct sk_buff * , struct netlink_callback * ) ; int (*done)(struct netlink_callback * ) ; void *data ; struct module *module ; u16 family ; u16 min_dump_alloc ; unsigned int prev_seq ; unsigned int seq ; long args[6U] ; }; enum fc_port_type { FC_PORTTYPE_UNKNOWN = 0, FC_PORTTYPE_OTHER = 1, FC_PORTTYPE_NOTPRESENT = 2, FC_PORTTYPE_NPORT = 3, FC_PORTTYPE_NLPORT = 4, FC_PORTTYPE_LPORT = 5, FC_PORTTYPE_PTP = 6, FC_PORTTYPE_NPIV = 7 } ; enum fc_port_state { FC_PORTSTATE_UNKNOWN = 0, FC_PORTSTATE_NOTPRESENT = 1, FC_PORTSTATE_ONLINE = 2, FC_PORTSTATE_OFFLINE = 3, FC_PORTSTATE_BLOCKED = 4, FC_PORTSTATE_BYPASSED = 5, FC_PORTSTATE_DIAGNOSTICS = 6, FC_PORTSTATE_LINKDOWN = 7, FC_PORTSTATE_ERROR = 8, FC_PORTSTATE_LOOPBACK = 9, FC_PORTSTATE_DELETED = 10 } ; enum fc_vport_state { FC_VPORT_UNKNOWN = 0, FC_VPORT_ACTIVE = 1, FC_VPORT_DISABLED = 2, FC_VPORT_LINKDOWN = 3, FC_VPORT_INITIALIZING = 4, FC_VPORT_NO_FABRIC_SUPP = 5, FC_VPORT_NO_FABRIC_RSCS = 6, FC_VPORT_FABRIC_LOGOUT = 7, FC_VPORT_FABRIC_REJ_WWN = 8, FC_VPORT_FAILED = 9 } ; struct fc_vport { enum fc_vport_state vport_state ; enum fc_vport_state vport_last_state ; u64 node_name ; u64 port_name ; u32 roles ; u32 vport_id ; enum fc_port_type vport_type ; char symbolic_name[64U] ; void *dd_data ; struct Scsi_Host *shost ; unsigned int channel ; u32 number ; u8 flags ; struct list_head peers ; struct device dev ; struct work_struct vport_delete_work ; }; struct fc_rport { u32 maxframe_size ; u32 supported_classes ; u32 dev_loss_tmo ; u64 node_name ; u64 port_name ; u32 port_id ; u32 roles ; enum fc_port_state port_state ; u32 scsi_target_id ; u32 fast_io_fail_tmo ; void *dd_data ; unsigned int channel ; u32 number ; u8 flags ; struct list_head peers ; struct device dev ; struct delayed_work dev_loss_work ; struct work_struct scan_work ; struct delayed_work fail_io_work ; struct work_struct stgt_delete_work ; struct work_struct rport_delete_work ; struct request_queue *rqst_q ; }; struct fc_host_statistics { u64 seconds_since_last_reset ; u64 tx_frames ; u64 tx_words ; u64 rx_frames ; u64 rx_words ; u64 lip_count ; u64 nos_count ; u64 error_frames ; u64 dumped_frames ; u64 link_failure_count ; u64 loss_of_sync_count ; u64 loss_of_signal_count ; u64 prim_seq_protocol_err_count ; u64 invalid_tx_word_count ; u64 invalid_crc_count ; u64 fcp_input_requests ; u64 fcp_output_requests ; u64 fcp_control_requests ; u64 fcp_input_megabytes ; u64 fcp_output_megabytes ; u64 fcp_packet_alloc_failures ; u64 fcp_packet_aborts ; u64 fcp_frame_alloc_failures ; u64 fc_no_free_exch ; u64 fc_no_free_exch_xid ; u64 fc_xid_not_found ; u64 fc_xid_busy ; u64 fc_seq_not_found ; u64 fc_non_bls_resp ; }; enum fc_host_event_code { FCH_EVT_LIP = 1, FCH_EVT_LINKUP = 2, FCH_EVT_LINKDOWN = 3, FCH_EVT_LIPRESET = 4, FCH_EVT_RSCN = 5, FCH_EVT_ADAPTER_CHANGE = 259, FCH_EVT_PORT_UNKNOWN = 512, FCH_EVT_PORT_OFFLINE = 513, FCH_EVT_PORT_ONLINE = 514, FCH_EVT_PORT_FABRIC = 516, FCH_EVT_LINK_UNKNOWN = 1280, FCH_EVT_VENDOR_UNIQUE = 65535 } ; struct fc_bsg_buffer { unsigned int payload_len ; int sg_cnt ; struct scatterlist *sg_list ; }; struct fc_bsg_request; struct fc_bsg_reply; struct fc_bsg_job { struct Scsi_Host *shost ; struct fc_rport 
*rport ; struct device *dev ; struct request *req ; spinlock_t job_lock ; unsigned int state_flags ; unsigned int ref_cnt ; void (*job_done)(struct fc_bsg_job * ) ; struct fc_bsg_request *request ; struct fc_bsg_reply *reply ; unsigned int request_len ; unsigned int reply_len ; struct fc_bsg_buffer request_payload ; struct fc_bsg_buffer reply_payload ; void *dd_data ; }; struct fc_function_template { void (*get_rport_dev_loss_tmo)(struct fc_rport * ) ; void (*set_rport_dev_loss_tmo)(struct fc_rport * , u32 ) ; void (*get_starget_node_name)(struct scsi_target * ) ; void (*get_starget_port_name)(struct scsi_target * ) ; void (*get_starget_port_id)(struct scsi_target * ) ; void (*get_host_port_id)(struct Scsi_Host * ) ; void (*get_host_port_type)(struct Scsi_Host * ) ; void (*get_host_port_state)(struct Scsi_Host * ) ; void (*get_host_active_fc4s)(struct Scsi_Host * ) ; void (*get_host_speed)(struct Scsi_Host * ) ; void (*get_host_fabric_name)(struct Scsi_Host * ) ; void (*get_host_symbolic_name)(struct Scsi_Host * ) ; void (*set_host_system_hostname)(struct Scsi_Host * ) ; struct fc_host_statistics *(*get_fc_host_stats)(struct Scsi_Host * ) ; void (*reset_fc_host_stats)(struct Scsi_Host * ) ; int (*issue_fc_host_lip)(struct Scsi_Host * ) ; void (*dev_loss_tmo_callbk)(struct fc_rport * ) ; void (*terminate_rport_io)(struct fc_rport * ) ; void (*set_vport_symbolic_name)(struct fc_vport * ) ; int (*vport_create)(struct fc_vport * , bool ) ; int (*vport_disable)(struct fc_vport * , bool ) ; int (*vport_delete)(struct fc_vport * ) ; int (*tsk_mgmt_response)(struct Scsi_Host * , u64 , u64 , int ) ; int (*it_nexus_response)(struct Scsi_Host * , u64 , int ) ; int (*bsg_request)(struct fc_bsg_job * ) ; int (*bsg_timeout)(struct fc_bsg_job * ) ; u32 dd_fcrport_size ; u32 dd_fcvport_size ; u32 dd_bsg_size ; unsigned char show_rport_maxframe_size : 1 ; unsigned char show_rport_supported_classes : 1 ; unsigned char show_rport_dev_loss_tmo : 1 ; unsigned char show_starget_node_name : 1 ; unsigned char show_starget_port_name : 1 ; unsigned char show_starget_port_id : 1 ; unsigned char show_host_node_name : 1 ; unsigned char show_host_port_name : 1 ; unsigned char show_host_permanent_port_name : 1 ; unsigned char show_host_supported_classes : 1 ; unsigned char show_host_supported_fc4s : 1 ; unsigned char show_host_supported_speeds : 1 ; unsigned char show_host_maxframe_size : 1 ; unsigned char show_host_serial_number : 1 ; unsigned char show_host_manufacturer : 1 ; unsigned char show_host_model : 1 ; unsigned char show_host_model_description : 1 ; unsigned char show_host_hardware_version : 1 ; unsigned char show_host_driver_version : 1 ; unsigned char show_host_firmware_version : 1 ; unsigned char show_host_optionrom_version : 1 ; unsigned char show_host_port_id : 1 ; unsigned char show_host_port_type : 1 ; unsigned char show_host_port_state : 1 ; unsigned char show_host_active_fc4s : 1 ; unsigned char show_host_speed : 1 ; unsigned char show_host_fabric_name : 1 ; unsigned char show_host_symbolic_name : 1 ; unsigned char show_host_system_hostname : 1 ; unsigned char disable_target_scan : 1 ; }; struct fc_bsg_host_add_rport { uint8_t reserved ; uint8_t port_id[3U] ; }; struct fc_bsg_host_del_rport { uint8_t reserved ; uint8_t port_id[3U] ; }; struct fc_bsg_host_els { uint8_t command_code ; uint8_t port_id[3U] ; }; struct __anonstruct_rjt_data_204 { uint8_t action ; uint8_t reason_code ; uint8_t reason_explanation ; uint8_t vendor_unique ; }; struct fc_bsg_ctels_reply { uint32_t status ; struct 
__anonstruct_rjt_data_204 rjt_data ; }; struct fc_bsg_host_ct { uint8_t reserved ; uint8_t port_id[3U] ; uint32_t preamble_word0 ; uint32_t preamble_word1 ; uint32_t preamble_word2 ; }; struct fc_bsg_host_vendor { uint64_t vendor_id ; uint32_t vendor_cmd[0U] ; }; struct fc_bsg_host_vendor_reply { uint32_t vendor_rsp[0U] ; }; struct fc_bsg_rport_els { uint8_t els_code ; }; struct fc_bsg_rport_ct { uint32_t preamble_word0 ; uint32_t preamble_word1 ; uint32_t preamble_word2 ; }; union __anonunion_rqst_data_205 { struct fc_bsg_host_add_rport h_addrport ; struct fc_bsg_host_del_rport h_delrport ; struct fc_bsg_host_els h_els ; struct fc_bsg_host_ct h_ct ; struct fc_bsg_host_vendor h_vendor ; struct fc_bsg_rport_els r_els ; struct fc_bsg_rport_ct r_ct ; }; struct fc_bsg_request { uint32_t msgcode ; union __anonunion_rqst_data_205 rqst_data ; }; union __anonunion_reply_data_206 { struct fc_bsg_host_vendor_reply vendor_reply ; struct fc_bsg_ctels_reply ctels_reply ; }; struct fc_bsg_reply { uint32_t result ; uint32_t reply_payload_rcv_len ; union __anonunion_reply_data_206 reply_data ; }; struct qla82xx_legacy_intr_set { uint32_t int_vec_bit ; uint32_t tgt_status_reg ; uint32_t tgt_mask_reg ; uint32_t pci_int_reg ; }; struct device_reg_82xx { uint32_t req_q_out[64U] ; uint32_t rsp_q_in[64U] ; uint32_t rsp_q_out[64U] ; uint16_t mailbox_in[32U] ; uint16_t unused_1[32U] ; uint32_t hint ; uint16_t unused_2[62U] ; uint16_t mailbox_out[32U] ; uint32_t unused_3[48U] ; uint32_t host_status ; uint32_t host_int ; }; struct fcp_cmnd { struct scsi_lun lun ; uint8_t crn ; uint8_t task_attribute ; uint8_t task_management ; uint8_t additional_cdb_len ; uint8_t cdb[260U] ; }; struct dsd_dma { struct list_head list ; dma_addr_t dsd_list_dma ; void *dsd_addr ; }; struct ct6_dsd { uint16_t fcp_cmnd_len ; dma_addr_t fcp_cmnd_dma ; struct fcp_cmnd *fcp_cmnd ; int dsd_use_cnt ; struct list_head dsd_list ; }; struct qla8044_reset_template_hdr { uint16_t version ; uint16_t signature ; uint16_t size ; uint16_t entries ; uint16_t hdr_size ; uint16_t checksum ; uint16_t init_seq_offset ; uint16_t start_seq_offset ; }; struct qla8044_reset_template { int seq_index ; int seq_error ; int array_index ; uint32_t array[16U] ; uint8_t *buff ; uint8_t *stop_offset ; uint8_t *start_offset ; uint8_t *init_offset ; struct qla8044_reset_template_hdr *hdr ; uint8_t seq_end ; uint8_t template_end ; }; struct device_reg_24xx { uint32_t flash_addr ; uint32_t flash_data ; uint32_t ctrl_status ; uint32_t ictrl ; uint32_t istatus ; uint32_t unused_1[2U] ; uint32_t req_q_in ; uint32_t req_q_out ; uint32_t rsp_q_in ; uint32_t rsp_q_out ; uint32_t preq_q_in ; uint32_t preq_q_out ; uint32_t unused_2[2U] ; uint32_t atio_q_in ; uint32_t atio_q_out ; uint32_t host_status ; uint32_t hccr ; uint32_t gpiod ; uint32_t gpioe ; uint32_t iobase_addr ; uint32_t unused_3[10U] ; uint16_t mailbox0 ; uint16_t mailbox1 ; uint16_t mailbox2 ; uint16_t mailbox3 ; uint16_t mailbox4 ; uint16_t mailbox5 ; uint16_t mailbox6 ; uint16_t mailbox7 ; uint16_t mailbox8 ; uint16_t mailbox9 ; uint16_t mailbox10 ; uint16_t mailbox11 ; uint16_t mailbox12 ; uint16_t mailbox13 ; uint16_t mailbox14 ; uint16_t mailbox15 ; uint16_t mailbox16 ; uint16_t mailbox17 ; uint16_t mailbox18 ; uint16_t mailbox19 ; uint16_t mailbox20 ; uint16_t mailbox21 ; uint16_t mailbox22 ; uint16_t mailbox23 ; uint16_t mailbox24 ; uint16_t mailbox25 ; uint16_t mailbox26 ; uint16_t mailbox27 ; uint16_t mailbox28 ; uint16_t mailbox29 ; uint16_t mailbox30 ; uint16_t mailbox31 ; uint32_t iobase_window ; 
uint32_t iobase_c4 ; uint32_t iobase_c8 ; uint32_t unused_4_1[6U] ; uint32_t iobase_q ; uint32_t unused_5[2U] ; uint32_t iobase_select ; uint32_t unused_6[2U] ; uint32_t iobase_sdata ; }; struct qla_npiv_entry { uint16_t flags ; uint16_t vf_id ; uint8_t q_qos ; uint8_t f_qos ; uint16_t unused1 ; uint8_t port_name[8U] ; uint8_t node_name[8U] ; }; struct ex_init_cb_81xx { uint16_t ex_version ; uint8_t prio_fcf_matching_flags ; uint8_t reserved_1[3U] ; uint16_t pri_fcf_vlan_id ; uint8_t pri_fcf_fabric_name[8U] ; uint16_t reserved_2[7U] ; uint8_t spma_mac_addr[6U] ; uint16_t reserved_3[14U] ; }; struct qla_fcp_prio_entry { uint16_t flags ; uint8_t tag ; uint8_t reserved ; uint32_t src_pid ; uint32_t dst_pid ; uint16_t lun_beg ; uint16_t lun_end ; uint8_t src_wwpn[8U] ; uint8_t dst_wwpn[8U] ; }; struct qla_fcp_prio_cfg { uint8_t signature[4U] ; uint16_t version ; uint16_t length ; uint16_t checksum ; uint16_t num_entries ; uint16_t size_of_entry ; uint8_t attributes ; uint8_t reserved ; struct qla_fcp_prio_entry entry[1U] ; }; struct req_que; struct srb_cmd { struct scsi_cmnd *cmd ; uint32_t request_sense_length ; uint32_t fw_sense_length ; uint8_t *request_sense_ptr ; void *ctx ; }; struct __anonstruct_logio_230 { uint16_t flags ; uint16_t data[2U] ; }; struct __anonstruct_tmf_231 { uint32_t flags ; uint32_t lun ; uint32_t data ; struct completion comp ; __le16 comp_status ; }; struct __anonstruct_fxiocb_232 { uint8_t flags ; uint32_t req_len ; uint32_t rsp_len ; void *req_addr ; void *rsp_addr ; dma_addr_t req_dma_handle ; dma_addr_t rsp_dma_handle ; __le32 adapter_id ; __le32 adapter_id_hi ; __le16 req_func_type ; __le32 req_data ; __le32 req_data_extra ; __le32 result ; __le32 seq_number ; __le16 fw_flags ; struct completion fxiocb_comp ; __le32 reserved_0 ; uint8_t reserved_1 ; }; struct __anonstruct_abt_233 { uint32_t cmd_hndl ; __le16 comp_status ; struct completion comp ; }; union __anonunion_u_229 { struct __anonstruct_logio_230 logio ; struct __anonstruct_tmf_231 tmf ; struct __anonstruct_fxiocb_232 fxiocb ; struct __anonstruct_abt_233 abt ; }; struct srb_iocb { union __anonunion_u_229 u ; struct timer_list timer ; void (*timeout)(void * ) ; }; union __anonunion_u_234 { struct srb_iocb iocb_cmd ; struct fc_bsg_job *bsg_job ; struct srb_cmd scmd ; }; struct srb { atomic_t ref_count ; struct fc_port *fcport ; uint32_t handle ; uint16_t flags ; uint16_t type ; char *name ; int iocbs ; union __anonunion_u_234 u ; void (*done)(void * , void * , int ) ; void (*free)(void * , void * ) ; }; typedef struct srb srb_t; struct __anonstruct_isp2100_236 { uint16_t mailbox0 ; uint16_t mailbox1 ; uint16_t mailbox2 ; uint16_t mailbox3 ; uint16_t mailbox4 ; uint16_t mailbox5 ; uint16_t mailbox6 ; uint16_t mailbox7 ; uint16_t unused_2[59U] ; }; struct __anonstruct_isp2300_237 { uint16_t req_q_in ; uint16_t req_q_out ; uint16_t rsp_q_in ; uint16_t rsp_q_out ; uint32_t host_status ; uint16_t host_semaphore ; uint16_t unused_3[17U] ; uint16_t mailbox0 ; uint16_t mailbox1 ; uint16_t mailbox2 ; uint16_t mailbox3 ; uint16_t mailbox4 ; uint16_t mailbox5 ; uint16_t mailbox6 ; uint16_t mailbox7 ; uint16_t mailbox8 ; uint16_t mailbox9 ; uint16_t mailbox10 ; uint16_t mailbox11 ; uint16_t mailbox12 ; uint16_t mailbox13 ; uint16_t mailbox14 ; uint16_t mailbox15 ; uint16_t mailbox16 ; uint16_t mailbox17 ; uint16_t mailbox18 ; uint16_t mailbox19 ; uint16_t mailbox20 ; uint16_t mailbox21 ; uint16_t mailbox22 ; uint16_t mailbox23 ; uint16_t mailbox24 ; uint16_t mailbox25 ; uint16_t mailbox26 ; uint16_t mailbox27 ; 
uint16_t mailbox28 ; uint16_t mailbox29 ; uint16_t mailbox30 ; uint16_t mailbox31 ; uint16_t fb_cmd ; uint16_t unused_4[10U] ; }; union __anonunion_u_235 { struct __anonstruct_isp2100_236 isp2100 ; struct __anonstruct_isp2300_237 isp2300 ; }; struct __anonstruct_isp2200_239 { uint16_t unused_10[8U] ; uint16_t mailbox8 ; uint16_t mailbox9 ; uint16_t mailbox10 ; uint16_t mailbox11 ; uint16_t mailbox12 ; uint16_t mailbox13 ; uint16_t mailbox14 ; uint16_t mailbox15 ; uint16_t mailbox16 ; uint16_t mailbox17 ; uint16_t mailbox18 ; uint16_t mailbox19 ; uint16_t mailbox20 ; uint16_t mailbox21 ; uint16_t mailbox22 ; uint16_t mailbox23 ; }; union __anonunion_u_end_238 { struct __anonstruct_isp2200_239 isp2200 ; }; struct device_reg_2xxx { uint16_t flash_address ; uint16_t flash_data ; uint16_t unused_1[1U] ; uint16_t ctrl_status ; uint16_t ictrl ; uint16_t istatus ; uint16_t semaphore ; uint16_t nvram ; union __anonunion_u_235 u ; uint16_t fpm_diag_config ; uint16_t unused_5[4U] ; uint16_t risc_hw ; uint16_t unused_5_1 ; uint16_t pcr ; uint16_t unused_6[5U] ; uint16_t mctr ; uint16_t unused_7[3U] ; uint16_t fb_cmd_2100 ; uint16_t unused_8[3U] ; uint16_t hccr ; uint16_t unused_9[5U] ; uint16_t gpiod ; uint16_t gpioe ; union __anonunion_u_end_238 u_end ; }; struct device_reg_25xxmq { uint32_t req_q_in ; uint32_t req_q_out ; uint32_t rsp_q_in ; uint32_t rsp_q_out ; uint32_t atio_q_in ; uint32_t atio_q_out ; }; struct device_reg_fx00 { uint32_t mailbox0 ; uint32_t mailbox1 ; uint32_t mailbox2 ; uint32_t mailbox3 ; uint32_t mailbox4 ; uint32_t mailbox5 ; uint32_t mailbox6 ; uint32_t mailbox7 ; uint32_t mailbox8 ; uint32_t mailbox9 ; uint32_t mailbox10 ; uint32_t mailbox11 ; uint32_t mailbox12 ; uint32_t mailbox13 ; uint32_t mailbox14 ; uint32_t mailbox15 ; uint32_t mailbox16 ; uint32_t mailbox17 ; uint32_t mailbox18 ; uint32_t mailbox19 ; uint32_t mailbox20 ; uint32_t mailbox21 ; uint32_t mailbox22 ; uint32_t mailbox23 ; uint32_t mailbox24 ; uint32_t mailbox25 ; uint32_t mailbox26 ; uint32_t mailbox27 ; uint32_t mailbox28 ; uint32_t mailbox29 ; uint32_t mailbox30 ; uint32_t mailbox31 ; uint32_t aenmailbox0 ; uint32_t aenmailbox1 ; uint32_t aenmailbox2 ; uint32_t aenmailbox3 ; uint32_t aenmailbox4 ; uint32_t aenmailbox5 ; uint32_t aenmailbox6 ; uint32_t aenmailbox7 ; uint32_t req_q_in ; uint32_t req_q_out ; uint32_t rsp_q_in ; uint32_t rsp_q_out ; uint32_t initval0 ; uint32_t initval1 ; uint32_t initval2 ; uint32_t initval3 ; uint32_t initval4 ; uint32_t initval5 ; uint32_t initval6 ; uint32_t initval7 ; uint32_t fwheartbeat ; uint32_t pseudoaen ; }; union __anonunion_device_reg_t_240 { struct device_reg_2xxx isp ; struct device_reg_24xx isp24 ; struct device_reg_25xxmq isp25mq ; struct device_reg_82xx isp82 ; struct device_reg_fx00 ispfx00 ; }; typedef union __anonunion_device_reg_t_240 device_reg_t; struct __anonstruct_mbx_cmd_t_241 { uint32_t out_mb ; uint32_t in_mb ; uint16_t mb[32U] ; long buf_size ; void *bufp ; uint32_t tov ; uint8_t flags ; }; typedef struct __anonstruct_mbx_cmd_t_241 mbx_cmd_t; struct mbx_cmd_32 { uint32_t out_mb ; uint32_t in_mb ; uint32_t mb[32U] ; long buf_size ; void *bufp ; uint32_t tov ; uint8_t flags ; }; struct __anonstruct_init_cb_t_243 { uint8_t version ; uint8_t reserved_1 ; uint8_t firmware_options[2U] ; uint16_t frame_payload_size ; uint16_t max_iocb_allocation ; uint16_t execution_throttle ; uint8_t retry_count ; uint8_t retry_delay ; uint8_t port_name[8U] ; uint16_t hard_address ; uint8_t inquiry_data ; uint8_t login_timeout ; uint8_t node_name[8U] ; uint16_t 
request_q_outpointer ; uint16_t response_q_inpointer ; uint16_t request_q_length ; uint16_t response_q_length ; uint32_t request_q_address[2U] ; uint32_t response_q_address[2U] ; uint16_t lun_enables ; uint8_t command_resource_count ; uint8_t immediate_notify_resource_count ; uint16_t timeout ; uint8_t reserved_2[2U] ; uint8_t add_firmware_options[2U] ; uint8_t response_accumulation_timer ; uint8_t interrupt_delay_timer ; uint8_t special_options[2U] ; uint8_t reserved_3[26U] ; }; typedef struct __anonstruct_init_cb_t_243 init_cb_t; struct __anonstruct_response_t_245 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint8_t data[52U] ; uint32_t signature ; }; typedef struct __anonstruct_response_t_245 response_t; struct atio { uint8_t entry_type ; uint8_t entry_count ; uint8_t data[58U] ; uint32_t signature ; }; struct __anonstruct_id_247 { uint8_t reserved ; uint8_t standard ; }; union __anonunion_target_id_t_246 { uint16_t extended ; struct __anonstruct_id_247 id ; }; typedef union __anonunion_target_id_t_246 target_id_t; struct __anonstruct_cmd_entry_t_248 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; target_id_t target ; uint16_t lun ; uint16_t control_flags ; uint16_t reserved_1 ; uint16_t timeout ; uint16_t dseg_count ; uint8_t scsi_cdb[16U] ; uint32_t byte_count ; uint32_t dseg_0_address ; uint32_t dseg_0_length ; uint32_t dseg_1_address ; uint32_t dseg_1_length ; uint32_t dseg_2_address ; uint32_t dseg_2_length ; }; typedef struct __anonstruct_cmd_entry_t_248 cmd_entry_t; struct __anonstruct_request_t_250 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; target_id_t target ; uint16_t lun ; uint16_t control_flags ; uint16_t reserved_1 ; uint16_t timeout ; uint16_t dseg_count ; uint8_t scsi_cdb[16U] ; uint32_t byte_count ; uint32_t dseg_0_address[2U] ; uint32_t dseg_0_length ; uint32_t dseg_1_address[2U] ; uint32_t dseg_1_length ; }; typedef struct __anonstruct_request_t_250 request_t; struct __anonstruct_nobundling_254 { uint32_t reserved_1 ; uint16_t reserved_2 ; uint16_t reserved_3 ; uint32_t reserved_4 ; uint32_t data_address[2U] ; uint32_t data_length ; uint32_t reserved_5[2U] ; uint32_t reserved_6 ; }; struct __anonstruct_bundling_255 { uint32_t dif_byte_count ; uint16_t reserved_1 ; uint16_t dseg_count ; uint32_t reserved_2 ; uint32_t data_address[2U] ; uint32_t data_length ; uint32_t dif_address[2U] ; uint32_t dif_length ; }; union __anonunion_u_253 { struct __anonstruct_nobundling_254 nobundling ; struct __anonstruct_bundling_255 bundling ; }; struct crc_context { uint32_t handle ; uint32_t ref_tag ; uint16_t app_tag ; uint8_t ref_tag_mask[4U] ; uint8_t app_tag_mask[2U] ; uint16_t guard_seed ; uint16_t prot_opts ; uint16_t blk_size ; uint16_t runt_blk_guard ; uint32_t byte_count ; union __anonunion_u_253 u ; struct fcp_cmnd fcp_cmnd ; dma_addr_t crc_ctx_dma ; struct list_head dsd_list ; }; struct __anonstruct_ms_iocb_entry_t_261 { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t handle1 ; target_id_t loop_id ; uint16_t status ; uint16_t control_flags ; uint16_t reserved2 ; uint16_t timeout ; uint16_t cmd_dsd_count ; uint16_t total_dsd_count ; uint8_t type ; uint8_t r_ctl ; uint16_t rx_id ; uint16_t reserved3 ; uint32_t handle2 ; uint32_t rsp_bytecount ; uint32_t req_bytecount ; uint32_t dseg_req_address[2U] ; uint32_t dseg_req_length ; uint32_t 
dseg_rsp_address[2U] ; uint32_t dseg_rsp_length ; }; typedef struct __anonstruct_ms_iocb_entry_t_261 ms_iocb_entry_t; struct __anonstruct_b_263 { uint8_t al_pa ; uint8_t area ; uint8_t domain ; uint8_t rsvd_1 ; }; union __anonunion_port_id_t_262 { unsigned int b24 : 24 ; struct __anonstruct_b_263 b ; }; typedef union __anonunion_port_id_t_262 port_id_t; enum ldv_28399 { FCT_UNKNOWN = 0, FCT_RSCN = 1, FCT_SWITCH = 2, FCT_BROADCAST = 3, FCT_INITIATOR = 4, FCT_TARGET = 5 } ; typedef enum ldv_28399 fc_port_type_t; struct fc_port { struct list_head list ; struct scsi_qla_host *vha ; uint8_t node_name[8U] ; uint8_t port_name[8U] ; port_id_t d_id ; uint16_t loop_id ; uint16_t old_loop_id ; uint16_t tgt_id ; uint16_t old_tgt_id ; uint8_t fcp_prio ; uint8_t fabric_port_name[8U] ; uint16_t fp_speed ; fc_port_type_t port_type ; atomic_t state ; uint32_t flags ; int login_retry ; struct fc_rport *rport ; struct fc_rport *drport ; u32 supported_classes ; uint8_t fc4_type ; uint8_t scan_state ; unsigned long last_queue_full ; unsigned long last_ramp_up ; uint16_t port_id ; }; typedef struct fc_port fc_port_t; struct mr_data_fx00 { uint8_t product_name[256U] ; uint8_t symbolic_name[64U] ; uint8_t serial_num[32U] ; uint8_t hw_version[16U] ; uint8_t fw_version[16U] ; uint8_t uboot_version[16U] ; uint8_t fru_serial_num[32U] ; fc_port_t fcport ; uint8_t fw_hbt_en ; uint8_t fw_hbt_cnt ; uint8_t fw_hbt_miss_cnt ; uint32_t old_fw_hbt_cnt ; uint16_t fw_reset_timer_tick ; uint8_t fw_reset_timer_exp ; uint16_t fw_critemp_timer_tick ; uint32_t old_aenmbx0_state ; uint32_t critical_temperature ; bool extended_io_enabled ; }; union __anonunion_a_265 { uint8_t node_name[8U] ; uint8_t manufacturer[32U] ; uint8_t serial_num[8U] ; uint8_t model[16U] ; uint8_t model_desc[80U] ; uint8_t hw_version[16U] ; uint8_t driver_version[32U] ; uint8_t orom_version[16U] ; uint8_t fw_version[16U] ; uint8_t os_version[128U] ; uint8_t max_ct_len[4U] ; }; struct ct_fdmi_hba_attr { uint16_t type ; uint16_t len ; union __anonunion_a_265 a ; }; struct ct_fdmi_hba_attributes { uint32_t count ; struct ct_fdmi_hba_attr entry[9U] ; }; union __anonunion_a_266 { uint8_t fc4_types[32U] ; uint32_t sup_speed ; uint32_t cur_speed ; uint32_t max_frame_size ; uint8_t os_dev_name[32U] ; uint8_t host_name[32U] ; }; struct ct_fdmi_port_attr { uint16_t type ; uint16_t len ; union __anonunion_a_266 a ; }; struct ct_fdmi_port_attributes { uint32_t count ; struct ct_fdmi_port_attr entry[6U] ; }; struct ct_cmd_hdr { uint8_t revision ; uint8_t in_id[3U] ; uint8_t gs_type ; uint8_t gs_subtype ; uint8_t options ; uint8_t reserved ; }; struct __anonstruct_port_id_268 { uint8_t reserved ; uint8_t port_id[3U] ; }; struct __anonstruct_gid_pt_269 { uint8_t port_type ; uint8_t domain ; uint8_t area ; uint8_t reserved ; }; struct __anonstruct_rft_id_270 { uint8_t reserved ; uint8_t port_id[3U] ; uint8_t fc4_types[32U] ; }; struct __anonstruct_rff_id_271 { uint8_t reserved ; uint8_t port_id[3U] ; uint16_t reserved2 ; uint8_t fc4_feature ; uint8_t fc4_type ; }; struct __anonstruct_rnn_id_272 { uint8_t reserved ; uint8_t port_id[3U] ; uint8_t node_name[8U] ; }; struct __anonstruct_rsnn_nn_273 { uint8_t node_name[8U] ; uint8_t name_len ; uint8_t sym_node_name[255U] ; }; struct __anonstruct_ghat_274 { uint8_t hba_indentifier[8U] ; }; struct __anonstruct_rhba_275 { uint8_t hba_identifier[8U] ; uint32_t entry_count ; uint8_t port_name[8U] ; struct ct_fdmi_hba_attributes attrs ; }; struct __anonstruct_rhat_276 { uint8_t hba_identifier[8U] ; struct ct_fdmi_hba_attributes attrs ; 
}; struct __anonstruct_rpa_277 { uint8_t port_name[8U] ; struct ct_fdmi_port_attributes attrs ; }; struct __anonstruct_dhba_278 { uint8_t port_name[8U] ; }; struct __anonstruct_dhat_279 { uint8_t port_name[8U] ; }; struct __anonstruct_dprt_280 { uint8_t port_name[8U] ; }; struct __anonstruct_dpa_281 { uint8_t port_name[8U] ; }; struct __anonstruct_gpsc_282 { uint8_t port_name[8U] ; }; struct __anonstruct_gff_id_283 { uint8_t reserved ; uint8_t port_name[3U] ; }; union __anonunion_req_267 { struct __anonstruct_port_id_268 port_id ; struct __anonstruct_gid_pt_269 gid_pt ; struct __anonstruct_rft_id_270 rft_id ; struct __anonstruct_rff_id_271 rff_id ; struct __anonstruct_rnn_id_272 rnn_id ; struct __anonstruct_rsnn_nn_273 rsnn_nn ; struct __anonstruct_ghat_274 ghat ; struct __anonstruct_rhba_275 rhba ; struct __anonstruct_rhat_276 rhat ; struct __anonstruct_rpa_277 rpa ; struct __anonstruct_dhba_278 dhba ; struct __anonstruct_dhat_279 dhat ; struct __anonstruct_dprt_280 dprt ; struct __anonstruct_dpa_281 dpa ; struct __anonstruct_gpsc_282 gpsc ; struct __anonstruct_gff_id_283 gff_id ; }; struct ct_sns_req { struct ct_cmd_hdr header ; uint16_t command ; uint16_t max_rsp_size ; uint8_t fragment_id ; uint8_t reserved[3U] ; union __anonunion_req_267 req ; }; struct ct_rsp_hdr { struct ct_cmd_hdr header ; uint16_t response ; uint16_t residual ; uint8_t fragment_id ; uint8_t reason_code ; uint8_t explanation_code ; uint8_t vendor_unique ; }; struct ct_sns_gid_pt_data { uint8_t control_byte ; uint8_t port_id[3U] ; }; struct __anonstruct_ga_nxt_285 { uint8_t port_type ; uint8_t port_id[3U] ; uint8_t port_name[8U] ; uint8_t sym_port_name_len ; uint8_t sym_port_name[255U] ; uint8_t node_name[8U] ; uint8_t sym_node_name_len ; uint8_t sym_node_name[255U] ; uint8_t init_proc_assoc[8U] ; uint8_t node_ip_addr[16U] ; uint8_t class_of_service[4U] ; uint8_t fc4_types[32U] ; uint8_t ip_address[16U] ; uint8_t fabric_port_name[8U] ; uint8_t reserved ; uint8_t hard_address[3U] ; }; struct __anonstruct_gid_pt_286 { struct ct_sns_gid_pt_data entries[2048U] ; }; struct __anonstruct_gpn_id_287 { uint8_t port_name[8U] ; }; struct __anonstruct_gnn_id_288 { uint8_t node_name[8U] ; }; struct __anonstruct_gft_id_289 { uint8_t fc4_types[32U] ; }; struct __anonstruct_ghat_290 { uint32_t entry_count ; uint8_t port_name[8U] ; struct ct_fdmi_hba_attributes attrs ; }; struct __anonstruct_gfpn_id_291 { uint8_t port_name[8U] ; }; struct __anonstruct_gpsc_292 { uint16_t speeds ; uint16_t speed ; }; struct __anonstruct_gff_id_293 { uint8_t fc4_features[128U] ; }; union __anonunion_rsp_284 { struct __anonstruct_ga_nxt_285 ga_nxt ; struct __anonstruct_gid_pt_286 gid_pt ; struct __anonstruct_gpn_id_287 gpn_id ; struct __anonstruct_gnn_id_288 gnn_id ; struct __anonstruct_gft_id_289 gft_id ; struct __anonstruct_ghat_290 ghat ; struct __anonstruct_gfpn_id_291 gfpn_id ; struct __anonstruct_gpsc_292 gpsc ; struct __anonstruct_gff_id_293 gff_id ; }; struct ct_sns_rsp { struct ct_rsp_hdr header ; union __anonunion_rsp_284 rsp ; }; union __anonunion_p_294 { struct ct_sns_req req ; struct ct_sns_rsp rsp ; }; struct ct_sns_pkt { union __anonunion_p_294 p ; }; struct __anonstruct_cmd_296 { uint16_t buffer_length ; uint16_t reserved_1 ; uint32_t buffer_address[2U] ; uint16_t subcommand_length ; uint16_t reserved_2 ; uint16_t subcommand ; uint16_t size ; uint32_t reserved_3 ; uint8_t param[36U] ; }; union __anonunion_p_295 { struct __anonstruct_cmd_296 cmd ; uint8_t rft_data[16U] ; uint8_t rnn_data[16U] ; uint8_t gan_data[636U] ; uint8_t 
gid_data[2064U] ; uint8_t gpn_data[24U] ; uint8_t gnn_data[24U] ; }; struct sns_cmd_pkt { union __anonunion_p_295 p ; }; struct fw_blob { char *name ; uint32_t segs[4U] ; struct firmware const *fw ; }; struct gid_list_info { uint8_t al_pa ; uint8_t area ; uint8_t domain ; uint8_t loop_id_2100 ; uint16_t loop_id ; uint16_t reserved_1 ; }; struct rsp_que; struct isp_operations { int (*pci_config)(struct scsi_qla_host * ) ; void (*reset_chip)(struct scsi_qla_host * ) ; int (*chip_diag)(struct scsi_qla_host * ) ; void (*config_rings)(struct scsi_qla_host * ) ; void (*reset_adapter)(struct scsi_qla_host * ) ; int (*nvram_config)(struct scsi_qla_host * ) ; void (*update_fw_options)(struct scsi_qla_host * ) ; int (*load_risc)(struct scsi_qla_host * , uint32_t * ) ; char *(*pci_info_str)(struct scsi_qla_host * , char * ) ; char *(*fw_version_str)(struct scsi_qla_host * , char * ) ; irqreturn_t (*intr_handler)(int , void * ) ; void (*enable_intrs)(struct qla_hw_data * ) ; void (*disable_intrs)(struct qla_hw_data * ) ; int (*abort_command)(srb_t * ) ; int (*target_reset)(struct fc_port * , unsigned int , int ) ; int (*lun_reset)(struct fc_port * , unsigned int , int ) ; int (*fabric_login)(struct scsi_qla_host * , uint16_t , uint8_t , uint8_t , uint8_t , uint16_t * , uint8_t ) ; int (*fabric_logout)(struct scsi_qla_host * , uint16_t , uint8_t , uint8_t , uint8_t ) ; uint16_t (*calc_req_entries)(uint16_t ) ; void (*build_iocbs)(srb_t * , cmd_entry_t * , uint16_t ) ; void *(*prep_ms_iocb)(struct scsi_qla_host * , uint32_t , uint32_t ) ; void *(*prep_ms_fdmi_iocb)(struct scsi_qla_host * , uint32_t , uint32_t ) ; uint8_t *(*read_nvram)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ) ; int (*write_nvram)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ) ; void (*fw_dump)(struct scsi_qla_host * , int ) ; int (*beacon_on)(struct scsi_qla_host * ) ; int (*beacon_off)(struct scsi_qla_host * ) ; void (*beacon_blink)(struct scsi_qla_host * ) ; uint8_t *(*read_optrom)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ) ; int (*write_optrom)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ) ; int (*get_flash_version)(struct scsi_qla_host * , void * ) ; int (*start_scsi)(srb_t * ) ; int (*abort_isp)(struct scsi_qla_host * ) ; int (*iospace_config)(struct qla_hw_data * ) ; int (*initialize_adapter)(struct scsi_qla_host * ) ; }; struct qla_msix_entry { int have_irq ; uint32_t vector ; uint16_t entry ; struct rsp_que *rsp ; }; enum qla_work_type { QLA_EVT_AEN = 0, QLA_EVT_IDC_ACK = 1, QLA_EVT_ASYNC_LOGIN = 2, QLA_EVT_ASYNC_LOGIN_DONE = 3, QLA_EVT_ASYNC_LOGOUT = 4, QLA_EVT_ASYNC_LOGOUT_DONE = 5, QLA_EVT_ASYNC_ADISC = 6, QLA_EVT_ASYNC_ADISC_DONE = 7, QLA_EVT_UEVENT = 8, QLA_EVT_AENFX = 9 } ; struct __anonstruct_aen_298 { enum fc_host_event_code code ; u32 data ; }; struct __anonstruct_idc_ack_299 { uint16_t mb[7U] ; }; struct __anonstruct_logio_300 { struct fc_port *fcport ; u16 data[2U] ; }; struct __anonstruct_uevent_301 { u32 code ; }; struct __anonstruct_aenfx_302 { uint32_t evtcode ; uint32_t mbx[8U] ; uint32_t count ; }; struct __anonstruct_iosb_303 { srb_t *sp ; }; union __anonunion_u_297 { struct __anonstruct_aen_298 aen ; struct __anonstruct_idc_ack_299 idc_ack ; struct __anonstruct_logio_300 logio ; struct __anonstruct_uevent_301 uevent ; struct __anonstruct_aenfx_302 aenfx ; struct __anonstruct_iosb_303 iosb ; }; struct qla_work_evt { struct list_head list ; enum qla_work_type type ; u32 flags ; union __anonunion_u_297 u ; }; struct qla_chip_state_84xx { struct list_head 
list ; struct kref kref ; void *bus ; spinlock_t access_lock ; struct mutex fw_update_mutex ; uint32_t fw_update ; uint32_t op_fw_version ; uint32_t op_fw_size ; uint32_t op_fw_seq_size ; uint32_t diag_fw_version ; uint32_t gold_fw_version ; }; struct qla_statistics { uint32_t total_isp_aborts ; uint64_t input_bytes ; uint64_t output_bytes ; uint64_t input_requests ; uint64_t output_requests ; uint32_t control_requests ; uint64_t jiffies_at_last_reset ; }; struct bidi_statistics { unsigned long long io_count ; unsigned long long transfer_bytes ; }; struct rsp_que { dma_addr_t dma ; response_t *ring ; response_t *ring_ptr ; uint32_t *rsp_q_in ; uint32_t *rsp_q_out ; uint16_t ring_index ; uint16_t out_ptr ; uint16_t length ; uint16_t options ; uint16_t rid ; uint16_t id ; uint16_t vp_idx ; struct qla_hw_data *hw ; struct qla_msix_entry *msix ; struct req_que *req ; srb_t *status_srb ; struct work_struct q_work ; dma_addr_t dma_fx00 ; response_t *ring_fx00 ; uint16_t length_fx00 ; uint8_t rsp_pkt[64U] ; }; struct req_que { dma_addr_t dma ; request_t *ring ; request_t *ring_ptr ; uint32_t *req_q_in ; uint32_t *req_q_out ; uint16_t ring_index ; uint16_t in_ptr ; uint16_t cnt ; uint16_t length ; uint16_t options ; uint16_t rid ; uint16_t id ; uint16_t qos ; uint16_t vp_idx ; struct rsp_que *rsp ; srb_t **outstanding_cmds ; uint32_t current_outstanding_cmd ; uint16_t num_outstanding_cmds ; int max_q_depth ; dma_addr_t dma_fx00 ; request_t *ring_fx00 ; uint16_t length_fx00 ; uint8_t req_pkt[64U] ; }; struct qlfc_fw { void *fw_buf ; dma_addr_t fw_dma ; uint32_t len ; }; struct qla_tgt_func_tmpl; struct qla_tgt; struct qla_tgt_cmd; struct qla_tgt_vp_map; struct qlt_hw_data { unsigned char enable_class_2 : 1 ; unsigned char enable_explicit_conf : 1 ; unsigned char ini_mode_force_reverse : 1 ; unsigned char node_name_set : 1 ; dma_addr_t atio_dma ; struct atio *atio_ring ; struct atio *atio_ring_ptr ; uint16_t atio_ring_index ; uint16_t atio_q_length ; uint32_t *atio_q_in ; uint32_t *atio_q_out ; void *target_lport_ptr ; struct qla_tgt_func_tmpl *tgt_ops ; struct qla_tgt *qla_tgt ; struct qla_tgt_cmd *cmds[1024U] ; uint16_t current_handle ; struct qla_tgt_vp_map *tgt_vp_map ; struct mutex tgt_mutex ; struct mutex tgt_host_action_mutex ; int saved_set ; uint16_t saved_exchange_count ; uint32_t saved_firmware_options_1 ; uint32_t saved_firmware_options_2 ; uint32_t saved_firmware_options_3 ; uint8_t saved_firmware_options[2U] ; uint8_t saved_add_firmware_options[2U] ; uint8_t tgt_node_name[8U] ; }; struct __anonstruct_flags_304 { unsigned char mbox_int : 1 ; unsigned char mbox_busy : 1 ; unsigned char disable_risc_code_load : 1 ; unsigned char enable_64bit_addressing : 1 ; unsigned char enable_lip_reset : 1 ; unsigned char enable_target_reset : 1 ; unsigned char enable_lip_full_login : 1 ; unsigned char enable_led_scheme : 1 ; unsigned char msi_enabled : 1 ; unsigned char msix_enabled : 1 ; unsigned char disable_serdes : 1 ; unsigned char gpsc_supported : 1 ; unsigned char npiv_supported : 1 ; unsigned char pci_channel_io_perm_failure : 1 ; unsigned char fce_enabled : 1 ; unsigned char fac_supported : 1 ; unsigned char chip_reset_done : 1 ; unsigned char port0 : 1 ; unsigned char running_gold_fw : 1 ; unsigned char eeh_busy : 1 ; unsigned char cpu_affinity_enabled : 1 ; unsigned char disable_msix_handshake : 1 ; unsigned char fcp_prio_enabled : 1 ; unsigned char isp82xx_fw_hung : 1 ; unsigned char nic_core_hung : 1 ; unsigned char quiesce_owner : 1 ; unsigned char nic_core_reset_hdlr_active : 1 ; 
unsigned char nic_core_reset_owner : 1 ; unsigned char isp82xx_no_md_cap : 1 ; unsigned char host_shutting_down : 1 ; unsigned char idc_compl_status : 1 ; unsigned char mr_reset_hdlr_active : 1 ; unsigned char mr_intr_valid : 1 ; }; struct qla2xxx_fw_dump; struct qla_hw_data { struct pci_dev *pdev ; mempool_t *srb_mempool ; struct __anonstruct_flags_304 volatile flags ; spinlock_t hardware_lock ; int bars ; int mem_only ; device_reg_t *iobase ; resource_size_t pio_address ; dma_addr_t bar0_hdl ; void *cregbase ; dma_addr_t bar2_hdl ; uint32_t rqstq_intr_code ; uint32_t mbx_intr_code ; uint32_t req_que_len ; uint32_t rsp_que_len ; uint32_t req_que_off ; uint32_t rsp_que_off ; device_reg_t *mqiobase ; device_reg_t *msixbase ; uint16_t msix_count ; uint8_t mqenable ; struct req_que **req_q_map ; struct rsp_que **rsp_q_map ; unsigned long req_qid_map[4U] ; unsigned long rsp_qid_map[4U] ; uint8_t max_req_queues ; uint8_t max_rsp_queues ; struct qla_npiv_entry *npiv_info ; uint16_t nvram_npiv_size ; uint16_t switch_cap ; uint8_t port_no ; uint8_t loop_down_abort_time ; atomic_t loop_down_timer ; uint8_t link_down_timeout ; uint16_t max_loop_id ; uint16_t max_fibre_devices ; uint16_t fb_rev ; uint16_t min_external_loopid ; uint16_t link_data_rate ; uint8_t current_topology ; uint8_t prev_topology ; uint8_t operating_mode ; uint8_t interrupts_on ; uint32_t isp_abort_cnt ; uint32_t device_type ; uint8_t serial0 ; uint8_t serial1 ; uint8_t serial2 ; uint16_t nvram_size ; uint16_t nvram_base ; void *nvram ; uint16_t vpd_size ; uint16_t vpd_base ; void *vpd ; uint16_t loop_reset_delay ; uint8_t retry_count ; uint8_t login_timeout ; uint16_t r_a_tov ; int port_down_retry_count ; uint8_t mbx_count ; uint8_t aen_mbx_count ; uint32_t login_retry_count ; ms_iocb_entry_t *ms_iocb ; dma_addr_t ms_iocb_dma ; struct ct_sns_pkt *ct_sns ; dma_addr_t ct_sns_dma ; struct sns_cmd_pkt *sns_cmd ; dma_addr_t sns_cmd_dma ; void *sfp_data ; dma_addr_t sfp_data_dma ; void *xgmac_data ; dma_addr_t xgmac_data_dma ; void *dcbx_tlv ; dma_addr_t dcbx_tlv_dma ; struct task_struct *dpc_thread ; uint8_t dpc_active ; dma_addr_t gid_list_dma ; struct gid_list_info *gid_list ; int gid_list_info_size ; struct dma_pool *s_dma_pool ; dma_addr_t init_cb_dma ; init_cb_t *init_cb ; int init_cb_size ; dma_addr_t ex_init_cb_dma ; struct ex_init_cb_81xx *ex_init_cb ; void *async_pd ; dma_addr_t async_pd_dma ; void *swl ; uint16_t mailbox_out[32U] ; uint32_t mailbox_out32[32U] ; uint32_t aenmb[8U] ; mbx_cmd_t *mcp ; struct mbx_cmd_32 *mcp32 ; unsigned long mbx_cmd_flags ; struct mutex vport_lock ; spinlock_t vport_slock ; struct completion mbx_cmd_comp ; struct completion mbx_intr_comp ; struct completion dcbx_comp ; struct completion lb_portup_comp ; int notify_dcbx_comp ; int notify_lb_portup_comp ; struct mutex selflogin_lock ; uint16_t fw_major_version ; uint16_t fw_minor_version ; uint16_t fw_subminor_version ; uint16_t fw_attributes ; uint16_t fw_attributes_h ; uint16_t fw_attributes_ext[2U] ; uint32_t fw_memory_size ; uint32_t fw_transfer_size ; uint32_t fw_srisc_address ; uint16_t fw_xcb_count ; uint16_t fw_iocb_count ; uint16_t fw_options[16U] ; uint8_t fw_seriallink_options[4U] ; uint16_t fw_seriallink_options24[4U] ; uint8_t mpi_version[3U] ; uint32_t mpi_capabilities ; uint8_t phy_version[3U] ; struct qla2xxx_fw_dump *fw_dump ; uint32_t fw_dump_len ; int fw_dumped ; int fw_dump_reading ; dma_addr_t eft_dma ; void *eft ; dma_addr_t mctp_dump_dma ; void *mctp_dump ; int mctp_dumped ; int mctp_dump_reading ; uint32_t chain_offset ; 
struct dentry *dfs_dir ; struct dentry *dfs_fce ; dma_addr_t fce_dma ; void *fce ; uint32_t fce_bufs ; uint16_t fce_mb[8U] ; uint64_t fce_wr ; uint64_t fce_rd ; struct mutex fce_mutex ; uint32_t pci_attr ; uint16_t chip_revision ; uint16_t product_id[4U] ; uint8_t model_number[17U] ; char model_desc[80U] ; uint8_t adapter_id[17U] ; char *optrom_buffer ; uint32_t optrom_size ; int optrom_state ; uint32_t optrom_region_start ; uint32_t optrom_region_size ; uint8_t bios_revision[2U] ; uint8_t efi_revision[2U] ; uint8_t fcode_revision[16U] ; uint32_t fw_revision[4U] ; uint32_t gold_fw_version[4U] ; uint32_t flash_conf_off ; uint32_t flash_data_off ; uint32_t nvram_conf_off ; uint32_t nvram_data_off ; uint32_t fdt_wrt_disable ; uint32_t fdt_wrt_enable ; uint32_t fdt_erase_cmd ; uint32_t fdt_block_size ; uint32_t fdt_unprotect_sec_cmd ; uint32_t fdt_protect_sec_cmd ; uint32_t fdt_wrt_sts_reg_cmd ; uint32_t flt_region_flt ; uint32_t flt_region_fdt ; uint32_t flt_region_boot ; uint32_t flt_region_fw ; uint32_t flt_region_vpd_nvram ; uint32_t flt_region_vpd ; uint32_t flt_region_nvram ; uint32_t flt_region_npiv_conf ; uint32_t flt_region_gold_fw ; uint32_t flt_region_fcp_prio ; uint32_t flt_region_bootload ; uint16_t beacon_blink_led ; uint8_t beacon_color_state ; uint16_t zio_mode ; uint16_t zio_timer ; struct qla_msix_entry *msix_entries ; struct list_head vp_list ; unsigned long vp_idx_map[4U] ; uint16_t num_vhosts ; uint16_t num_vsans ; uint16_t max_npiv_vports ; int cur_vport_count ; struct qla_chip_state_84xx *cs84xx ; struct qla_statistics qla_stats ; struct isp_operations *isp_ops ; struct workqueue_struct *wq ; struct qlfc_fw fw_buf ; struct qla_fcp_prio_cfg *fcp_prio_cfg ; struct dma_pool *dl_dma_pool ; struct dma_pool *fcp_cmnd_dma_pool ; mempool_t *ctx_mempool ; unsigned long nx_pcibase ; uint8_t *nxdb_rd_ptr ; unsigned long nxdb_wr_ptr ; uint32_t crb_win ; uint32_t curr_window ; uint32_t ddr_mn_window ; unsigned long mn_win_crb ; unsigned long ms_win_crb ; int qdr_sn_window ; uint32_t fcoe_dev_init_timeout ; uint32_t fcoe_reset_timeout ; rwlock_t hw_lock ; uint16_t portnum ; int link_width ; struct fw_blob *hablob ; struct qla82xx_legacy_intr_set nx_legacy_intr ; uint16_t gbl_dsd_inuse ; uint16_t gbl_dsd_avail ; struct list_head gbl_dsd_list ; uint8_t fw_type ; __le32 file_prd_off ; uint32_t md_template_size ; void *md_tmplt_hdr ; dma_addr_t md_tmplt_hdr_dma ; void *md_dump ; uint32_t md_dump_size ; void *loop_id_map ; uint32_t idc_audit_ts ; uint32_t idc_extend_tmo ; struct workqueue_struct *dpc_lp_wq ; struct work_struct idc_aen ; struct workqueue_struct *dpc_hp_wq ; struct work_struct nic_core_reset ; struct work_struct idc_state_handler ; struct work_struct nic_core_unrecoverable ; unsigned long host_last_rampdown_time ; unsigned long host_last_rampup_time ; int cfg_lun_q_depth ; struct mr_data_fx00 mr ; struct qlt_hw_data tgt ; }; struct __anonstruct_flags_305 { unsigned char init_done : 1 ; unsigned char online : 1 ; unsigned char reset_active : 1 ; unsigned char management_server_logged_in : 1 ; unsigned char process_response_queue : 1 ; unsigned char difdix_supported : 1 ; unsigned char delete_progress : 1 ; unsigned char fw_tgt_reported : 1 ; }; struct scsi_qla_host { struct list_head list ; struct list_head vp_fcports ; struct list_head work_list ; spinlock_t work_lock ; struct Scsi_Host *host ; unsigned long host_no ; uint8_t host_str[16U] ; struct __anonstruct_flags_305 volatile flags ; atomic_t loop_state ; unsigned long dpc_flags ; uint32_t device_flags ; uint16_t loop_id 
; uint16_t self_login_loop_id ; fc_port_t bidir_fcport ; port_id_t d_id ; uint8_t marker_needed ; uint16_t mgmt_svr_loop_id ; uint8_t loop_down_abort_time ; atomic_t loop_down_timer ; uint8_t link_down_timeout ; uint32_t timer_active ; struct timer_list timer ; uint8_t node_name[8U] ; uint8_t port_name[8U] ; uint8_t fabric_node_name[8U] ; uint16_t fcoe_vlan_id ; uint16_t fcoe_fcf_idx ; uint8_t fcoe_vn_port_mac[6U] ; uint32_t vp_abort_cnt ; struct fc_vport *fc_vport ; uint16_t vp_idx ; unsigned long vp_flags ; atomic_t vp_state ; uint16_t vp_err_state ; uint16_t vp_prev_err_state ; struct qla_hw_data *hw ; struct req_que *req ; int fw_heartbeat_counter ; int seconds_since_last_heartbeat ; struct fc_host_statistics fc_host_stat ; struct qla_statistics qla_stats ; struct bidi_statistics bidi_stats ; atomic_t vref_count ; struct qla8044_reset_template reset_tmplt ; }; typedef struct scsi_qla_host scsi_qla_host_t; struct qla_tgt_vp_map { uint8_t idx ; scsi_qla_host_t *vha ; }; enum nexus_wait_type { WAIT_HOST = 0, WAIT_TARGET = 1, WAIT_LUN = 2 } ; struct qla2300_fw_dump { uint16_t hccr ; uint16_t pbiu_reg[8U] ; uint16_t risc_host_reg[8U] ; uint16_t mailbox_reg[32U] ; uint16_t resp_dma_reg[32U] ; uint16_t dma_reg[48U] ; uint16_t risc_hdw_reg[16U] ; uint16_t risc_gp0_reg[16U] ; uint16_t risc_gp1_reg[16U] ; uint16_t risc_gp2_reg[16U] ; uint16_t risc_gp3_reg[16U] ; uint16_t risc_gp4_reg[16U] ; uint16_t risc_gp5_reg[16U] ; uint16_t risc_gp6_reg[16U] ; uint16_t risc_gp7_reg[16U] ; uint16_t frame_buf_hdw_reg[64U] ; uint16_t fpm_b0_reg[64U] ; uint16_t fpm_b1_reg[64U] ; uint16_t risc_ram[63488U] ; uint16_t stack_ram[4096U] ; uint16_t data_ram[1U] ; }; struct qla2100_fw_dump { uint16_t hccr ; uint16_t pbiu_reg[8U] ; uint16_t mailbox_reg[32U] ; uint16_t dma_reg[48U] ; uint16_t risc_hdw_reg[16U] ; uint16_t risc_gp0_reg[16U] ; uint16_t risc_gp1_reg[16U] ; uint16_t risc_gp2_reg[16U] ; uint16_t risc_gp3_reg[16U] ; uint16_t risc_gp4_reg[16U] ; uint16_t risc_gp5_reg[16U] ; uint16_t risc_gp6_reg[16U] ; uint16_t risc_gp7_reg[16U] ; uint16_t frame_buf_hdw_reg[16U] ; uint16_t fpm_b0_reg[64U] ; uint16_t fpm_b1_reg[64U] ; uint16_t risc_ram[61440U] ; }; struct qla24xx_fw_dump { uint32_t host_status ; uint32_t host_reg[32U] ; uint32_t shadow_reg[7U] ; uint16_t mailbox_reg[32U] ; uint32_t xseq_gp_reg[128U] ; uint32_t xseq_0_reg[16U] ; uint32_t xseq_1_reg[16U] ; uint32_t rseq_gp_reg[128U] ; uint32_t rseq_0_reg[16U] ; uint32_t rseq_1_reg[16U] ; uint32_t rseq_2_reg[16U] ; uint32_t cmd_dma_reg[16U] ; uint32_t req0_dma_reg[15U] ; uint32_t resp0_dma_reg[15U] ; uint32_t req1_dma_reg[15U] ; uint32_t xmt0_dma_reg[32U] ; uint32_t xmt1_dma_reg[32U] ; uint32_t xmt2_dma_reg[32U] ; uint32_t xmt3_dma_reg[32U] ; uint32_t xmt4_dma_reg[32U] ; uint32_t xmt_data_dma_reg[16U] ; uint32_t rcvt0_data_dma_reg[32U] ; uint32_t rcvt1_data_dma_reg[32U] ; uint32_t risc_gp_reg[128U] ; uint32_t lmc_reg[112U] ; uint32_t fpm_hdw_reg[192U] ; uint32_t fb_hdw_reg[176U] ; uint32_t code_ram[8192U] ; uint32_t ext_mem[1U] ; }; struct qla25xx_fw_dump { uint32_t host_status ; uint32_t host_risc_reg[32U] ; uint32_t pcie_regs[4U] ; uint32_t host_reg[32U] ; uint32_t shadow_reg[11U] ; uint32_t risc_io_reg ; uint16_t mailbox_reg[32U] ; uint32_t xseq_gp_reg[128U] ; uint32_t xseq_0_reg[48U] ; uint32_t xseq_1_reg[16U] ; uint32_t rseq_gp_reg[128U] ; uint32_t rseq_0_reg[32U] ; uint32_t rseq_1_reg[16U] ; uint32_t rseq_2_reg[16U] ; uint32_t aseq_gp_reg[128U] ; uint32_t aseq_0_reg[32U] ; uint32_t aseq_1_reg[16U] ; uint32_t aseq_2_reg[16U] ; uint32_t cmd_dma_reg[16U] ; 
uint32_t req0_dma_reg[15U] ; uint32_t resp0_dma_reg[15U] ; uint32_t req1_dma_reg[15U] ; uint32_t xmt0_dma_reg[32U] ; uint32_t xmt1_dma_reg[32U] ; uint32_t xmt2_dma_reg[32U] ; uint32_t xmt3_dma_reg[32U] ; uint32_t xmt4_dma_reg[32U] ; uint32_t xmt_data_dma_reg[16U] ; uint32_t rcvt0_data_dma_reg[32U] ; uint32_t rcvt1_data_dma_reg[32U] ; uint32_t risc_gp_reg[128U] ; uint32_t lmc_reg[128U] ; uint32_t fpm_hdw_reg[192U] ; uint32_t fb_hdw_reg[192U] ; uint32_t code_ram[8192U] ; uint32_t ext_mem[1U] ; }; struct qla81xx_fw_dump { uint32_t host_status ; uint32_t host_risc_reg[32U] ; uint32_t pcie_regs[4U] ; uint32_t host_reg[32U] ; uint32_t shadow_reg[11U] ; uint32_t risc_io_reg ; uint16_t mailbox_reg[32U] ; uint32_t xseq_gp_reg[128U] ; uint32_t xseq_0_reg[48U] ; uint32_t xseq_1_reg[16U] ; uint32_t rseq_gp_reg[128U] ; uint32_t rseq_0_reg[32U] ; uint32_t rseq_1_reg[16U] ; uint32_t rseq_2_reg[16U] ; uint32_t aseq_gp_reg[128U] ; uint32_t aseq_0_reg[32U] ; uint32_t aseq_1_reg[16U] ; uint32_t aseq_2_reg[16U] ; uint32_t cmd_dma_reg[16U] ; uint32_t req0_dma_reg[15U] ; uint32_t resp0_dma_reg[15U] ; uint32_t req1_dma_reg[15U] ; uint32_t xmt0_dma_reg[32U] ; uint32_t xmt1_dma_reg[32U] ; uint32_t xmt2_dma_reg[32U] ; uint32_t xmt3_dma_reg[32U] ; uint32_t xmt4_dma_reg[32U] ; uint32_t xmt_data_dma_reg[16U] ; uint32_t rcvt0_data_dma_reg[32U] ; uint32_t rcvt1_data_dma_reg[32U] ; uint32_t risc_gp_reg[128U] ; uint32_t lmc_reg[128U] ; uint32_t fpm_hdw_reg[224U] ; uint32_t fb_hdw_reg[208U] ; uint32_t code_ram[8192U] ; uint32_t ext_mem[1U] ; }; struct qla83xx_fw_dump { uint32_t host_status ; uint32_t host_risc_reg[48U] ; uint32_t pcie_regs[4U] ; uint32_t host_reg[32U] ; uint32_t shadow_reg[11U] ; uint32_t risc_io_reg ; uint16_t mailbox_reg[32U] ; uint32_t xseq_gp_reg[256U] ; uint32_t xseq_0_reg[48U] ; uint32_t xseq_1_reg[16U] ; uint32_t xseq_2_reg[16U] ; uint32_t rseq_gp_reg[256U] ; uint32_t rseq_0_reg[32U] ; uint32_t rseq_1_reg[16U] ; uint32_t rseq_2_reg[16U] ; uint32_t rseq_3_reg[16U] ; uint32_t aseq_gp_reg[256U] ; uint32_t aseq_0_reg[32U] ; uint32_t aseq_1_reg[16U] ; uint32_t aseq_2_reg[16U] ; uint32_t aseq_3_reg[16U] ; uint32_t cmd_dma_reg[64U] ; uint32_t req0_dma_reg[15U] ; uint32_t resp0_dma_reg[15U] ; uint32_t req1_dma_reg[15U] ; uint32_t xmt0_dma_reg[32U] ; uint32_t xmt1_dma_reg[32U] ; uint32_t xmt2_dma_reg[32U] ; uint32_t xmt3_dma_reg[32U] ; uint32_t xmt4_dma_reg[32U] ; uint32_t xmt_data_dma_reg[16U] ; uint32_t rcvt0_data_dma_reg[32U] ; uint32_t rcvt1_data_dma_reg[32U] ; uint32_t risc_gp_reg[128U] ; uint32_t lmc_reg[128U] ; uint32_t fpm_hdw_reg[256U] ; uint32_t rq0_array_reg[256U] ; uint32_t rq1_array_reg[256U] ; uint32_t rp0_array_reg[256U] ; uint32_t rp1_array_reg[256U] ; uint32_t queue_control_reg[16U] ; uint32_t fb_hdw_reg[432U] ; uint32_t at0_array_reg[128U] ; uint32_t code_ram[9216U] ; uint32_t ext_mem[1U] ; }; union __anonunion_isp_306 { struct qla2100_fw_dump isp21 ; struct qla2300_fw_dump isp23 ; struct qla24xx_fw_dump isp24 ; struct qla25xx_fw_dump isp25 ; struct qla81xx_fw_dump isp81 ; struct qla83xx_fw_dump isp83 ; }; struct qla2xxx_fw_dump { uint8_t signature[4U] ; uint32_t version ; uint32_t fw_major_version ; uint32_t fw_minor_version ; uint32_t fw_subminor_version ; uint32_t fw_attributes ; uint32_t vendor ; uint32_t device ; uint32_t subsystem_vendor ; uint32_t subsystem_device ; uint32_t fixed_size ; uint32_t mem_size ; uint32_t req_q_size ; uint32_t rsp_q_size ; uint32_t eft_size ; uint32_t eft_addr_l ; uint32_t eft_addr_h ; uint32_t header_size ; union __anonunion_isp_306 isp ; }; struct 
attribute_container { struct list_head node ; struct klist containers ; struct class *class ; struct attribute_group const *grp ; struct device_attribute **attrs ; int (*match)(struct attribute_container * , struct device * ) ; unsigned long flags ; }; struct transport_container; struct transport_container { struct attribute_container ac ; struct attribute_group const *statistics ; }; struct scsi_transport_template { struct transport_container host_attrs ; struct transport_container target_attrs ; struct transport_container device_attrs ; int (*user_scan)(struct Scsi_Host * , uint , uint , uint ) ; int device_size ; int device_private_offset ; int target_size ; int target_private_offset ; int host_size ; unsigned char create_work_queue : 1 ; void (*eh_strategy_handler)(struct Scsi_Host * ) ; enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd * ) ; int (*it_nexus_response)(struct Scsi_Host * , u64 , int ) ; int (*tsk_mgmt_response)(struct Scsi_Host * , u64 , u64 , int ) ; }; struct __anonstruct_isp2x_308 { uint32_t sys_define_2 ; target_id_t target ; uint16_t lun ; uint8_t target_id ; uint8_t reserved_1 ; uint16_t status_modifier ; uint16_t status ; uint16_t task_flags ; uint16_t seq_id ; uint16_t srr_rx_id ; uint32_t srr_rel_offs ; uint16_t srr_ui ; uint16_t srr_ox_id ; uint8_t reserved_2[28U] ; }; struct __anonstruct_isp24_309 { uint32_t reserved ; uint16_t nport_handle ; uint16_t reserved_2 ; uint16_t flags ; uint16_t srr_rx_id ; uint16_t status ; uint8_t status_subcode ; uint8_t fw_handle ; uint32_t exchange_address ; uint32_t srr_rel_offs ; uint16_t srr_ui ; uint16_t srr_ox_id ; uint8_t reserved_4[19U] ; uint8_t vp_index ; uint32_t reserved_5 ; uint8_t port_id[3U] ; uint8_t reserved_6 ; }; union __anonunion_u_307 { struct __anonstruct_isp2x_308 isp2x ; struct __anonstruct_isp24_309 isp24 ; }; struct imm_ntfy_from_isp { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; union __anonunion_u_307 u ; uint16_t reserved_7 ; uint16_t ox_id ; }; struct fcp_hdr { uint8_t r_ctl ; uint8_t d_id[3U] ; uint8_t cs_ctl ; uint8_t s_id[3U] ; uint8_t type ; uint8_t f_ctl[3U] ; uint8_t seq_id ; uint8_t df_ctl ; uint16_t seq_cnt ; uint16_t ox_id ; uint16_t rx_id ; uint32_t parameter ; }; struct fcp_hdr_le { uint8_t d_id[3U] ; uint8_t r_ctl ; uint8_t s_id[3U] ; uint8_t cs_ctl ; uint8_t f_ctl[3U] ; uint8_t type ; uint16_t seq_cnt ; uint8_t df_ctl ; uint8_t seq_id ; uint16_t rx_id ; uint16_t ox_id ; uint32_t parameter ; }; struct atio7_fcp_cmnd { uint64_t lun ; uint8_t cmnd_ref ; unsigned char task_attr : 3 ; unsigned char reserved : 5 ; uint8_t task_mgmt_flags ; unsigned char wrdata : 1 ; unsigned char rddata : 1 ; unsigned char add_cdb_len : 6 ; uint8_t cdb[16U] ; uint8_t add_cdb[4U] ; }; struct __anonstruct_isp2x_314 { uint16_t entry_hdr ; uint8_t sys_define ; uint8_t entry_status ; uint32_t sys_define_2 ; target_id_t target ; uint16_t rx_id ; uint16_t flags ; uint16_t status ; uint8_t command_ref ; uint8_t task_codes ; uint8_t task_flags ; uint8_t execution_codes ; uint8_t cdb[16U] ; uint32_t data_length ; uint16_t lun ; uint8_t initiator_port_name[8U] ; uint16_t reserved_32[6U] ; uint16_t ox_id ; }; struct __anonstruct_isp24_315 { uint16_t entry_hdr ; uint8_t fcp_cmnd_len_low ; unsigned char fcp_cmnd_len_high : 4 ; unsigned char attr : 4 ; uint32_t exchange_addr ; struct fcp_hdr fcp_hdr ; struct atio7_fcp_cmnd fcp_cmnd ; }; struct __anonstruct_raw_316 { uint8_t entry_type ; uint8_t entry_count ; uint8_t data[58U] ; uint32_t signature ; }; union __anonunion_u_313 
{ struct __anonstruct_isp2x_314 isp2x ; struct __anonstruct_isp24_315 isp24 ; struct __anonstruct_raw_316 raw ; }; struct atio_from_isp { union __anonunion_u_313 u ; }; struct abts_recv_from_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint8_t reserved_1[6U] ; uint16_t nport_handle ; uint8_t reserved_2[2U] ; uint8_t vp_index ; unsigned char reserved_3 : 4 ; unsigned char sof_type : 4 ; uint32_t exchange_address ; struct fcp_hdr_le fcp_hdr_le ; uint8_t reserved_4[16U] ; uint32_t exchange_addr_to_abort ; }; struct qla_tgt_mgmt_cmd; struct qla_tgt_sess; struct qla_tgt_func_tmpl { int (*handle_cmd)(struct scsi_qla_host * , struct qla_tgt_cmd * , unsigned char * , uint32_t , int , int , int ) ; void (*handle_data)(struct qla_tgt_cmd * ) ; int (*handle_tmr)(struct qla_tgt_mgmt_cmd * , uint32_t , uint8_t , uint32_t ) ; void (*free_cmd)(struct qla_tgt_cmd * ) ; void (*free_mcmd)(struct qla_tgt_mgmt_cmd * ) ; void (*free_session)(struct qla_tgt_sess * ) ; int (*check_initiator_node_acl)(struct scsi_qla_host * , unsigned char * , void * , uint8_t * , uint16_t ) ; void (*update_sess)(struct qla_tgt_sess * , port_id_t , uint16_t , bool ) ; struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host * , uint16_t const ) ; struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host * , uint8_t const * ) ; void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess * ) ; void (*put_sess)(struct qla_tgt_sess * ) ; void (*shutdown_sess)(struct qla_tgt_sess * ) ; }; struct configfs_item_operations; struct configfs_group_operations; struct configfs_attribute; struct configfs_subsystem; struct config_group; struct config_item_type; struct config_item { char *ci_name ; char ci_namebuf[20U] ; struct kref ci_kref ; struct list_head ci_entry ; struct config_item *ci_parent ; struct config_group *ci_group ; struct config_item_type *ci_type ; struct dentry *ci_dentry ; }; struct config_item_type { struct module *ct_owner ; struct configfs_item_operations *ct_item_ops ; struct configfs_group_operations *ct_group_ops ; struct configfs_attribute **ct_attrs ; }; struct config_group { struct config_item cg_item ; struct list_head cg_children ; struct configfs_subsystem *cg_subsys ; struct config_group **default_groups ; }; struct configfs_attribute { char const *ca_name ; struct module *ca_owner ; umode_t ca_mode ; }; struct configfs_item_operations { void (*release)(struct config_item * ) ; ssize_t (*show_attribute)(struct config_item * , struct configfs_attribute * , char * ) ; ssize_t (*store_attribute)(struct config_item * , struct configfs_attribute * , char const * , size_t ) ; int (*allow_link)(struct config_item * , struct config_item * ) ; int (*drop_link)(struct config_item * , struct config_item * ) ; }; struct configfs_group_operations { struct config_item *(*make_item)(struct config_group * , char const * ) ; struct config_group *(*make_group)(struct config_group * , char const * ) ; int (*commit_item)(struct config_item * ) ; void (*disconnect_notify)(struct config_group * , struct config_item * ) ; void (*drop_item)(struct config_group * , struct config_item * ) ; }; struct configfs_subsystem { struct config_group su_group ; struct mutex su_mutex ; }; struct percpu_ida_cpu; struct __anonstruct_ldv_44369_321 { spinlock_t lock ; unsigned int cpu_last_stolen ; wait_queue_head_t wait ; unsigned int nr_free ; unsigned int *freelist ; }; struct percpu_ida { unsigned int nr_tags ; struct percpu_ida_cpu *tag_cpu ; cpumask_t cpus_have_tags ; struct 
__anonstruct_ldv_44369_321 ldv_44369 ; }; struct hlist_nulls_node; struct hlist_nulls_head { struct hlist_nulls_node *first ; }; struct hlist_nulls_node { struct hlist_nulls_node *next ; struct hlist_nulls_node **pprev ; }; struct pm_qos_request { struct plist_node node ; int pm_qos_class ; struct delayed_work work ; }; struct pm_qos_flags_request { struct list_head node ; s32 flags ; }; enum dev_pm_qos_req_type { DEV_PM_QOS_LATENCY = 1, DEV_PM_QOS_FLAGS = 2 } ; union __anonunion_data_322 { struct plist_node pnode ; struct pm_qos_flags_request flr ; }; struct dev_pm_qos_request { enum dev_pm_qos_req_type type ; union __anonunion_data_322 data ; struct device *dev ; }; enum pm_qos_type { PM_QOS_UNITIALIZED = 0, PM_QOS_MAX = 1, PM_QOS_MIN = 2 } ; struct pm_qos_constraints { struct plist_head list ; s32 target_value ; s32 default_value ; enum pm_qos_type type ; struct blocking_notifier_head *notifiers ; }; struct pm_qos_flags { struct list_head list ; s32 effective_flags ; }; struct dev_pm_qos { struct pm_qos_constraints latency ; struct pm_qos_flags flags ; struct dev_pm_qos_request *latency_req ; struct dev_pm_qos_request *flags_req ; }; struct dql { unsigned int num_queued ; unsigned int adj_limit ; unsigned int last_obj_cnt ; unsigned int limit ; unsigned int num_completed ; unsigned int prev_ovlimit ; unsigned int prev_num_queued ; unsigned int prev_last_obj_cnt ; unsigned int lowest_slack ; unsigned long slack_start_time ; unsigned int max_limit ; unsigned int min_limit ; unsigned int slack_hold_time ; }; struct __anonstruct_sync_serial_settings_323 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; }; typedef struct __anonstruct_sync_serial_settings_323 sync_serial_settings; struct __anonstruct_te1_settings_324 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; unsigned int slot_map ; }; typedef struct __anonstruct_te1_settings_324 te1_settings; struct __anonstruct_raw_hdlc_proto_325 { unsigned short encoding ; unsigned short parity ; }; typedef struct __anonstruct_raw_hdlc_proto_325 raw_hdlc_proto; struct __anonstruct_fr_proto_326 { unsigned int t391 ; unsigned int t392 ; unsigned int n391 ; unsigned int n392 ; unsigned int n393 ; unsigned short lmi ; unsigned short dce ; }; typedef struct __anonstruct_fr_proto_326 fr_proto; struct __anonstruct_fr_proto_pvc_327 { unsigned int dlci ; }; typedef struct __anonstruct_fr_proto_pvc_327 fr_proto_pvc; struct __anonstruct_fr_proto_pvc_info_328 { unsigned int dlci ; char master[16U] ; }; typedef struct __anonstruct_fr_proto_pvc_info_328 fr_proto_pvc_info; struct __anonstruct_cisco_proto_329 { unsigned int interval ; unsigned int timeout ; }; typedef struct __anonstruct_cisco_proto_329 cisco_proto; struct ifmap { unsigned long mem_start ; unsigned long mem_end ; unsigned short base_addr ; unsigned char irq ; unsigned char dma ; unsigned char port ; }; union __anonunion_ifs_ifsu_330 { raw_hdlc_proto *raw_hdlc ; cisco_proto *cisco ; fr_proto *fr ; fr_proto_pvc *fr_pvc ; fr_proto_pvc_info *fr_pvc_info ; sync_serial_settings *sync ; te1_settings *te1 ; }; struct if_settings { unsigned int type ; unsigned int size ; union __anonunion_ifs_ifsu_330 ifs_ifsu ; }; union __anonunion_ifr_ifrn_331 { char ifrn_name[16U] ; }; union __anonunion_ifr_ifru_332 { struct sockaddr ifru_addr ; struct sockaddr ifru_dstaddr ; struct sockaddr ifru_broadaddr ; struct sockaddr ifru_netmask ; struct sockaddr ifru_hwaddr ; short ifru_flags ; int ifru_ivalue ; int ifru_mtu ; struct ifmap ifru_map ; char 
ifru_slave[16U] ; char ifru_newname[16U] ; void *ifru_data ; struct if_settings ifru_settings ; }; struct ifreq { union __anonunion_ifr_ifrn_331 ifr_ifrn ; union __anonunion_ifr_ifru_332 ifr_ifru ; }; typedef s32 compat_time_t; typedef s32 compat_long_t; typedef u32 compat_uptr_t; struct compat_timespec { compat_time_t tv_sec ; s32 tv_nsec ; }; struct compat_robust_list { compat_uptr_t next ; }; struct compat_robust_list_head { struct compat_robust_list list ; compat_long_t futex_offset ; compat_uptr_t list_op_pending ; }; struct ethhdr { unsigned char h_dest[6U] ; unsigned char h_source[6U] ; __be16 h_proto ; }; struct ethtool_cmd { __u32 cmd ; __u32 supported ; __u32 advertising ; __u16 speed ; __u8 duplex ; __u8 port ; __u8 phy_address ; __u8 transceiver ; __u8 autoneg ; __u8 mdio_support ; __u32 maxtxpkt ; __u32 maxrxpkt ; __u16 speed_hi ; __u8 eth_tp_mdix ; __u8 eth_tp_mdix_ctrl ; __u32 lp_advertising ; __u32 reserved[2U] ; }; struct ethtool_drvinfo { __u32 cmd ; char driver[32U] ; char version[32U] ; char fw_version[32U] ; char bus_info[32U] ; char reserved1[32U] ; char reserved2[12U] ; __u32 n_priv_flags ; __u32 n_stats ; __u32 testinfo_len ; __u32 eedump_len ; __u32 regdump_len ; }; struct ethtool_wolinfo { __u32 cmd ; __u32 supported ; __u32 wolopts ; __u8 sopass[6U] ; }; struct ethtool_regs { __u32 cmd ; __u32 version ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eeprom { __u32 cmd ; __u32 magic ; __u32 offset ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eee { __u32 cmd ; __u32 supported ; __u32 advertised ; __u32 lp_advertised ; __u32 eee_active ; __u32 eee_enabled ; __u32 tx_lpi_enabled ; __u32 tx_lpi_timer ; __u32 reserved[2U] ; }; struct ethtool_modinfo { __u32 cmd ; __u32 type ; __u32 eeprom_len ; __u32 reserved[8U] ; }; struct ethtool_coalesce { __u32 cmd ; __u32 rx_coalesce_usecs ; __u32 rx_max_coalesced_frames ; __u32 rx_coalesce_usecs_irq ; __u32 rx_max_coalesced_frames_irq ; __u32 tx_coalesce_usecs ; __u32 tx_max_coalesced_frames ; __u32 tx_coalesce_usecs_irq ; __u32 tx_max_coalesced_frames_irq ; __u32 stats_block_coalesce_usecs ; __u32 use_adaptive_rx_coalesce ; __u32 use_adaptive_tx_coalesce ; __u32 pkt_rate_low ; __u32 rx_coalesce_usecs_low ; __u32 rx_max_coalesced_frames_low ; __u32 tx_coalesce_usecs_low ; __u32 tx_max_coalesced_frames_low ; __u32 pkt_rate_high ; __u32 rx_coalesce_usecs_high ; __u32 rx_max_coalesced_frames_high ; __u32 tx_coalesce_usecs_high ; __u32 tx_max_coalesced_frames_high ; __u32 rate_sample_interval ; }; struct ethtool_ringparam { __u32 cmd ; __u32 rx_max_pending ; __u32 rx_mini_max_pending ; __u32 rx_jumbo_max_pending ; __u32 tx_max_pending ; __u32 rx_pending ; __u32 rx_mini_pending ; __u32 rx_jumbo_pending ; __u32 tx_pending ; }; struct ethtool_channels { __u32 cmd ; __u32 max_rx ; __u32 max_tx ; __u32 max_other ; __u32 max_combined ; __u32 rx_count ; __u32 tx_count ; __u32 other_count ; __u32 combined_count ; }; struct ethtool_pauseparam { __u32 cmd ; __u32 autoneg ; __u32 rx_pause ; __u32 tx_pause ; }; struct ethtool_test { __u32 cmd ; __u32 flags ; __u32 reserved ; __u32 len ; __u64 data[0U] ; }; struct ethtool_stats { __u32 cmd ; __u32 n_stats ; __u64 data[0U] ; }; struct ethtool_tcpip4_spec { __be32 ip4src ; __be32 ip4dst ; __be16 psrc ; __be16 pdst ; __u8 tos ; }; struct ethtool_ah_espip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 spi ; __u8 tos ; }; struct ethtool_usrip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 l4_4_bytes ; __u8 tos ; __u8 ip_ver ; __u8 proto ; }; union ethtool_flow_union { struct ethtool_tcpip4_spec 
tcp_ip4_spec ; struct ethtool_tcpip4_spec udp_ip4_spec ; struct ethtool_tcpip4_spec sctp_ip4_spec ; struct ethtool_ah_espip4_spec ah_ip4_spec ; struct ethtool_ah_espip4_spec esp_ip4_spec ; struct ethtool_usrip4_spec usr_ip4_spec ; struct ethhdr ether_spec ; __u8 hdata[52U] ; }; struct ethtool_flow_ext { __u8 padding[2U] ; unsigned char h_dest[6U] ; __be16 vlan_etype ; __be16 vlan_tci ; __be32 data[2U] ; }; struct ethtool_rx_flow_spec { __u32 flow_type ; union ethtool_flow_union h_u ; struct ethtool_flow_ext h_ext ; union ethtool_flow_union m_u ; struct ethtool_flow_ext m_ext ; __u64 ring_cookie ; __u32 location ; }; struct ethtool_rxnfc { __u32 cmd ; __u32 flow_type ; __u64 data ; struct ethtool_rx_flow_spec fs ; __u32 rule_cnt ; __u32 rule_locs[0U] ; }; struct ethtool_flash { __u32 cmd ; __u32 region ; char data[128U] ; }; struct ethtool_dump { __u32 cmd ; __u32 version ; __u32 flag ; __u32 len ; __u8 data[0U] ; }; struct ethtool_ts_info { __u32 cmd ; __u32 so_timestamping ; __s32 phc_index ; __u32 tx_types ; __u32 tx_reserved[3U] ; __u32 rx_filters ; __u32 rx_reserved[3U] ; }; enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ; struct ethtool_ops { int (*get_settings)(struct net_device * , struct ethtool_cmd * ) ; int (*set_settings)(struct net_device * , struct ethtool_cmd * ) ; void (*get_drvinfo)(struct net_device * , struct ethtool_drvinfo * ) ; int (*get_regs_len)(struct net_device * ) ; void (*get_regs)(struct net_device * , struct ethtool_regs * , void * ) ; void (*get_wol)(struct net_device * , struct ethtool_wolinfo * ) ; int (*set_wol)(struct net_device * , struct ethtool_wolinfo * ) ; u32 (*get_msglevel)(struct net_device * ) ; void (*set_msglevel)(struct net_device * , u32 ) ; int (*nway_reset)(struct net_device * ) ; u32 (*get_link)(struct net_device * ) ; int (*get_eeprom_len)(struct net_device * ) ; int (*get_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*set_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; int (*set_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; void (*get_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; int (*set_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; void (*get_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; int (*set_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; void (*self_test)(struct net_device * , struct ethtool_test * , u64 * ) ; void (*get_strings)(struct net_device * , u32 , u8 * ) ; int (*set_phys_id)(struct net_device * , enum ethtool_phys_id_state ) ; void (*get_ethtool_stats)(struct net_device * , struct ethtool_stats * , u64 * ) ; int (*begin)(struct net_device * ) ; void (*complete)(struct net_device * ) ; u32 (*get_priv_flags)(struct net_device * ) ; int (*set_priv_flags)(struct net_device * , u32 ) ; int (*get_sset_count)(struct net_device * , int ) ; int (*get_rxnfc)(struct net_device * , struct ethtool_rxnfc * , u32 * ) ; int (*set_rxnfc)(struct net_device * , struct ethtool_rxnfc * ) ; int (*flash_device)(struct net_device * , struct ethtool_flash * ) ; int (*reset)(struct net_device * , u32 * ) ; u32 (*get_rxfh_indir_size)(struct net_device * ) ; int (*get_rxfh_indir)(struct net_device * , u32 * ) ; int (*set_rxfh_indir)(struct net_device * , u32 const * ) ; void (*get_channels)(struct net_device * , struct ethtool_channels * ) ; int (*set_channels)(struct net_device 
* , struct ethtool_channels * ) ; int (*get_dump_flag)(struct net_device * , struct ethtool_dump * ) ; int (*get_dump_data)(struct net_device * , struct ethtool_dump * , void * ) ; int (*set_dump)(struct net_device * , struct ethtool_dump * ) ; int (*get_ts_info)(struct net_device * , struct ethtool_ts_info * ) ; int (*get_module_info)(struct net_device * , struct ethtool_modinfo * ) ; int (*get_module_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_eee)(struct net_device * , struct ethtool_eee * ) ; int (*set_eee)(struct net_device * , struct ethtool_eee * ) ; }; struct prot_inuse; struct netns_core { struct ctl_table_header *sysctl_hdr ; int sysctl_somaxconn ; struct prot_inuse *inuse ; }; struct u64_stats_sync { }; struct ipstats_mib { u64 mibs[36U] ; struct u64_stats_sync syncp ; }; struct icmp_mib { unsigned long mibs[28U] ; }; struct icmpmsg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6_mib { unsigned long mibs[6U] ; }; struct icmpv6_mib_device { atomic_long_t mibs[6U] ; }; struct icmpv6msg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6msg_mib_device { atomic_long_t mibs[512U] ; }; struct tcp_mib { unsigned long mibs[16U] ; }; struct udp_mib { unsigned long mibs[8U] ; }; struct linux_mib { unsigned long mibs[96U] ; }; struct linux_xfrm_mib { unsigned long mibs[29U] ; }; struct netns_mib { struct tcp_mib *tcp_statistics[1U] ; struct ipstats_mib *ip_statistics[1U] ; struct linux_mib *net_statistics[1U] ; struct udp_mib *udp_statistics[1U] ; struct udp_mib *udplite_statistics[1U] ; struct icmp_mib *icmp_statistics[1U] ; struct icmpmsg_mib *icmpmsg_statistics ; struct proc_dir_entry *proc_net_devsnmp6 ; struct udp_mib *udp_stats_in6[1U] ; struct udp_mib *udplite_stats_in6[1U] ; struct ipstats_mib *ipv6_statistics[1U] ; struct icmpv6_mib *icmpv6_statistics[1U] ; struct icmpv6msg_mib *icmpv6msg_statistics ; struct linux_xfrm_mib *xfrm_statistics[1U] ; }; struct netns_unix { int sysctl_max_dgram_qlen ; struct ctl_table_header *ctl ; }; struct netns_packet { struct mutex sklist_lock ; struct hlist_head sklist ; }; struct netns_frags { int nqueues ; struct list_head lru_list ; spinlock_t lru_lock ; struct percpu_counter mem ; int timeout ; int high_thresh ; int low_thresh ; }; struct tcpm_hash_bucket; struct ipv4_devconf; struct fib_rules_ops; struct fib_table; struct inet_peer_base; struct xt_table; struct netns_ipv4 { struct ctl_table_header *forw_hdr ; struct ctl_table_header *frags_hdr ; struct ctl_table_header *ipv4_hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *xfrm4_hdr ; struct ipv4_devconf *devconf_all ; struct ipv4_devconf *devconf_dflt ; struct fib_rules_ops *rules_ops ; bool fib_has_custom_rules ; struct fib_table *fib_local ; struct fib_table *fib_main ; struct fib_table *fib_default ; int fib_num_tclassid_users ; struct hlist_head *fib_table_hash ; struct sock *fibnl ; struct sock **icmp_sk ; struct inet_peer_base *peers ; struct tcpm_hash_bucket *tcp_metrics_hash ; unsigned int tcp_metrics_hash_log ; struct netns_frags frags ; struct xt_table *iptable_filter ; struct xt_table *iptable_mangle ; struct xt_table *iptable_raw ; struct xt_table *arptable_filter ; struct xt_table *iptable_security ; struct xt_table *nat_table ; int sysctl_icmp_echo_ignore_all ; int sysctl_icmp_echo_ignore_broadcasts ; int sysctl_icmp_ignore_bogus_error_responses ; int sysctl_icmp_ratelimit ; int sysctl_icmp_ratemask ; int sysctl_icmp_errors_use_inbound_ifaddr ; int sysctl_tcp_ecn ; kgid_t sysctl_ping_group_range[2U] ; long sysctl_tcp_mem[3U] ; 
atomic_t dev_addr_genid ; struct list_head mr_tables ; struct fib_rules_ops *mr_rules_ops ; atomic_t rt_genid ; }; struct neighbour; struct dst_ops { unsigned short family ; __be16 protocol ; unsigned int gc_thresh ; int (*gc)(struct dst_ops * ) ; struct dst_entry *(*check)(struct dst_entry * , __u32 ) ; unsigned int (*default_advmss)(struct dst_entry const * ) ; unsigned int (*mtu)(struct dst_entry const * ) ; u32 *(*cow_metrics)(struct dst_entry * , unsigned long ) ; void (*destroy)(struct dst_entry * ) ; void (*ifdown)(struct dst_entry * , struct net_device * , int ) ; struct dst_entry *(*negative_advice)(struct dst_entry * ) ; void (*link_failure)(struct sk_buff * ) ; void (*update_pmtu)(struct dst_entry * , struct sock * , struct sk_buff * , u32 ) ; void (*redirect)(struct dst_entry * , struct sock * , struct sk_buff * ) ; int (*local_out)(struct sk_buff * ) ; struct neighbour *(*neigh_lookup)(struct dst_entry const * , struct sk_buff * , void const * ) ; struct kmem_cache *kmem_cachep ; struct percpu_counter pcpuc_entries ; }; struct netns_sysctl_ipv6 { struct ctl_table_header *hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *icmp_hdr ; struct ctl_table_header *frags_hdr ; struct ctl_table_header *xfrm6_hdr ; int bindv6only ; int flush_delay ; int ip6_rt_max_size ; int ip6_rt_gc_min_interval ; int ip6_rt_gc_timeout ; int ip6_rt_gc_interval ; int ip6_rt_gc_elasticity ; int ip6_rt_mtu_expires ; int ip6_rt_min_advmss ; int icmpv6_time ; }; struct ipv6_devconf; struct rt6_info; struct rt6_statistics; struct fib6_table; struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl ; struct ipv6_devconf *devconf_all ; struct ipv6_devconf *devconf_dflt ; struct inet_peer_base *peers ; struct netns_frags frags ; struct xt_table *ip6table_filter ; struct xt_table *ip6table_mangle ; struct xt_table *ip6table_raw ; struct xt_table *ip6table_security ; struct xt_table *ip6table_nat ; struct rt6_info *ip6_null_entry ; struct rt6_statistics *rt6_stats ; struct timer_list ip6_fib_timer ; struct hlist_head *fib_table_hash ; struct fib6_table *fib6_main_tbl ; struct dst_ops ip6_dst_ops ; unsigned int ip6_rt_gc_expire ; unsigned long ip6_rt_last_gc ; struct rt6_info *ip6_prohibit_entry ; struct rt6_info *ip6_blk_hole_entry ; struct fib6_table *fib6_local_tbl ; struct fib_rules_ops *fib6_rules_ops ; struct sock **icmp_sk ; struct sock *ndisc_sk ; struct sock *tcp_sk ; struct sock *igmp_sk ; struct list_head mr6_tables ; struct fib_rules_ops *mr6_rules_ops ; atomic_t dev_addr_genid ; atomic_t rt_genid ; }; struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl ; struct netns_frags frags ; }; struct sctp_mib; struct netns_sctp { struct sctp_mib *sctp_statistics[1U] ; struct proc_dir_entry *proc_net_sctp ; struct ctl_table_header *sysctl_header ; struct sock *ctl_sock ; struct list_head local_addr_list ; struct list_head addr_waitq ; struct timer_list addr_wq_timer ; struct list_head auto_asconf_splist ; spinlock_t addr_wq_lock ; spinlock_t local_addr_lock ; unsigned int rto_initial ; unsigned int rto_min ; unsigned int rto_max ; int rto_alpha ; int rto_beta ; int max_burst ; int cookie_preserve_enable ; char *sctp_hmac_alg ; unsigned int valid_cookie_life ; unsigned int sack_timeout ; unsigned int hb_interval ; int max_retrans_association ; int max_retrans_path ; int max_retrans_init ; int pf_retrans ; int sndbuf_policy ; int rcvbuf_policy ; int default_auto_asconf ; int addip_enable ; int addip_noauth ; int prsctp_enable ; int auth_enable ; int scope_policy ; int rwnd_upd_shift ; unsigned long 
max_autoclose ; }; struct netns_dccp { struct sock *v4_ctl_sk ; struct sock *v6_ctl_sk ; }; union __anonunion_in6_u_350 { __u8 u6_addr8[16U] ; __be16 u6_addr16[8U] ; __be32 u6_addr32[4U] ; }; struct in6_addr { union __anonunion_in6_u_350 in6_u ; }; struct nf_logger; struct netns_nf { struct proc_dir_entry *proc_netfilter ; struct nf_logger const *nf_loggers[13U] ; struct ctl_table_header *nf_log_dir_header ; }; struct ebt_table; struct netns_xt { struct list_head tables[13U] ; bool notrack_deprecated_warning ; struct ebt_table *broute_table ; struct ebt_table *frame_filter ; struct ebt_table *frame_nat ; bool ulog_warn_deprecated ; bool ebt_ulog_warn_deprecated ; }; struct nf_proto_net { struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; struct ctl_table_header *ctl_compat_header ; struct ctl_table *ctl_compat_table ; unsigned int users ; }; struct nf_generic_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_tcp_net { struct nf_proto_net pn ; unsigned int timeouts[14U] ; unsigned int tcp_loose ; unsigned int tcp_be_liberal ; unsigned int tcp_max_retrans ; }; struct nf_udp_net { struct nf_proto_net pn ; unsigned int timeouts[2U] ; }; struct nf_icmp_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_ip_net { struct nf_generic_net generic ; struct nf_tcp_net tcp ; struct nf_udp_net udp ; struct nf_icmp_net icmp ; struct nf_icmp_net icmpv6 ; struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; }; struct ip_conntrack_stat; struct nf_ct_event_notifier; struct nf_exp_event_notifier; struct netns_ct { atomic_t count ; unsigned int expect_count ; unsigned int htable_size ; struct kmem_cache *nf_conntrack_cachep ; struct hlist_nulls_head *hash ; struct hlist_head *expect_hash ; struct hlist_nulls_head unconfirmed ; struct hlist_nulls_head dying ; struct hlist_nulls_head tmpl ; struct ip_conntrack_stat *stat ; struct nf_ct_event_notifier *nf_conntrack_event_cb ; struct nf_exp_event_notifier *nf_expect_event_cb ; int sysctl_events ; unsigned int sysctl_events_retry_timeout ; int sysctl_acct ; int sysctl_tstamp ; int sysctl_checksum ; unsigned int sysctl_log_invalid ; int sysctl_auto_assign_helper ; bool auto_assign_helper_warned ; struct nf_ip_net nf_ct_proto ; unsigned int labels_used ; u8 label_words ; struct hlist_head *nat_bysource ; unsigned int nat_htable_size ; struct ctl_table_header *sysctl_header ; struct ctl_table_header *acct_sysctl_header ; struct ctl_table_header *tstamp_sysctl_header ; struct ctl_table_header *event_sysctl_header ; struct ctl_table_header *helper_sysctl_header ; char *slabname ; }; struct xfrm_policy_hash { struct hlist_head *table ; unsigned int hmask ; }; struct netns_xfrm { struct list_head state_all ; struct hlist_head *state_bydst ; struct hlist_head *state_bysrc ; struct hlist_head *state_byspi ; unsigned int state_hmask ; unsigned int state_num ; struct work_struct state_hash_work ; struct hlist_head state_gc_list ; struct work_struct state_gc_work ; wait_queue_head_t km_waitq ; struct list_head policy_all ; struct hlist_head *policy_byidx ; unsigned int policy_idx_hmask ; struct hlist_head policy_inexact[6U] ; struct xfrm_policy_hash policy_bydst[6U] ; unsigned int policy_count[6U] ; struct work_struct policy_hash_work ; struct sock *nlsk ; struct sock *nlsk_stash ; u32 sysctl_aevent_etime ; u32 sysctl_aevent_rseqth ; int sysctl_larval_drop ; u32 sysctl_acq_expires ; struct ctl_table_header *sysctl_hdr ; struct dst_ops xfrm4_dst_ops ; struct dst_ops xfrm6_dst_ops ; }; struct 
net_generic; struct netns_ipvs; struct net { atomic_t passive ; atomic_t count ; spinlock_t rules_mod_lock ; struct list_head list ; struct list_head cleanup_list ; struct list_head exit_list ; struct user_namespace *user_ns ; unsigned int proc_inum ; struct proc_dir_entry *proc_net ; struct proc_dir_entry *proc_net_stat ; struct ctl_table_set sysctls ; struct sock *rtnl ; struct sock *genl_sock ; struct list_head dev_base_head ; struct hlist_head *dev_name_head ; struct hlist_head *dev_index_head ; unsigned int dev_base_seq ; int ifindex ; struct list_head rules_ops ; struct net_device *loopback_dev ; struct netns_core core ; struct netns_mib mib ; struct netns_packet packet ; struct netns_unix unx ; struct netns_ipv4 ipv4 ; struct netns_ipv6 ipv6 ; struct netns_sctp sctp ; struct netns_dccp dccp ; struct netns_nf nf ; struct netns_xt xt ; struct netns_ct ct ; struct netns_nf_frag nf_frag ; struct sock *nfnl ; struct sock *nfnl_stash ; struct sk_buff_head wext_nlevents ; struct net_generic *gen ; struct netns_xfrm xfrm ; struct netns_ipvs *ipvs ; struct sock *diag_nlsk ; atomic_t fnhe_genid ; }; struct dsa_chip_data { struct device *mii_bus ; int sw_addr ; char *port_names[12U] ; s8 *rtable ; }; struct dsa_platform_data { struct device *netdev ; int nr_chips ; struct dsa_chip_data *chip ; }; struct dsa_switch; struct dsa_switch_tree { struct dsa_platform_data *pd ; struct net_device *master_netdev ; __be16 tag_protocol ; s8 cpu_switch ; s8 cpu_port ; int link_poll_needed ; struct work_struct link_poll_work ; struct timer_list link_poll_timer ; struct dsa_switch *ds[4U] ; }; struct dsa_switch_driver; struct mii_bus; struct dsa_switch { struct dsa_switch_tree *dst ; int index ; struct dsa_chip_data *pd ; struct dsa_switch_driver *drv ; struct mii_bus *master_mii_bus ; u32 dsa_port_mask ; u32 phys_port_mask ; struct mii_bus *slave_mii_bus ; struct net_device *ports[12U] ; }; struct dsa_switch_driver { struct list_head list ; __be16 tag_protocol ; int priv_size ; char *(*probe)(struct mii_bus * , int ) ; int (*setup)(struct dsa_switch * ) ; int (*set_addr)(struct dsa_switch * , u8 * ) ; int (*phy_read)(struct dsa_switch * , int , int ) ; int (*phy_write)(struct dsa_switch * , int , int , u16 ) ; void (*poll_link)(struct dsa_switch * ) ; void (*get_strings)(struct dsa_switch * , int , uint8_t * ) ; void (*get_ethtool_stats)(struct dsa_switch * , int , uint64_t * ) ; int (*get_sset_count)(struct dsa_switch * ) ; }; struct ieee_ets { __u8 willing ; __u8 ets_cap ; __u8 cbs ; __u8 tc_tx_bw[8U] ; __u8 tc_rx_bw[8U] ; __u8 tc_tsa[8U] ; __u8 prio_tc[8U] ; __u8 tc_reco_bw[8U] ; __u8 tc_reco_tsa[8U] ; __u8 reco_prio_tc[8U] ; }; struct ieee_maxrate { __u64 tc_maxrate[8U] ; }; struct ieee_pfc { __u8 pfc_cap ; __u8 pfc_en ; __u8 mbc ; __u16 delay ; __u64 requests[8U] ; __u64 indications[8U] ; }; struct cee_pg { __u8 willing ; __u8 error ; __u8 pg_en ; __u8 tcs_supported ; __u8 pg_bw[8U] ; __u8 prio_pg[8U] ; }; struct cee_pfc { __u8 willing ; __u8 error ; __u8 pfc_en ; __u8 tcs_supported ; }; struct dcb_app { __u8 selector ; __u8 priority ; __u16 protocol ; }; struct dcb_peer_app_info { __u8 willing ; __u8 error ; }; struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_setets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_getmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_setmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_getpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_setpfc)(struct net_device * , 
struct ieee_pfc * ) ; int (*ieee_getapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_setapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_delapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_peer_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_peer_getpfc)(struct net_device * , struct ieee_pfc * ) ; u8 (*getstate)(struct net_device * ) ; u8 (*setstate)(struct net_device * , u8 ) ; void (*getpermhwaddr)(struct net_device * , u8 * ) ; void (*setpgtccfgtx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgtx)(struct net_device * , int , u8 ) ; void (*setpgtccfgrx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgrx)(struct net_device * , int , u8 ) ; void (*getpgtccfgtx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgtx)(struct net_device * , int , u8 * ) ; void (*getpgtccfgrx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgrx)(struct net_device * , int , u8 * ) ; void (*setpfccfg)(struct net_device * , int , u8 ) ; void (*getpfccfg)(struct net_device * , int , u8 * ) ; u8 (*setall)(struct net_device * ) ; u8 (*getcap)(struct net_device * , int , u8 * ) ; int (*getnumtcs)(struct net_device * , int , u8 * ) ; int (*setnumtcs)(struct net_device * , int , u8 ) ; u8 (*getpfcstate)(struct net_device * ) ; void (*setpfcstate)(struct net_device * , u8 ) ; void (*getbcncfg)(struct net_device * , int , u32 * ) ; void (*setbcncfg)(struct net_device * , int , u32 ) ; void (*getbcnrp)(struct net_device * , int , u8 * ) ; void (*setbcnrp)(struct net_device * , int , u8 ) ; u8 (*setapp)(struct net_device * , u8 , u16 , u8 ) ; u8 (*getapp)(struct net_device * , u8 , u16 ) ; u8 (*getfeatcfg)(struct net_device * , int , u8 * ) ; u8 (*setfeatcfg)(struct net_device * , int , u8 ) ; u8 (*getdcbx)(struct net_device * ) ; u8 (*setdcbx)(struct net_device * , u8 ) ; int (*peer_getappinfo)(struct net_device * , struct dcb_peer_app_info * , u16 * ) ; int (*peer_getapptable)(struct net_device * , struct dcb_app * ) ; int (*cee_peer_getpg)(struct net_device * , struct cee_pg * ) ; int (*cee_peer_getpfc)(struct net_device * , struct cee_pfc * ) ; }; struct taskstats { __u16 version ; __u32 ac_exitcode ; __u8 ac_flag ; __u8 ac_nice ; __u64 cpu_count ; __u64 cpu_delay_total ; __u64 blkio_count ; __u64 blkio_delay_total ; __u64 swapin_count ; __u64 swapin_delay_total ; __u64 cpu_run_real_total ; __u64 cpu_run_virtual_total ; char ac_comm[32U] ; __u8 ac_sched ; __u8 ac_pad[3U] ; __u32 ac_uid ; __u32 ac_gid ; __u32 ac_pid ; __u32 ac_ppid ; __u32 ac_btime ; __u64 ac_etime ; __u64 ac_utime ; __u64 ac_stime ; __u64 ac_minflt ; __u64 ac_majflt ; __u64 coremem ; __u64 virtmem ; __u64 hiwater_rss ; __u64 hiwater_vm ; __u64 read_char ; __u64 write_char ; __u64 read_syscalls ; __u64 write_syscalls ; __u64 read_bytes ; __u64 write_bytes ; __u64 cancelled_write_bytes ; __u64 nvcsw ; __u64 nivcsw ; __u64 ac_utimescaled ; __u64 ac_stimescaled ; __u64 cpu_scaled_run_real_total ; __u64 freepages_count ; __u64 freepages_delay_total ; }; struct idr_layer { int prefix ; unsigned long bitmap[4U] ; struct idr_layer *ary[256U] ; int count ; int layer ; struct callback_head callback_head ; }; struct idr { struct idr_layer *hint ; struct idr_layer *top ; struct idr_layer *id_free ; int layers ; int id_free_cnt ; int cur ; spinlock_t lock ; }; struct xattr_handler { char const *prefix ; int flags ; size_t (*list)(struct dentry * , char * , size_t , char const * , size_t , int ) ; int (*get)(struct dentry * , char 
const * , void * , size_t , int ) ; int (*set)(struct dentry * , char const * , void const * , size_t , int , int ) ; }; struct simple_xattrs { struct list_head head ; spinlock_t lock ; }; struct percpu_ref; typedef void percpu_ref_func_t(struct percpu_ref * ); struct percpu_ref { atomic_t count ; unsigned int *pcpu_count ; percpu_ref_func_t *release ; percpu_ref_func_t *confirm_kill ; struct callback_head rcu ; }; struct cgroupfs_root; struct cgroup_subsys; struct cgroup; struct css_id; struct eventfd_ctx; struct cgroup_subsys_state { struct cgroup *cgroup ; struct cgroup_subsys *ss ; struct percpu_ref refcnt ; struct cgroup_subsys_state *parent ; unsigned long flags ; struct css_id *id ; struct callback_head callback_head ; struct work_struct destroy_work ; }; struct cgroup_name { struct callback_head callback_head ; char name[] ; }; struct cgroup { unsigned long flags ; int id ; int nr_css ; struct list_head sibling ; struct list_head children ; struct list_head files ; struct cgroup *parent ; struct dentry *dentry ; u64 serial_nr ; struct cgroup_name *name ; struct cgroup_subsys_state *subsys[12U] ; struct cgroupfs_root *root ; struct list_head cset_links ; struct list_head release_list ; struct list_head pidlists ; struct mutex pidlist_mutex ; struct cgroup_subsys_state dummy_css ; struct callback_head callback_head ; struct work_struct destroy_work ; struct list_head event_list ; spinlock_t event_list_lock ; struct simple_xattrs xattrs ; }; struct cgroupfs_root { struct super_block *sb ; unsigned long subsys_mask ; int hierarchy_id ; struct list_head subsys_list ; struct cgroup top_cgroup ; int number_of_cgroups ; struct list_head root_list ; unsigned long flags ; struct idr cgroup_idr ; char release_agent_path[4096U] ; char name[64U] ; }; struct css_set { atomic_t refcount ; struct hlist_node hlist ; struct list_head tasks ; struct list_head cgrp_links ; struct cgroup_subsys_state *subsys[12U] ; struct callback_head callback_head ; }; struct cgroup_map_cb { int (*fill)(struct cgroup_map_cb * , char const * , u64 ) ; void *state ; }; struct cftype { char name[64U] ; int private ; umode_t mode ; size_t max_write_len ; unsigned int flags ; struct cgroup_subsys *ss ; int (*open)(struct inode * , struct file * ) ; ssize_t (*read)(struct cgroup_subsys_state * , struct cftype * , struct file * , char * , size_t , loff_t * ) ; u64 (*read_u64)(struct cgroup_subsys_state * , struct cftype * ) ; s64 (*read_s64)(struct cgroup_subsys_state * , struct cftype * ) ; int (*read_map)(struct cgroup_subsys_state * , struct cftype * , struct cgroup_map_cb * ) ; int (*read_seq_string)(struct cgroup_subsys_state * , struct cftype * , struct seq_file * ) ; ssize_t (*write)(struct cgroup_subsys_state * , struct cftype * , struct file * , char const * , size_t , loff_t * ) ; int (*write_u64)(struct cgroup_subsys_state * , struct cftype * , u64 ) ; int (*write_s64)(struct cgroup_subsys_state * , struct cftype * , s64 ) ; int (*write_string)(struct cgroup_subsys_state * , struct cftype * , char const * ) ; int (*trigger)(struct cgroup_subsys_state * , unsigned int ) ; int (*release)(struct inode * , struct file * ) ; int (*register_event)(struct cgroup_subsys_state * , struct cftype * , struct eventfd_ctx * , char const * ) ; void (*unregister_event)(struct cgroup_subsys_state * , struct cftype * , struct eventfd_ctx * ) ; }; struct cftype_set { struct list_head node ; struct cftype *cfts ; }; struct cgroup_taskset; struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state * ) 
; int (*css_online)(struct cgroup_subsys_state * ) ; void (*css_offline)(struct cgroup_subsys_state * ) ; void (*css_free)(struct cgroup_subsys_state * ) ; int (*can_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*cancel_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*fork)(struct task_struct * ) ; void (*exit)(struct cgroup_subsys_state * , struct cgroup_subsys_state * , struct task_struct * ) ; void (*bind)(struct cgroup_subsys_state * ) ; int subsys_id ; int disabled ; int early_init ; bool use_id ; bool broken_hierarchy ; bool warned_broken_hierarchy ; char const *name ; struct cgroupfs_root *root ; struct list_head sibling ; struct idr idr ; spinlock_t id_lock ; struct list_head cftsets ; struct cftype *base_cftypes ; struct cftype_set base_cftset ; struct module *module ; }; struct netprio_map { struct callback_head rcu ; u32 priomap_len ; u32 priomap[] ; }; struct ndmsg { __u8 ndm_family ; __u8 ndm_pad1 ; __u16 ndm_pad2 ; __s32 ndm_ifindex ; __u16 ndm_state ; __u8 ndm_flags ; __u8 ndm_type ; }; struct rtnl_link_stats64 { __u64 rx_packets ; __u64 tx_packets ; __u64 rx_bytes ; __u64 tx_bytes ; __u64 rx_errors ; __u64 tx_errors ; __u64 rx_dropped ; __u64 tx_dropped ; __u64 multicast ; __u64 collisions ; __u64 rx_length_errors ; __u64 rx_over_errors ; __u64 rx_crc_errors ; __u64 rx_frame_errors ; __u64 rx_fifo_errors ; __u64 rx_missed_errors ; __u64 tx_aborted_errors ; __u64 tx_carrier_errors ; __u64 tx_fifo_errors ; __u64 tx_heartbeat_errors ; __u64 tx_window_errors ; __u64 rx_compressed ; __u64 tx_compressed ; }; struct ifla_vf_info { __u32 vf ; __u8 mac[32U] ; __u32 vlan ; __u32 qos ; __u32 tx_rate ; __u32 spoofchk ; __u32 linkstate ; }; struct netpoll_info; struct phy_device; struct wireless_dev; enum netdev_tx { __NETDEV_TX_MIN = (-0x7FFFFFFF-1), NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, NETDEV_TX_LOCKED = 32 } ; typedef enum netdev_tx netdev_tx_t; struct net_device_stats { unsigned long rx_packets ; unsigned long tx_packets ; unsigned long rx_bytes ; unsigned long tx_bytes ; unsigned long rx_errors ; unsigned long tx_errors ; unsigned long rx_dropped ; unsigned long tx_dropped ; unsigned long multicast ; unsigned long collisions ; unsigned long rx_length_errors ; unsigned long rx_over_errors ; unsigned long rx_crc_errors ; unsigned long rx_frame_errors ; unsigned long rx_fifo_errors ; unsigned long rx_missed_errors ; unsigned long tx_aborted_errors ; unsigned long tx_carrier_errors ; unsigned long tx_fifo_errors ; unsigned long tx_heartbeat_errors ; unsigned long tx_window_errors ; unsigned long rx_compressed ; unsigned long tx_compressed ; }; struct neigh_parms; struct netdev_hw_addr_list { struct list_head list ; int count ; }; struct hh_cache { u16 hh_len ; u16 __pad ; seqlock_t hh_lock ; unsigned long hh_data[16U] ; }; struct header_ops { int (*create)(struct sk_buff * , struct net_device * , unsigned short , void const * , void const * , unsigned int ) ; int (*parse)(struct sk_buff const * , unsigned char * ) ; int (*rebuild)(struct sk_buff * ) ; int (*cache)(struct neighbour const * , struct hh_cache * , __be16 ) ; void (*cache_update)(struct hh_cache * , struct net_device const * , unsigned char const * ) ; }; struct napi_struct { struct list_head poll_list ; unsigned long state ; int weight ; unsigned int gro_count ; int (*poll)(struct napi_struct * , int ) ; spinlock_t poll_lock ; int poll_owner ; struct net_device *dev ; struct sk_buff *gro_list ; 
struct sk_buff *skb ; struct list_head dev_list ; struct hlist_node napi_hash_node ; unsigned int napi_id ; }; enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff ** ); struct Qdisc; struct netdev_queue { struct net_device *dev ; struct Qdisc *qdisc ; struct Qdisc *qdisc_sleeping ; struct kobject kobj ; int numa_node ; spinlock_t _xmit_lock ; int xmit_lock_owner ; unsigned long trans_start ; unsigned long trans_timeout ; unsigned long state ; struct dql dql ; }; struct rps_map { unsigned int len ; struct callback_head rcu ; u16 cpus[0U] ; }; struct rps_dev_flow { u16 cpu ; u16 filter ; unsigned int last_qtail ; }; struct rps_dev_flow_table { unsigned int mask ; struct callback_head rcu ; struct rps_dev_flow flows[0U] ; }; struct netdev_rx_queue { struct rps_map *rps_map ; struct rps_dev_flow_table *rps_flow_table ; struct kobject kobj ; struct net_device *dev ; }; struct xps_map { unsigned int len ; unsigned int alloc_len ; struct callback_head rcu ; u16 queues[0U] ; }; struct xps_dev_maps { struct callback_head rcu ; struct xps_map *cpu_map[0U] ; }; struct netdev_tc_txq { u16 count ; u16 offset ; }; struct netdev_fcoe_hbainfo { char manufacturer[64U] ; char serial_number[64U] ; char hardware_version[64U] ; char driver_version[64U] ; char optionrom_version[64U] ; char firmware_version[64U] ; char model[256U] ; char model_description[256U] ; }; struct netdev_phys_port_id { unsigned char id[32U] ; unsigned char id_len ; }; struct net_device_ops { int (*ndo_init)(struct net_device * ) ; void (*ndo_uninit)(struct net_device * ) ; int (*ndo_open)(struct net_device * ) ; int (*ndo_stop)(struct net_device * ) ; netdev_tx_t (*ndo_start_xmit)(struct sk_buff * , struct net_device * ) ; u16 (*ndo_select_queue)(struct net_device * , struct sk_buff * ) ; void (*ndo_change_rx_flags)(struct net_device * , int ) ; void (*ndo_set_rx_mode)(struct net_device * ) ; int (*ndo_set_mac_address)(struct net_device * , void * ) ; int (*ndo_validate_addr)(struct net_device * ) ; int (*ndo_do_ioctl)(struct net_device * , struct ifreq * , int ) ; int (*ndo_set_config)(struct net_device * , struct ifmap * ) ; int (*ndo_change_mtu)(struct net_device * , int ) ; int (*ndo_neigh_setup)(struct net_device * , struct neigh_parms * ) ; void (*ndo_tx_timeout)(struct net_device * ) ; struct rtnl_link_stats64 *(*ndo_get_stats64)(struct net_device * , struct rtnl_link_stats64 * ) ; struct net_device_stats *(*ndo_get_stats)(struct net_device * ) ; int (*ndo_vlan_rx_add_vid)(struct net_device * , __be16 , u16 ) ; int (*ndo_vlan_rx_kill_vid)(struct net_device * , __be16 , u16 ) ; void (*ndo_poll_controller)(struct net_device * ) ; int (*ndo_netpoll_setup)(struct net_device * , struct netpoll_info * , gfp_t ) ; void (*ndo_netpoll_cleanup)(struct net_device * ) ; int (*ndo_busy_poll)(struct napi_struct * ) ; int (*ndo_set_vf_mac)(struct net_device * , int , u8 * ) ; int (*ndo_set_vf_vlan)(struct net_device * , int , u16 , u8 ) ; int (*ndo_set_vf_tx_rate)(struct net_device * , int , int ) ; int (*ndo_set_vf_spoofchk)(struct net_device * , int , bool ) ; int (*ndo_get_vf_config)(struct net_device * , int , struct ifla_vf_info * ) ; int (*ndo_set_vf_link_state)(struct net_device * , int , int ) ; int (*ndo_set_vf_port)(struct net_device * , int , struct nlattr ** ) ; int (*ndo_get_vf_port)(struct net_device * , int , struct sk_buff * ) ; int 
(*ndo_setup_tc)(struct net_device * , u8 ) ; int (*ndo_fcoe_enable)(struct net_device * ) ; int (*ndo_fcoe_disable)(struct net_device * ) ; int (*ndo_fcoe_ddp_setup)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_ddp_done)(struct net_device * , u16 ) ; int (*ndo_fcoe_ddp_target)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_get_hbainfo)(struct net_device * , struct netdev_fcoe_hbainfo * ) ; int (*ndo_fcoe_get_wwn)(struct net_device * , u64 * , int ) ; int (*ndo_rx_flow_steer)(struct net_device * , struct sk_buff const * , u16 , u32 ) ; int (*ndo_add_slave)(struct net_device * , struct net_device * ) ; int (*ndo_del_slave)(struct net_device * , struct net_device * ) ; netdev_features_t (*ndo_fix_features)(struct net_device * , netdev_features_t ) ; int (*ndo_set_features)(struct net_device * , netdev_features_t ) ; int (*ndo_neigh_construct)(struct neighbour * ) ; void (*ndo_neigh_destroy)(struct neighbour * ) ; int (*ndo_fdb_add)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * , u16 ) ; int (*ndo_fdb_del)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * ) ; int (*ndo_fdb_dump)(struct sk_buff * , struct netlink_callback * , struct net_device * , int ) ; int (*ndo_bridge_setlink)(struct net_device * , struct nlmsghdr * ) ; int (*ndo_bridge_getlink)(struct sk_buff * , u32 , u32 , struct net_device * , u32 ) ; int (*ndo_bridge_dellink)(struct net_device * , struct nlmsghdr * ) ; int (*ndo_change_carrier)(struct net_device * , bool ) ; int (*ndo_get_phys_port_id)(struct net_device * , struct netdev_phys_port_id * ) ; void (*ndo_add_vxlan_port)(struct net_device * , sa_family_t , __u16 ) ; void (*ndo_del_vxlan_port)(struct net_device * , sa_family_t , __u16 ) ; }; struct iw_handler_def; struct iw_public_data; struct vlan_info; struct in_device; struct dn_dev; struct inet6_dev; struct cpu_rmap; struct pcpu_lstats; struct pcpu_tstats; struct pcpu_dstats; struct pcpu_vstats; union __anonunion_ldv_50454_359 { void *ml_priv ; struct pcpu_lstats *lstats ; struct pcpu_tstats *tstats ; struct pcpu_dstats *dstats ; struct pcpu_vstats *vstats ; }; struct garp_port; struct mrp_port; struct rtnl_link_ops; struct net_device { char name[16U] ; struct hlist_node name_hlist ; char *ifalias ; unsigned long mem_end ; unsigned long mem_start ; unsigned long base_addr ; unsigned int irq ; unsigned long state ; struct list_head dev_list ; struct list_head napi_list ; struct list_head unreg_list ; struct list_head upper_dev_list ; struct list_head lower_dev_list ; netdev_features_t features ; netdev_features_t hw_features ; netdev_features_t wanted_features ; netdev_features_t vlan_features ; netdev_features_t hw_enc_features ; netdev_features_t mpls_features ; int ifindex ; int iflink ; struct net_device_stats stats ; atomic_long_t rx_dropped ; struct iw_handler_def const *wireless_handlers ; struct iw_public_data *wireless_data ; struct net_device_ops const *netdev_ops ; struct ethtool_ops const *ethtool_ops ; struct header_ops const *header_ops ; unsigned int flags ; unsigned int priv_flags ; unsigned short gflags ; unsigned short padded ; unsigned char operstate ; unsigned char link_mode ; unsigned char if_port ; unsigned char dma ; unsigned int mtu ; unsigned short type ; unsigned short hard_header_len ; unsigned short needed_headroom ; unsigned short needed_tailroom ; unsigned char perm_addr[32U] ; unsigned char addr_assign_type ; unsigned char addr_len ; unsigned char 
neigh_priv_len ; unsigned short dev_id ; spinlock_t addr_list_lock ; struct netdev_hw_addr_list uc ; struct netdev_hw_addr_list mc ; struct netdev_hw_addr_list dev_addrs ; struct kset *queues_kset ; bool uc_promisc ; unsigned int promiscuity ; unsigned int allmulti ; struct vlan_info *vlan_info ; struct dsa_switch_tree *dsa_ptr ; void *atalk_ptr ; struct in_device *ip_ptr ; struct dn_dev *dn_ptr ; struct inet6_dev *ip6_ptr ; void *ax25_ptr ; struct wireless_dev *ieee80211_ptr ; unsigned long last_rx ; unsigned char *dev_addr ; struct netdev_rx_queue *_rx ; unsigned int num_rx_queues ; unsigned int real_num_rx_queues ; rx_handler_func_t *rx_handler ; void *rx_handler_data ; struct netdev_queue *ingress_queue ; unsigned char broadcast[32U] ; struct netdev_queue *_tx ; unsigned int num_tx_queues ; unsigned int real_num_tx_queues ; struct Qdisc *qdisc ; unsigned long tx_queue_len ; spinlock_t tx_global_lock ; struct xps_dev_maps *xps_maps ; struct cpu_rmap *rx_cpu_rmap ; unsigned long trans_start ; int watchdog_timeo ; struct timer_list watchdog_timer ; int *pcpu_refcnt ; struct list_head todo_list ; struct hlist_node index_hlist ; struct list_head link_watch_list ; unsigned char reg_state ; bool dismantle ; unsigned short rtnl_link_state ; void (*destructor)(struct net_device * ) ; struct netpoll_info *npinfo ; struct net *nd_net ; union __anonunion_ldv_50454_359 ldv_50454 ; struct garp_port *garp_port ; struct mrp_port *mrp_port ; struct device dev ; struct attribute_group const *sysfs_groups[4U] ; struct rtnl_link_ops const *rtnl_link_ops ; unsigned int gso_max_size ; u16 gso_max_segs ; struct dcbnl_rtnl_ops const *dcbnl_ops ; u8 num_tc ; struct netdev_tc_txq tc_to_txq[16U] ; u8 prio_tc_map[16U] ; unsigned int fcoe_ddp_xid ; struct netprio_map *priomap ; struct phy_device *phydev ; struct lock_class_key *qdisc_tx_busylock ; int group ; struct pm_qos_request pm_qos_req ; }; struct res_counter { unsigned long long usage ; unsigned long long max_usage ; unsigned long long limit ; unsigned long long soft_limit ; unsigned long long failcnt ; spinlock_t lock ; struct res_counter *parent ; }; struct kioctx; typedef int kiocb_cancel_fn(struct kiocb * ); union __anonunion_ki_obj_360 { void *user ; struct task_struct *tsk ; }; struct kiocb { struct file *ki_filp ; struct kioctx *ki_ctx ; kiocb_cancel_fn *ki_cancel ; void *private ; union __anonunion_ki_obj_360 ki_obj ; __u64 ki_user_data ; loff_t ki_pos ; size_t ki_nbytes ; struct list_head ki_list ; struct eventfd_ctx *ki_eventfd ; }; struct sock_filter { __u16 code ; __u8 jt ; __u8 jf ; __u32 k ; }; struct sk_filter { atomic_t refcnt ; unsigned int len ; unsigned int (*bpf_func)(struct sk_buff const * , struct sock_filter const * ) ; struct callback_head rcu ; struct sock_filter insns[0U] ; }; struct pollfd { int fd ; short events ; short revents ; }; struct poll_table_struct { void (*_qproc)(struct file * , wait_queue_head_t * , struct poll_table_struct * ) ; unsigned long _key ; }; struct nla_policy { u16 type ; u16 len ; }; struct rtnl_link_ops { struct list_head list ; char const *kind ; size_t priv_size ; void (*setup)(struct net_device * ) ; int maxtype ; struct nla_policy const *policy ; int (*validate)(struct nlattr ** , struct nlattr ** ) ; int (*newlink)(struct net * , struct net_device * , struct nlattr ** , struct nlattr ** ) ; int (*changelink)(struct net_device * , struct nlattr ** , struct nlattr ** ) ; void (*dellink)(struct net_device * , struct list_head * ) ; size_t (*get_size)(struct net_device const * ) ; int 
(*fill_info)(struct sk_buff * , struct net_device const * ) ; size_t (*get_xstats_size)(struct net_device const * ) ; int (*fill_xstats)(struct sk_buff * , struct net_device const * ) ; unsigned int (*get_num_tx_queues)(void) ; unsigned int (*get_num_rx_queues)(void) ; }; struct neigh_table; struct neigh_parms { struct net *net ; struct net_device *dev ; struct neigh_parms *next ; int (*neigh_setup)(struct neighbour * ) ; void (*neigh_cleanup)(struct neighbour * ) ; struct neigh_table *tbl ; void *sysctl_table ; int dead ; atomic_t refcnt ; struct callback_head callback_head ; int base_reachable_time ; int retrans_time ; int gc_staletime ; int reachable_time ; int delay_probe_time ; int queue_len_bytes ; int ucast_probes ; int app_probes ; int mcast_probes ; int anycast_delay ; int proxy_delay ; int proxy_qlen ; int locktime ; }; struct neigh_statistics { unsigned long allocs ; unsigned long destroys ; unsigned long hash_grows ; unsigned long res_failed ; unsigned long lookups ; unsigned long hits ; unsigned long rcv_probes_mcast ; unsigned long rcv_probes_ucast ; unsigned long periodic_gc_runs ; unsigned long forced_gc_runs ; unsigned long unres_discards ; }; struct neigh_ops; struct neighbour { struct neighbour *next ; struct neigh_table *tbl ; struct neigh_parms *parms ; unsigned long confirmed ; unsigned long updated ; rwlock_t lock ; atomic_t refcnt ; struct sk_buff_head arp_queue ; unsigned int arp_queue_len_bytes ; struct timer_list timer ; unsigned long used ; atomic_t probes ; __u8 flags ; __u8 nud_state ; __u8 type ; __u8 dead ; seqlock_t ha_lock ; unsigned char ha[32U] ; struct hh_cache hh ; int (*output)(struct neighbour * , struct sk_buff * ) ; struct neigh_ops const *ops ; struct callback_head rcu ; struct net_device *dev ; u8 primary_key[0U] ; }; struct neigh_ops { int family ; void (*solicit)(struct neighbour * , struct sk_buff * ) ; void (*error_report)(struct neighbour * , struct sk_buff * ) ; int (*output)(struct neighbour * , struct sk_buff * ) ; int (*connected_output)(struct neighbour * , struct sk_buff * ) ; }; struct pneigh_entry { struct pneigh_entry *next ; struct net *net ; struct net_device *dev ; u8 flags ; u8 key[0U] ; }; struct neigh_hash_table { struct neighbour **hash_buckets ; unsigned int hash_shift ; __u32 hash_rnd[4U] ; struct callback_head rcu ; }; struct neigh_table { struct neigh_table *next ; int family ; int entry_size ; int key_len ; __u32 (*hash)(void const * , struct net_device const * , __u32 * ) ; int (*constructor)(struct neighbour * ) ; int (*pconstructor)(struct pneigh_entry * ) ; void (*pdestructor)(struct pneigh_entry * ) ; void (*proxy_redo)(struct sk_buff * ) ; char *id ; struct neigh_parms parms ; int gc_interval ; int gc_thresh1 ; int gc_thresh2 ; int gc_thresh3 ; unsigned long last_flush ; struct delayed_work gc_work ; struct timer_list proxy_timer ; struct sk_buff_head proxy_queue ; atomic_t entries ; rwlock_t lock ; unsigned long last_rand ; struct neigh_statistics *stats ; struct neigh_hash_table *nht ; struct pneigh_entry **phash_buckets ; }; struct dn_route; union __anonunion_ldv_53555_365 { struct dst_entry *next ; struct rtable *rt_next ; struct rt6_info *rt6_next ; struct dn_route *dn_next ; }; struct dst_entry { struct callback_head callback_head ; struct dst_entry *child ; struct net_device *dev ; struct dst_ops *ops ; unsigned long _metrics ; unsigned long expires ; struct dst_entry *path ; struct dst_entry *from ; struct xfrm_state *xfrm ; int (*input)(struct sk_buff * ) ; int (*output)(struct sk_buff * ) ; unsigned short 
flags ; unsigned short pending_confirm ; short error ; short obsolete ; unsigned short header_len ; unsigned short trailer_len ; __u32 tclassid ; long __pad_to_align_refcnt[2U] ; atomic_t __refcnt ; int __use ; unsigned long lastuse ; union __anonunion_ldv_53555_365 ldv_53555 ; }; struct __anonstruct_socket_lock_t_366 { spinlock_t slock ; int owned ; wait_queue_head_t wq ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_socket_lock_t_366 socket_lock_t; struct proto; typedef __u32 __portpair; typedef __u64 __addrpair; struct __anonstruct_ldv_53776_368 { __be32 skc_daddr ; __be32 skc_rcv_saddr ; }; union __anonunion_ldv_53777_367 { __addrpair skc_addrpair ; struct __anonstruct_ldv_53776_368 ldv_53776 ; }; union __anonunion_ldv_53781_369 { unsigned int skc_hash ; __u16 skc_u16hashes[2U] ; }; struct __anonstruct_ldv_53787_371 { __be16 skc_dport ; __u16 skc_num ; }; union __anonunion_ldv_53788_370 { __portpair skc_portpair ; struct __anonstruct_ldv_53787_371 ldv_53787 ; }; union __anonunion_ldv_53797_372 { struct hlist_node skc_bind_node ; struct hlist_nulls_node skc_portaddr_node ; }; union __anonunion_ldv_53804_373 { struct hlist_node skc_node ; struct hlist_nulls_node skc_nulls_node ; }; struct sock_common { union __anonunion_ldv_53777_367 ldv_53777 ; union __anonunion_ldv_53781_369 ldv_53781 ; union __anonunion_ldv_53788_370 ldv_53788 ; unsigned short skc_family ; unsigned char volatile skc_state ; unsigned char skc_reuse : 4 ; unsigned char skc_reuseport : 4 ; int skc_bound_dev_if ; union __anonunion_ldv_53797_372 ldv_53797 ; struct proto *skc_prot ; struct net *skc_net ; int skc_dontcopy_begin[0U] ; union __anonunion_ldv_53804_373 ldv_53804 ; int skc_tx_queue_mapping ; atomic_t skc_refcnt ; int skc_dontcopy_end[0U] ; }; struct cg_proto; struct __anonstruct_sk_backlog_374 { atomic_t rmem_alloc ; int len ; struct sk_buff *head ; struct sk_buff *tail ; }; struct sock { struct sock_common __sk_common ; socket_lock_t sk_lock ; struct sk_buff_head sk_receive_queue ; struct __anonstruct_sk_backlog_374 sk_backlog ; int sk_forward_alloc ; __u32 sk_rxhash ; unsigned int sk_napi_id ; unsigned int sk_ll_usec ; atomic_t sk_drops ; int sk_rcvbuf ; struct sk_filter *sk_filter ; struct socket_wq *sk_wq ; struct sk_buff_head sk_async_wait_queue ; struct xfrm_policy *sk_policy[2U] ; unsigned long sk_flags ; struct dst_entry *sk_rx_dst ; struct dst_entry *sk_dst_cache ; spinlock_t sk_dst_lock ; atomic_t sk_wmem_alloc ; atomic_t sk_omem_alloc ; int sk_sndbuf ; struct sk_buff_head sk_write_queue ; unsigned char sk_shutdown : 2 ; unsigned char sk_no_check : 2 ; unsigned char sk_userlocks : 4 ; unsigned char sk_protocol ; unsigned short sk_type ; int sk_wmem_queued ; gfp_t sk_allocation ; u32 sk_pacing_rate ; netdev_features_t sk_route_caps ; netdev_features_t sk_route_nocaps ; int sk_gso_type ; unsigned int sk_gso_max_size ; u16 sk_gso_max_segs ; int sk_rcvlowat ; unsigned long sk_lingertime ; struct sk_buff_head sk_error_queue ; struct proto *sk_prot_creator ; rwlock_t sk_callback_lock ; int sk_err ; int sk_err_soft ; unsigned short sk_ack_backlog ; unsigned short sk_max_ack_backlog ; __u32 sk_priority ; __u32 sk_cgrp_prioidx ; struct pid *sk_peer_pid ; struct cred const *sk_peer_cred ; long sk_rcvtimeo ; long sk_sndtimeo ; void *sk_protinfo ; struct timer_list sk_timer ; ktime_t sk_stamp ; struct socket *sk_socket ; void *sk_user_data ; struct page_frag sk_frag ; struct sk_buff *sk_send_head ; __s32 sk_peek_off ; int sk_write_pending ; void *sk_security ; __u32 sk_mark ; u32 sk_classid ; struct 
cg_proto *sk_cgrp ; void (*sk_state_change)(struct sock * ) ; void (*sk_data_ready)(struct sock * , int ) ; void (*sk_write_space)(struct sock * ) ; void (*sk_error_report)(struct sock * ) ; int (*sk_backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*sk_destruct)(struct sock * ) ; }; struct request_sock_ops; struct timewait_sock_ops; struct inet_hashinfo; struct raw_hashinfo; struct udp_table; union __anonunion_h_375 { struct inet_hashinfo *hashinfo ; struct udp_table *udp_table ; struct raw_hashinfo *raw_hash ; }; struct proto { void (*close)(struct sock * , long ) ; int (*connect)(struct sock * , struct sockaddr * , int ) ; int (*disconnect)(struct sock * , int ) ; struct sock *(*accept)(struct sock * , int , int * ) ; int (*ioctl)(struct sock * , int , unsigned long ) ; int (*init)(struct sock * ) ; void (*destroy)(struct sock * ) ; void (*shutdown)(struct sock * , int ) ; int (*setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_ioctl)(struct sock * , unsigned int , unsigned long ) ; int (*sendmsg)(struct kiocb * , struct sock * , struct msghdr * , size_t ) ; int (*recvmsg)(struct kiocb * , struct sock * , struct msghdr * , size_t , int , int , int * ) ; int (*sendpage)(struct sock * , struct page * , int , size_t , int ) ; int (*bind)(struct sock * , struct sockaddr * , int ) ; int (*backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*release_cb)(struct sock * ) ; void (*mtu_reduced)(struct sock * ) ; void (*hash)(struct sock * ) ; void (*unhash)(struct sock * ) ; void (*rehash)(struct sock * ) ; int (*get_port)(struct sock * , unsigned short ) ; void (*clear_sk)(struct sock * , int ) ; unsigned int inuse_idx ; bool (*stream_memory_free)(struct sock const * ) ; void (*enter_memory_pressure)(struct sock * ) ; atomic_long_t *memory_allocated ; struct percpu_counter *sockets_allocated ; int *memory_pressure ; long *sysctl_mem ; int *sysctl_wmem ; int *sysctl_rmem ; int max_header ; bool no_autobind ; struct kmem_cache *slab ; unsigned int obj_size ; int slab_flags ; struct percpu_counter *orphan_count ; struct request_sock_ops *rsk_prot ; struct timewait_sock_ops *twsk_prot ; union __anonunion_h_375 h ; struct module *owner ; char name[32U] ; struct list_head node ; int (*init_cgroup)(struct mem_cgroup * , struct cgroup_subsys * ) ; void (*destroy_cgroup)(struct mem_cgroup * ) ; struct cg_proto *(*proto_cgroup)(struct mem_cgroup * ) ; }; struct cg_proto { void (*enter_memory_pressure)(struct sock * ) ; struct res_counter *memory_allocated ; struct percpu_counter *sockets_allocated ; int *memory_pressure ; long *sysctl_mem ; unsigned long flags ; struct mem_cgroup *memcg ; }; struct request_sock_ops { int family ; int obj_size ; struct kmem_cache *slab ; char *slab_name ; int (*rtx_syn_ack)(struct sock * , struct request_sock * ) ; void (*send_ack)(struct sock * , struct sk_buff * , struct request_sock * ) ; void (*send_reset)(struct sock * , struct sk_buff * ) ; void (*destructor)(struct request_sock * ) ; void (*syn_ack_timeout)(struct sock * , struct request_sock * ) ; }; struct request_sock { struct request_sock *dl_next ; u16 mss ; u8 num_retrans ; unsigned char cookie_ts : 1 ; unsigned char num_timeout : 7 ; u32 window_clamp ; u32 rcv_wnd ; u32 ts_recent ; unsigned long expires ; struct request_sock_ops const *rsk_ops ; struct sock 
*sk ; u32 secid ; u32 peer_secid ; }; struct timewait_sock_ops { struct kmem_cache *twsk_slab ; char *twsk_slab_name ; unsigned int twsk_obj_size ; int (*twsk_unique)(struct sock * , struct sock * , void * ) ; void (*twsk_destructor)(struct sock * ) ; }; struct ipv6_devconf { __s32 forwarding ; __s32 hop_limit ; __s32 mtu6 ; __s32 accept_ra ; __s32 accept_redirects ; __s32 autoconf ; __s32 dad_transmits ; __s32 rtr_solicits ; __s32 rtr_solicit_interval ; __s32 rtr_solicit_delay ; __s32 force_mld_version ; __s32 mldv1_unsolicited_report_interval ; __s32 mldv2_unsolicited_report_interval ; __s32 use_tempaddr ; __s32 temp_valid_lft ; __s32 temp_prefered_lft ; __s32 regen_max_retry ; __s32 max_desync_factor ; __s32 max_addresses ; __s32 accept_ra_defrtr ; __s32 accept_ra_pinfo ; __s32 accept_ra_rtr_pref ; __s32 rtr_probe_interval ; __s32 accept_ra_rt_info_max_plen ; __s32 proxy_ndp ; __s32 accept_source_route ; __s32 optimistic_dad ; __s32 mc_forwarding ; __s32 disable_ipv6 ; __s32 accept_dad ; __s32 force_tllao ; __s32 ndisc_notify ; __s32 suppress_frag_ndisc ; void *sysctl ; }; struct ip6_sf_list { struct ip6_sf_list *sf_next ; struct in6_addr sf_addr ; unsigned long sf_count[2U] ; unsigned char sf_gsresp ; unsigned char sf_oldin ; unsigned char sf_crcount ; }; struct ifmcaddr6 { struct in6_addr mca_addr ; struct inet6_dev *idev ; struct ifmcaddr6 *next ; struct ip6_sf_list *mca_sources ; struct ip6_sf_list *mca_tomb ; unsigned int mca_sfmode ; unsigned char mca_crcount ; unsigned long mca_sfcount[2U] ; struct timer_list mca_timer ; unsigned int mca_flags ; int mca_users ; atomic_t mca_refcnt ; spinlock_t mca_lock ; unsigned long mca_cstamp ; unsigned long mca_tstamp ; }; struct ifacaddr6 { struct in6_addr aca_addr ; struct inet6_dev *aca_idev ; struct rt6_info *aca_rt ; struct ifacaddr6 *aca_next ; int aca_users ; atomic_t aca_refcnt ; spinlock_t aca_lock ; unsigned long aca_cstamp ; unsigned long aca_tstamp ; }; struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry ; struct ipstats_mib *ipv6[1U] ; struct icmpv6_mib_device *icmpv6dev ; struct icmpv6msg_mib_device *icmpv6msgdev ; }; struct inet6_dev { struct net_device *dev ; struct list_head addr_list ; int valid_ll_addr_cnt ; struct ifmcaddr6 *mc_list ; struct ifmcaddr6 *mc_tomb ; spinlock_t mc_lock ; unsigned char mc_qrv ; unsigned char mc_gq_running ; unsigned char mc_ifc_count ; unsigned char mc_dad_count ; unsigned long mc_v1_seen ; unsigned long mc_qi ; unsigned long mc_qri ; unsigned long mc_maxdelay ; struct timer_list mc_gq_timer ; struct timer_list mc_ifc_timer ; struct timer_list mc_dad_timer ; struct ifacaddr6 *ac_list ; rwlock_t lock ; atomic_t refcnt ; __u32 if_flags ; int dead ; u8 rndid[8U] ; struct timer_list regen_timer ; struct list_head tempaddr_list ; struct in6_addr token ; struct neigh_parms *nd_parms ; struct ipv6_devconf cnf ; struct ipv6_devstat stats ; struct timer_list rs_timer ; __u8 rs_probes ; unsigned long tstamp ; struct callback_head rcu ; }; union __anonunion_ldv_58122_389 { __be32 a4 ; __be32 a6[4U] ; }; struct inetpeer_addr_base { union __anonunion_ldv_58122_389 ldv_58122 ; }; struct inetpeer_addr { struct inetpeer_addr_base addr ; __u16 family ; }; union __anonunion_ldv_58137_390 { struct list_head gc_list ; struct callback_head gc_rcu ; }; struct __anonstruct_ldv_58142_392 { atomic_t rid ; atomic_t ip_id_count ; }; union __anonunion_ldv_58145_391 { struct __anonstruct_ldv_58142_392 ldv_58142 ; struct callback_head rcu ; struct inet_peer *gc_next ; }; struct inet_peer { struct inet_peer *avl_left 
; struct inet_peer *avl_right ; struct inetpeer_addr daddr ; __u32 avl_height ; u32 metrics[15U] ; u32 rate_tokens ; unsigned long rate_last ; union __anonunion_ldv_58137_390 ldv_58137 ; union __anonunion_ldv_58145_391 ldv_58145 ; __u32 dtime ; atomic_t refcnt ; }; struct inet_peer_base { struct inet_peer *root ; seqlock_t lock ; u32 flush_seq ; int total ; }; struct rtable { struct dst_entry dst ; int rt_genid ; unsigned int rt_flags ; __u16 rt_type ; __u8 rt_is_input ; __u8 rt_uses_gateway ; int rt_iif ; __be32 rt_gateway ; u32 rt_pmtu ; struct list_head rt_uncached ; }; struct inet_ehash_bucket { struct hlist_nulls_head chain ; struct hlist_nulls_head twchain ; }; struct inet_bind_hashbucket { spinlock_t lock ; struct hlist_head chain ; }; struct inet_listen_hashbucket { spinlock_t lock ; struct hlist_nulls_head head ; }; struct inet_hashinfo { struct inet_ehash_bucket *ehash ; spinlock_t *ehash_locks ; unsigned int ehash_mask ; unsigned int ehash_locks_mask ; struct inet_bind_hashbucket *bhash ; unsigned int bhash_size ; struct kmem_cache *bind_bucket_cachep ; struct inet_listen_hashbucket listening_hash[32U] ; atomic_t bsockets ; }; enum transport_lun_status_table { TRANSPORT_LUN_STATUS_FREE = 0, TRANSPORT_LUN_STATUS_ACTIVE = 1 } ; enum transport_tpg_type_table { TRANSPORT_TPG_TYPE_NORMAL = 0, TRANSPORT_TPG_TYPE_DISCOVERY = 1 } ; enum transport_state_table { TRANSPORT_NO_STATE = 0, TRANSPORT_NEW_CMD = 1, TRANSPORT_WRITE_PENDING = 3, TRANSPORT_PROCESSING = 5, TRANSPORT_COMPLETE = 6, TRANSPORT_ISTATE_PROCESSING = 11, TRANSPORT_COMPLETE_QF_WP = 18, TRANSPORT_COMPLETE_QF_OK = 19 } ; typedef unsigned int sense_reason_t; struct se_cmd; struct se_device; struct t10_alua_tg_pt_gp; struct t10_alua { u16 alua_tg_pt_gps_counter ; u32 alua_tg_pt_gps_count ; spinlock_t tg_pt_gps_lock ; struct se_device *t10_dev ; struct t10_alua_tg_pt_gp *default_tg_pt_gp ; struct config_group alua_tg_pt_gps_group ; struct list_head tg_pt_gps_list ; }; struct t10_alua_lu_gp { u16 lu_gp_id ; int lu_gp_valid_id ; u32 lu_gp_members ; atomic_t lu_gp_ref_cnt ; spinlock_t lu_gp_lock ; struct config_group lu_gp_group ; struct list_head lu_gp_node ; struct list_head lu_gp_mem_list ; }; struct t10_alua_lu_gp_member { bool lu_gp_assoc ; atomic_t lu_gp_mem_ref_cnt ; spinlock_t lu_gp_mem_lock ; struct t10_alua_lu_gp *lu_gp ; struct se_device *lu_gp_mem_dev ; struct list_head lu_gp_mem_list ; }; struct t10_alua_tg_pt_gp { u16 tg_pt_gp_id ; int tg_pt_gp_valid_id ; int tg_pt_gp_alua_access_status ; int tg_pt_gp_alua_access_type ; int tg_pt_gp_nonop_delay_msecs ; int tg_pt_gp_trans_delay_msecs ; int tg_pt_gp_implict_trans_secs ; int tg_pt_gp_pref ; int tg_pt_gp_write_metadata ; u32 tg_pt_gp_md_buf_len ; u32 tg_pt_gp_members ; atomic_t tg_pt_gp_alua_access_state ; atomic_t tg_pt_gp_ref_cnt ; spinlock_t tg_pt_gp_lock ; struct mutex tg_pt_gp_md_mutex ; struct se_device *tg_pt_gp_dev ; struct config_group tg_pt_gp_group ; struct list_head tg_pt_gp_list ; struct list_head tg_pt_gp_mem_list ; }; struct se_port; struct t10_alua_tg_pt_gp_member { bool tg_pt_gp_assoc ; atomic_t tg_pt_gp_mem_ref_cnt ; spinlock_t tg_pt_gp_mem_lock ; struct t10_alua_tg_pt_gp *tg_pt_gp ; struct se_port *tg_pt ; struct list_head tg_pt_gp_mem_list ; }; struct t10_wwn { char vendor[8U] ; char model[16U] ; char revision[4U] ; char unit_serial[254U] ; spinlock_t t10_vpd_lock ; struct se_device *t10_dev ; struct config_group t10_wwn_group ; struct list_head t10_vpd_list ; }; struct se_node_acl; struct se_dev_entry; struct se_lun; struct t10_pr_registration { char 
pr_reg_isid[16U] ; unsigned char pr_iport[256U] ; unsigned char pr_tport[256U] ; u16 pr_aptpl_rpti ; u16 pr_reg_tpgt ; int pr_reg_all_tg_pt ; int pr_reg_aptpl ; int pr_res_holder ; int pr_res_type ; int pr_res_scope ; bool isid_present_at_reg ; u32 pr_res_mapped_lun ; u32 pr_aptpl_target_lun ; u32 pr_res_generation ; u64 pr_reg_bin_isid ; u64 pr_res_key ; atomic_t pr_res_holders ; struct se_node_acl *pr_reg_nacl ; struct se_dev_entry *pr_reg_deve ; struct se_lun *pr_reg_tg_pt_lun ; struct list_head pr_reg_list ; struct list_head pr_reg_abort_list ; struct list_head pr_reg_aptpl_list ; struct list_head pr_reg_atp_list ; struct list_head pr_reg_atp_mem_list ; }; struct t10_reservation { int pr_all_tg_pt ; int pr_aptpl_active ; u32 pr_generation ; spinlock_t registration_lock ; spinlock_t aptpl_reg_lock ; struct se_node_acl *pr_res_holder ; struct list_head registration_list ; struct list_head aptpl_reg_list ; }; struct se_tmr_req { u8 function ; u8 response ; int call_transport ; u32 ref_task_tag ; void *fabric_tmr_ptr ; struct se_cmd *task_cmd ; struct se_device *tmr_dev ; struct se_lun *tmr_lun ; struct list_head tmr_list ; }; struct se_session; struct target_core_fabric_ops; struct se_cmd { u8 scsi_status ; u8 scsi_asc ; u8 scsi_ascq ; u16 scsi_sense_length ; int alua_nonop_delay ; enum dma_data_direction data_direction ; int sam_task_attr ; unsigned int map_tag ; enum transport_state_table t_state ; unsigned char cmd_wait_set : 1 ; unsigned char unknown_data_length : 1 ; u32 se_cmd_flags ; u32 se_ordered_id ; u32 data_length ; u32 residual_count ; u32 orig_fe_lun ; u64 pr_res_key ; void *sense_buffer ; struct list_head se_delayed_node ; struct list_head se_lun_node ; struct list_head se_qf_node ; struct se_device *se_dev ; struct se_dev_entry *se_deve ; struct se_lun *se_lun ; struct se_session *se_sess ; struct se_tmr_req *se_tmr_req ; struct list_head se_cmd_list ; struct completion cmd_wait_comp ; struct kref cmd_kref ; struct target_core_fabric_ops *se_tfo ; sense_reason_t (*execute_cmd)(struct se_cmd * ) ; sense_reason_t (*execute_rw)(struct se_cmd * , struct scatterlist * , u32 , enum dma_data_direction ) ; sense_reason_t (*transport_complete_callback)(struct se_cmd * ) ; unsigned char *t_task_cdb ; unsigned char __t_task_cdb[32U] ; unsigned long long t_task_lba ; unsigned int t_task_nolb ; unsigned int transport_state ; spinlock_t t_state_lock ; struct completion t_transport_stop_comp ; struct completion transport_lun_fe_stop_comp ; struct completion transport_lun_stop_comp ; struct work_struct work ; struct scatterlist *t_data_sg ; struct scatterlist *t_data_sg_orig ; unsigned int t_data_nents ; unsigned int t_data_nents_orig ; void *t_data_vmap ; struct scatterlist *t_bidi_data_sg ; unsigned int t_bidi_data_nents ; struct list_head state_list ; bool state_active ; struct completion task_stop_comp ; void *priv ; }; struct se_portal_group; struct se_node_acl { char initiatorname[224U] ; bool dynamic_node_acl ; bool acl_stop ; u32 queue_depth ; u32 acl_index ; char acl_tag[64U] ; u64 num_cmds ; u64 read_bytes ; u64 write_bytes ; spinlock_t stats_lock ; atomic_t acl_pr_ref_count ; struct se_dev_entry **device_list ; struct se_session *nacl_sess ; struct se_portal_group *se_tpg ; spinlock_t device_list_lock ; spinlock_t nacl_sess_lock ; struct config_group acl_group ; struct config_group acl_attrib_group ; struct config_group acl_auth_group ; struct config_group acl_param_group ; struct config_group acl_fabric_stat_group ; struct config_group *acl_default_groups[5U] ; struct 
list_head acl_list ; struct list_head acl_sess_list ; struct completion acl_free_comp ; struct kref acl_kref ; }; struct se_session { unsigned char sess_tearing_down : 1 ; u64 sess_bin_isid ; struct se_node_acl *se_node_acl ; struct se_portal_group *se_tpg ; void *fabric_sess_ptr ; struct list_head sess_list ; struct list_head sess_acl_list ; struct list_head sess_cmd_list ; struct list_head sess_wait_list ; spinlock_t sess_cmd_lock ; struct kref sess_kref ; void *sess_cmd_map ; struct percpu_ida sess_tag_pool ; }; struct se_ml_stat_grps { struct config_group stat_group ; struct config_group scsi_auth_intr_group ; struct config_group scsi_att_intr_port_group ; }; struct se_lun_acl { char initiatorname[224U] ; u32 mapped_lun ; struct se_node_acl *se_lun_nacl ; struct se_lun *se_lun ; struct list_head lacl_list ; struct config_group se_lun_group ; struct se_ml_stat_grps ml_stat_grps ; }; struct se_dev_entry { bool def_pr_registered ; u32 lun_flags ; u32 mapped_lun ; u32 total_cmds ; u64 pr_res_key ; u64 creation_time ; u32 attach_count ; u64 read_bytes ; u64 write_bytes ; atomic_t ua_count ; atomic_t pr_ref_count ; struct se_lun_acl *se_lun_acl ; spinlock_t ua_lock ; struct se_lun *se_lun ; struct list_head alua_port_list ; struct list_head ua_list ; }; struct se_dev_attrib { int emulate_model_alias ; int emulate_dpo ; int emulate_fua_write ; int emulate_fua_read ; int emulate_write_cache ; int emulate_ua_intlck_ctrl ; int emulate_tas ; int emulate_tpu ; int emulate_tpws ; int emulate_caw ; int emulate_3pc ; int enforce_pr_isids ; int is_nonrot ; int emulate_rest_reord ; u32 hw_block_size ; u32 block_size ; u32 hw_max_sectors ; u32 fabric_max_sectors ; u32 optimal_sectors ; u32 hw_queue_depth ; u32 queue_depth ; u32 max_unmap_lba_count ; u32 max_unmap_block_desc_count ; u32 unmap_granularity ; u32 unmap_granularity_alignment ; u32 max_write_same_len ; struct se_device *da_dev ; struct config_group da_group ; }; struct se_dev_stat_grps { struct config_group stat_group ; struct config_group scsi_dev_group ; struct config_group scsi_tgt_dev_group ; struct config_group scsi_lu_group ; }; struct se_hba; struct se_subsystem_api; struct se_device { u32 dev_link_magic ; u16 dev_rpti_counter ; u32 dev_cur_ordered_id ; u32 dev_flags ; u32 dev_port_count ; u32 queue_depth ; u64 dev_res_bin_isid ; u32 dev_index ; u64 creation_time ; u32 num_resets ; u64 num_cmds ; u64 read_bytes ; u64 write_bytes ; spinlock_t stats_lock ; atomic_t simple_cmds ; atomic_t dev_ordered_id ; atomic_t dev_ordered_sync ; atomic_t dev_qf_count ; int export_count ; spinlock_t delayed_cmd_lock ; spinlock_t execute_task_lock ; spinlock_t dev_reservation_lock ; unsigned int dev_reservation_flags ; spinlock_t se_port_lock ; spinlock_t se_tmr_lock ; spinlock_t qf_cmd_lock ; struct semaphore caw_sem ; struct se_node_acl *dev_reserved_node_acl ; struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem ; struct t10_pr_registration *dev_pr_res_holder ; struct list_head dev_sep_list ; struct list_head dev_tmr_list ; struct workqueue_struct *tmr_wq ; struct work_struct qf_work_queue ; struct list_head delayed_cmd_list ; struct list_head state_list ; struct list_head qf_cmd_list ; struct list_head g_dev_node ; struct se_hba *se_hba ; struct t10_wwn t10_wwn ; struct t10_alua t10_alua ; struct t10_reservation t10_pr ; struct se_dev_attrib dev_attrib ; struct config_group dev_group ; struct config_group dev_pr_group ; struct se_dev_stat_grps dev_stat_grps ; unsigned char dev_alias[512U] ; unsigned char udev_path[512U] ; struct se_subsystem_api 
*transport ; struct list_head dev_list ; }; struct se_hba { u16 hba_tpgt ; u32 hba_id ; u32 hba_flags ; u32 dev_count ; u32 hba_index ; void *hba_ptr ; struct list_head hba_node ; spinlock_t device_lock ; struct config_group hba_group ; struct mutex hba_access_mutex ; struct se_subsystem_api *transport ; }; struct se_port_stat_grps { struct config_group stat_group ; struct config_group scsi_port_group ; struct config_group scsi_tgt_port_group ; struct config_group scsi_transport_group ; }; struct se_lun { u32 lun_link_magic ; enum transport_lun_status_table lun_status ; u32 lun_access ; u32 lun_flags ; u32 unpacked_lun ; atomic_t lun_acl_count ; spinlock_t lun_acl_lock ; spinlock_t lun_cmd_lock ; spinlock_t lun_sep_lock ; struct completion lun_shutdown_comp ; struct list_head lun_cmd_list ; struct list_head lun_acl_list ; struct se_device *lun_se_dev ; struct se_port *lun_sep ; struct config_group lun_group ; struct se_port_stat_grps port_stat_grps ; }; struct scsi_port_stats { u64 cmd_pdus ; u64 tx_data_octets ; u64 rx_data_octets ; }; struct se_port { u16 sep_rtpi ; int sep_tg_pt_secondary_stat ; int sep_tg_pt_secondary_write_md ; u32 sep_index ; struct scsi_port_stats sep_stats ; atomic_t sep_tg_pt_secondary_offline ; atomic_t sep_tg_pt_ref_cnt ; spinlock_t sep_alua_lock ; struct mutex sep_tg_pt_md_mutex ; struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem ; struct se_lun *sep_lun ; struct se_portal_group *sep_tpg ; struct list_head sep_alua_list ; struct list_head sep_list ; }; struct se_wwn; struct se_portal_group { enum transport_tpg_type_table se_tpg_type ; u32 num_node_acls ; atomic_t tpg_pr_ref_count ; spinlock_t acl_node_lock ; spinlock_t session_lock ; spinlock_t tpg_lun_lock ; void *se_tpg_fabric_ptr ; struct list_head se_tpg_node ; struct list_head acl_node_list ; struct se_lun **tpg_lun_list ; struct se_lun tpg_virt_lun0 ; struct list_head tpg_sess_list ; struct target_core_fabric_ops *se_tpg_tfo ; struct se_wwn *se_tpg_wwn ; struct config_group tpg_group ; struct config_group *tpg_default_groups[7U] ; struct config_group tpg_lun_group ; struct config_group tpg_np_group ; struct config_group tpg_acl_group ; struct config_group tpg_attrib_group ; struct config_group tpg_auth_group ; struct config_group tpg_param_group ; }; struct target_fabric_configfs; struct se_wwn { struct target_fabric_configfs *wwn_tf ; struct config_group wwn_group ; struct config_group *wwn_default_groups[2U] ; struct config_group fabric_stat_group ; }; struct qla_tgt { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; int irq_cmd_count ; int datasegs_per_cmd ; int datasegs_per_cont ; int sg_tablesize ; unsigned char tgt_enable_64bit_addr : 1 ; unsigned char link_reinit_iocb_pending : 1 ; int tgt_stop ; int tgt_stopped ; int sess_count ; struct list_head sess_list ; struct list_head del_sess_list ; struct delayed_work sess_del_work ; spinlock_t sess_work_lock ; struct list_head sess_works_list ; struct work_struct sess_work ; struct imm_ntfy_from_isp link_reinit_iocb ; wait_queue_head_t waitQ ; int notify_ack_expected ; int abts_resp_expected ; int modify_lun_expected ; int ctio_srr_id ; int imm_srr_id ; spinlock_t srr_lock ; struct list_head srr_ctio_list ; struct list_head srr_imm_list ; struct work_struct srr_work ; atomic_t tgt_global_resets_count ; struct list_head tgt_list_entry ; }; struct qla_tgt_sess { uint16_t loop_id ; port_id_t s_id ; unsigned char conf_compl_supported : 1 ; unsigned char deleted : 1 ; unsigned char local : 1 ; struct se_session *se_sess ; struct scsi_qla_host *vha ; 
struct qla_tgt *tgt ; struct list_head sess_list_entry ; unsigned long expires ; struct list_head del_list_entry ; uint8_t port_name[8U] ; struct work_struct free_work ; }; struct qla_tgt_cmd { struct qla_tgt_sess *sess ; int state ; struct se_cmd se_cmd ; struct work_struct free_work ; struct work_struct work ; unsigned char sense_buffer[96U] ; unsigned char conf_compl_supported : 1 ; unsigned char sg_mapped : 1 ; unsigned char free_sg : 1 ; unsigned char aborted : 1 ; unsigned char write_data_transferred : 1 ; struct scatterlist *sg ; int sg_cnt ; int bufflen ; int offset ; uint32_t tag ; uint32_t unpacked_lun ; enum dma_data_direction dma_data_direction ; uint16_t loop_id ; struct qla_tgt *tgt ; struct scsi_qla_host *vha ; struct list_head cmd_list ; struct atio_from_isp atio ; }; union __anonunion_orig_iocb_396 { struct atio_from_isp atio ; struct imm_ntfy_from_isp imm_ntfy ; struct abts_recv_from_24xx abts ; }; struct qla_tgt_mgmt_cmd { uint8_t tmr_func ; uint8_t fc_tm_rsp ; struct qla_tgt_sess *sess ; struct se_cmd se_cmd ; struct work_struct free_work ; unsigned int flags ; union __anonunion_orig_iocb_396 orig_iocb ; }; typedef int ldv_func_ret_type; typedef int ldv_func_ret_type___0; typedef int ldv_func_ret_type___1; typedef int ldv_func_ret_type___2; typedef struct Scsi_Host *ldv_func_ret_type___3; typedef int ldv_func_ret_type___4; enum hrtimer_restart; enum blk_eh_timer_return; struct fc_rport_identifiers { u64 node_name ; u64 port_name ; u32 port_id ; u32 roles ; }; struct nvram_24xx { uint8_t id[4U] ; uint16_t nvram_version ; uint16_t reserved_0 ; uint16_t version ; uint16_t reserved_1 ; uint16_t frame_payload_size ; uint16_t execution_throttle ; uint16_t exchange_count ; uint16_t hard_address ; uint8_t port_name[8U] ; uint8_t node_name[8U] ; uint16_t login_retry_count ; uint16_t link_down_on_nos ; uint16_t interrupt_delay_timer ; uint16_t login_timeout ; uint32_t firmware_options_1 ; uint32_t firmware_options_2 ; uint32_t firmware_options_3 ; uint16_t seriallink_options[4U] ; uint16_t reserved_2[16U] ; uint16_t reserved_3[16U] ; uint16_t reserved_4[16U] ; uint16_t reserved_5[16U] ; uint16_t reserved_6[16U] ; uint16_t reserved_7[16U] ; uint32_t host_p ; uint8_t alternate_port_name[8U] ; uint8_t alternate_node_name[8U] ; uint8_t boot_port_name[8U] ; uint16_t boot_lun_number ; uint16_t reserved_8 ; uint8_t alt1_boot_port_name[8U] ; uint16_t alt1_boot_lun_number ; uint16_t reserved_9 ; uint8_t alt2_boot_port_name[8U] ; uint16_t alt2_boot_lun_number ; uint16_t reserved_10 ; uint8_t alt3_boot_port_name[8U] ; uint16_t alt3_boot_lun_number ; uint16_t reserved_11 ; uint32_t efi_parameters ; uint8_t reset_delay ; uint8_t reserved_12 ; uint16_t reserved_13 ; uint16_t boot_id_number ; uint16_t reserved_14 ; uint16_t max_luns_per_target ; uint16_t reserved_15 ; uint16_t port_down_retry_count ; uint16_t link_down_timeout ; uint16_t fcode_parameter ; uint16_t reserved_16[3U] ; uint8_t prev_drv_ver_major ; uint8_t prev_drv_ver_submajob ; uint8_t prev_drv_ver_minor ; uint8_t prev_drv_ver_subminor ; uint16_t prev_bios_ver_major ; uint16_t prev_bios_ver_minor ; uint16_t prev_efi_ver_major ; uint16_t prev_efi_ver_minor ; uint16_t prev_fw_ver_major ; uint8_t prev_fw_ver_minor ; uint8_t prev_fw_ver_subminor ; uint16_t reserved_17[8U] ; uint16_t reserved_18[16U] ; uint16_t reserved_19[16U] ; uint16_t reserved_20[16U] ; uint8_t model_name[16U] ; uint16_t reserved_21[2U] ; uint16_t pcie_table_sig ; uint16_t pcie_table_offset ; uint16_t subsystem_vendor_id ; uint16_t subsystem_device_id ; uint32_t 
checksum ; }; struct init_cb_24xx { uint16_t version ; uint16_t reserved_1 ; uint16_t frame_payload_size ; uint16_t execution_throttle ; uint16_t exchange_count ; uint16_t hard_address ; uint8_t port_name[8U] ; uint8_t node_name[8U] ; uint16_t response_q_inpointer ; uint16_t request_q_outpointer ; uint16_t login_retry_count ; uint16_t prio_request_q_outpointer ; uint16_t response_q_length ; uint16_t request_q_length ; uint16_t link_down_on_nos ; uint16_t prio_request_q_length ; uint32_t request_q_address[2U] ; uint32_t response_q_address[2U] ; uint32_t prio_request_q_address[2U] ; uint16_t msix ; uint16_t msix_atio ; uint8_t reserved_2[4U] ; uint16_t atio_q_inpointer ; uint16_t atio_q_length ; uint32_t atio_q_address[2U] ; uint16_t interrupt_delay_timer ; uint16_t login_timeout ; uint32_t firmware_options_1 ; uint32_t firmware_options_2 ; uint32_t firmware_options_3 ; uint16_t qos ; uint16_t rid ; uint8_t reserved_3[20U] ; }; struct mid_conf_entry_24xx { uint16_t reserved_1 ; uint8_t options ; uint8_t hard_address ; uint8_t port_name[8U] ; uint8_t node_name[8U] ; }; struct mid_init_cb_24xx { struct init_cb_24xx init_cb ; uint16_t count ; uint16_t options ; struct mid_conf_entry_24xx entries[256U] ; }; struct nvram_81xx { uint8_t id[4U] ; uint16_t nvram_version ; uint16_t reserved_0 ; uint16_t version ; uint16_t reserved_1 ; uint16_t frame_payload_size ; uint16_t execution_throttle ; uint16_t exchange_count ; uint16_t reserved_2 ; uint8_t port_name[8U] ; uint8_t node_name[8U] ; uint16_t login_retry_count ; uint16_t reserved_3 ; uint16_t interrupt_delay_timer ; uint16_t login_timeout ; uint32_t firmware_options_1 ; uint32_t firmware_options_2 ; uint32_t firmware_options_3 ; uint16_t reserved_4[4U] ; uint8_t enode_mac[6U] ; uint16_t reserved_5[5U] ; uint16_t reserved_6[24U] ; uint16_t ex_version ; uint8_t prio_fcf_matching_flags ; uint8_t reserved_6_1[3U] ; uint16_t pri_fcf_vlan_id ; uint8_t pri_fcf_fabric_name[8U] ; uint16_t reserved_6_2[7U] ; uint8_t spma_mac_addr[6U] ; uint16_t reserved_6_3[14U] ; uint16_t reserved_7[32U] ; uint32_t host_p ; uint8_t alternate_port_name[8U] ; uint8_t alternate_node_name[8U] ; uint8_t boot_port_name[8U] ; uint16_t boot_lun_number ; uint16_t reserved_8 ; uint8_t alt1_boot_port_name[8U] ; uint16_t alt1_boot_lun_number ; uint16_t reserved_9 ; uint8_t alt2_boot_port_name[8U] ; uint16_t alt2_boot_lun_number ; uint16_t reserved_10 ; uint8_t alt3_boot_port_name[8U] ; uint16_t alt3_boot_lun_number ; uint16_t reserved_11 ; uint32_t efi_parameters ; uint8_t reset_delay ; uint8_t reserved_12 ; uint16_t reserved_13 ; uint16_t boot_id_number ; uint16_t reserved_14 ; uint16_t max_luns_per_target ; uint16_t reserved_15 ; uint16_t port_down_retry_count ; uint16_t link_down_timeout ; uint16_t fcode_parameter ; uint16_t reserved_16[3U] ; uint8_t reserved_17[4U] ; uint16_t reserved_18[5U] ; uint8_t reserved_19[2U] ; uint16_t reserved_20[8U] ; uint8_t reserved_21[16U] ; uint16_t reserved_22[3U] ; uint8_t enhanced_features ; uint8_t reserved_23 ; uint16_t reserved_24[4U] ; uint16_t reserved_25[32U] ; uint8_t model_name[16U] ; uint16_t feature_mask_l ; uint16_t feature_mask_h ; uint16_t reserved_26[2U] ; uint16_t subsystem_vendor_id ; uint16_t subsystem_device_id ; uint32_t checksum ; }; struct init_cb_81xx { uint16_t version ; uint16_t reserved_1 ; uint16_t frame_payload_size ; uint16_t execution_throttle ; uint16_t exchange_count ; uint16_t reserved_2 ; uint8_t port_name[8U] ; uint8_t node_name[8U] ; uint16_t response_q_inpointer ; uint16_t request_q_outpointer ; uint16_t 
login_retry_count ; uint16_t prio_request_q_outpointer ; uint16_t response_q_length ; uint16_t request_q_length ; uint16_t reserved_3 ; uint16_t prio_request_q_length ; uint32_t request_q_address[2U] ; uint32_t response_q_address[2U] ; uint32_t prio_request_q_address[2U] ; uint8_t reserved_4[8U] ; uint16_t atio_q_inpointer ; uint16_t atio_q_length ; uint32_t atio_q_address[2U] ; uint16_t interrupt_delay_timer ; uint16_t login_timeout ; uint32_t firmware_options_1 ; uint32_t firmware_options_2 ; uint32_t firmware_options_3 ; uint8_t reserved_5[8U] ; uint8_t enode_mac[6U] ; uint8_t reserved_6[10U] ; }; struct __anonstruct_nvram_t_244 { uint8_t id[4U] ; uint8_t nvram_version ; uint8_t reserved_0 ; uint8_t parameter_block_version ; uint8_t reserved_1 ; uint8_t firmware_options[2U] ; uint16_t frame_payload_size ; uint16_t max_iocb_allocation ; uint16_t execution_throttle ; uint8_t retry_count ; uint8_t retry_delay ; uint8_t port_name[8U] ; uint16_t hard_address ; uint8_t inquiry_data ; uint8_t login_timeout ; uint8_t node_name[8U] ; uint8_t add_firmware_options[2U] ; uint8_t response_accumulation_timer ; uint8_t interrupt_delay_timer ; uint8_t special_options[2U] ; uint8_t reserved_2[22U] ; uint8_t seriallink_options[4U] ; uint8_t host_p[2U] ; uint8_t boot_node_name[8U] ; uint8_t boot_lun_number ; uint8_t reset_delay ; uint8_t port_down_retry_count ; uint8_t boot_id_number ; uint16_t max_luns_per_target ; uint8_t fcode_boot_port_name[8U] ; uint8_t alternate_port_name[8U] ; uint8_t alternate_node_name[8U] ; uint8_t efi_parameters ; uint8_t link_down_timeout ; uint8_t adapter_id[16U] ; uint8_t alt1_boot_node_name[8U] ; uint16_t alt1_boot_lun_number ; uint8_t alt2_boot_node_name[8U] ; uint16_t alt2_boot_lun_number ; uint8_t alt3_boot_node_name[8U] ; uint16_t alt3_boot_lun_number ; uint8_t alt4_boot_node_name[8U] ; uint16_t alt4_boot_lun_number ; uint8_t alt5_boot_node_name[8U] ; uint16_t alt5_boot_lun_number ; uint8_t alt6_boot_node_name[8U] ; uint16_t alt6_boot_lun_number ; uint8_t alt7_boot_node_name[8U] ; uint16_t alt7_boot_lun_number ; uint8_t reserved_3[2U] ; uint8_t model_number[16U] ; uint8_t oem_specific[16U] ; uint8_t adapter_features[2U] ; uint8_t reserved_4[16U] ; uint16_t subsystem_vendor_id_2200 ; uint16_t subsystem_device_id_2200 ; uint8_t reserved_5 ; uint8_t checksum ; }; typedef struct __anonstruct_nvram_t_244 nvram_t; struct __anonstruct_sw_info_t_264 { port_id_t d_id ; uint8_t node_name[8U] ; uint8_t port_name[8U] ; uint8_t fabric_port_name[8U] ; uint16_t fp_speed ; uint8_t fc4_type ; }; typedef struct __anonstruct_sw_info_t_264 sw_info_t; enum hrtimer_restart; enum blk_eh_timer_return; struct port_database_24xx { uint16_t flags ; uint8_t current_login_state ; uint8_t last_login_state ; uint8_t hard_address[3U] ; uint8_t reserved_1 ; uint8_t port_id[3U] ; uint8_t sequence_id ; uint16_t port_timer ; uint16_t nport_handle ; uint16_t receive_data_size ; uint16_t reserved_2 ; uint8_t prli_svc_param_word_0[2U] ; uint8_t prli_svc_param_word_3[2U] ; uint8_t port_name[8U] ; uint8_t node_name[8U] ; uint8_t reserved_3[24U] ; }; struct sts_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t comp_status ; uint16_t ox_id ; uint32_t residual_len ; uint16_t reserved_1 ; uint16_t state_flags ; uint16_t reserved_2 ; uint16_t scsi_status ; uint32_t rsp_residual_count ; uint32_t sense_len ; uint32_t rsp_data_len ; uint8_t data[28U] ; }; struct logio_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define 
; uint8_t entry_status ; uint32_t handle ; uint16_t comp_status ; uint16_t nport_handle ; uint16_t control_flags ; uint8_t vp_index ; uint8_t reserved_1 ; uint8_t port_id[3U] ; uint8_t rsp_size ; uint32_t io_parameter[11U] ; }; struct tsk_mgmt_entry { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t reserved_1 ; uint16_t delay ; uint16_t timeout ; struct scsi_lun lun ; uint32_t control_flags ; uint8_t reserved_2[20U] ; uint8_t port_id[3U] ; uint8_t vp_index ; uint8_t reserved_3[12U] ; }; struct abort_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t options ; uint32_t handle_to_abort ; uint16_t req_que_no ; uint8_t reserved_1[30U] ; uint8_t port_id[3U] ; uint8_t vp_index ; uint8_t reserved_2[12U] ; }; struct vp_ctrl_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t vp_idx_failed ; uint16_t comp_status ; uint16_t command ; uint16_t vp_count ; uint8_t vp_idx_map[16U] ; uint16_t flags ; uint16_t id ; uint16_t reserved_4 ; uint16_t hopct ; uint8_t reserved_5[24U] ; }; struct vp_config_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t handle ; uint16_t flags ; uint16_t comp_status ; uint8_t command ; uint8_t vp_count ; uint8_t vp_index1 ; uint8_t vp_index2 ; uint8_t options_idx1 ; uint8_t hard_address_idx1 ; uint16_t reserved_vp1 ; uint8_t port_name_idx1[8U] ; uint8_t node_name_idx1[8U] ; uint8_t options_idx2 ; uint8_t hard_address_idx2 ; uint16_t reserved_vp2 ; uint8_t port_name_idx2[8U] ; uint8_t node_name_idx2[8U] ; uint16_t id ; uint16_t reserved_4 ; uint16_t hopct ; uint8_t reserved_5[2U] ; }; struct vp_rpt_id_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t vp_count ; uint16_t vp_idx ; uint8_t port_id[3U] ; uint8_t format ; uint8_t vp_idx_map[16U] ; uint8_t reserved_4[32U] ; }; struct verify_chip_entry_84xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_defined ; uint8_t entry_status ; uint32_t handle ; uint16_t options ; uint16_t reserved_1 ; uint16_t data_seg_cnt ; uint16_t reserved_2[3U] ; uint32_t fw_ver ; uint32_t exchange_address ; uint32_t reserved_3[3U] ; uint32_t fw_size ; uint32_t fw_seq_size ; uint32_t relative_offset ; uint32_t dseg_address[2U] ; uint32_t dseg_length ; }; struct verify_chip_rsp_84xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_defined ; uint8_t entry_status ; uint32_t handle ; uint16_t comp_status ; uint16_t failure_code ; uint16_t reserved_1[4U] ; uint32_t fw_ver ; uint32_t exchange_address ; uint32_t reserved_2[6U] ; }; struct msg_echo_lb { dma_addr_t send_dma ; dma_addr_t rcv_dma ; uint16_t req_sg_cnt ; uint16_t rsp_sg_cnt ; uint16_t options ; uint32_t transfer_size ; uint32_t iteration_count ; }; struct __anonstruct_port_database_t_242 { uint8_t options ; uint8_t control ; uint8_t master_state ; uint8_t slave_state ; uint8_t reserved[2U] ; uint8_t hard_address ; uint8_t reserved_1 ; uint8_t port_id[4U] ; uint8_t node_name[8U] ; uint8_t port_name[8U] ; uint16_t execution_throttle ; uint16_t execution_count ; uint8_t reset_count ; uint8_t reserved_2 ; uint16_t resource_allocation ; uint16_t current_allocation ; uint16_t queue_head ; uint16_t queue_tail ; uint16_t transmit_execution_list_next ; uint16_t transmit_execution_list_previous 
; uint16_t common_features ; uint16_t total_concurrent_sequences ; uint16_t RO_by_information_category ; uint8_t recipient ; uint8_t initiator ; uint16_t receive_data_size ; uint16_t concurrent_sequences ; uint16_t open_sequences_per_exchange ; uint16_t lun_abort_flags ; uint16_t lun_stop_flags ; uint16_t stop_queue_head ; uint16_t stop_queue_tail ; uint16_t port_retry_timer ; uint16_t next_sequence_id ; uint16_t frame_count ; uint16_t PRLI_payload_length ; uint8_t prli_svc_param_word_0[2U] ; uint8_t prli_svc_param_word_3[2U] ; uint16_t loop_id ; uint16_t extended_lun_info_list_pointer ; uint16_t extended_lun_stop_list_pointer ; }; typedef struct __anonstruct_port_database_t_242 port_database_t; struct link_statistics { uint32_t link_fail_cnt ; uint32_t loss_sync_cnt ; uint32_t loss_sig_cnt ; uint32_t prim_seq_err_cnt ; uint32_t inval_xmit_word_cnt ; uint32_t inval_crc_cnt ; uint32_t lip_cnt ; uint32_t unused1[26U] ; uint32_t tx_frames ; uint32_t rx_frames ; uint32_t discarded_frames ; uint32_t dropped_frames ; uint32_t unused2[1U] ; uint32_t nos_rcvd ; }; struct __anonstruct_sts_entry_t_256 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t scsi_status ; uint16_t comp_status ; uint16_t state_flags ; uint16_t status_flags ; uint16_t rsp_info_len ; uint16_t req_sense_length ; uint32_t residual_length ; uint8_t rsp_info[8U] ; uint8_t req_sense_data[32U] ; }; typedef struct __anonstruct_sts_entry_t_256 sts_entry_t; struct qla_port_24xx_data { uint8_t port_name[8U] ; uint16_t loop_id ; uint16_t reserved ; }; union __anonunion_p_397 { struct tsk_mgmt_entry tsk ; struct sts_entry_24xx sts ; }; struct tsk_mgmt_cmd { union __anonunion_p_397 p ; }; union __anonunion_p_398 { struct verify_chip_entry_84xx req ; struct verify_chip_rsp_84xx rsp ; }; struct cs84xx_mgmt_cmd { union __anonunion_p_398 p ; }; enum hrtimer_restart; enum blk_eh_timer_return; struct cmd_bidir { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t timeout ; uint16_t wr_dseg_count ; uint16_t rd_dseg_count ; struct scsi_lun lun ; uint16_t control_flags ; uint16_t fcp_cmnd_dseg_len ; uint32_t fcp_cmnd_dseg_address[2U] ; uint16_t reserved[2U] ; uint32_t rd_byte_count ; uint32_t wr_byte_count ; uint8_t port_id[3U] ; uint8_t vp_index ; uint32_t fcp_data_dseg_address[2U] ; uint16_t fcp_data_dseg_len ; }; struct cmd_type_6 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t timeout ; uint16_t dseg_count ; uint16_t fcp_rsp_dsd_len ; struct scsi_lun lun ; uint16_t control_flags ; uint16_t fcp_cmnd_dseg_len ; uint32_t fcp_cmnd_dseg_address[2U] ; uint32_t fcp_rsp_dseg_address[2U] ; uint32_t byte_count ; uint8_t port_id[3U] ; uint8_t vp_index ; uint32_t fcp_data_dseg_address[2U] ; uint32_t fcp_data_dseg_len ; }; struct cmd_type_7 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t timeout ; uint16_t dseg_count ; uint16_t reserved_1 ; struct scsi_lun lun ; uint16_t task_mgmt_flags ; uint8_t task ; uint8_t crn ; uint8_t fcp_cdb[16U] ; uint32_t byte_count ; uint8_t port_id[3U] ; uint8_t vp_index ; uint32_t dseg_0_address[2U] ; uint32_t dseg_0_len ; }; struct cmd_type_crc_2 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t timeout ; 
uint16_t dseg_count ; uint16_t fcp_rsp_dseg_len ; struct scsi_lun lun ; uint16_t control_flags ; uint16_t fcp_cmnd_dseg_len ; uint32_t fcp_cmnd_dseg_address[2U] ; uint32_t fcp_rsp_dseg_address[2U] ; uint32_t byte_count ; uint8_t port_id[3U] ; uint8_t vp_index ; uint32_t crc_context_address[2U] ; uint16_t crc_context_len ; uint16_t reserved_1 ; }; struct mrk_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint8_t modifier ; uint8_t reserved_1 ; uint8_t reserved_2 ; uint8_t vp_index ; uint16_t reserved_3 ; uint8_t lun[8U] ; uint8_t reserved_4[40U] ; }; struct ct_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t comp_status ; uint16_t nport_handle ; uint16_t cmd_dsd_count ; uint8_t vp_index ; uint8_t reserved_1 ; uint16_t timeout ; uint16_t reserved_2 ; uint16_t rsp_dsd_count ; uint8_t reserved_3[10U] ; uint32_t rsp_byte_count ; uint32_t cmd_byte_count ; uint32_t dseg_0_address[2U] ; uint32_t dseg_0_len ; uint32_t dseg_1_address[2U] ; uint32_t dseg_1_len ; }; struct els_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t reserved_1 ; uint16_t nport_handle ; uint16_t tx_dsd_count ; uint8_t vp_index ; uint8_t sof_type ; uint32_t rx_xchg_address ; uint16_t rx_dsd_count ; uint8_t opcode ; uint8_t reserved_2 ; uint8_t port_id[3U] ; uint8_t reserved_3 ; uint16_t reserved_4 ; uint16_t control_flags ; uint32_t rx_byte_count ; uint32_t tx_byte_count ; uint32_t tx_address[2U] ; uint32_t tx_len ; uint32_t rx_address[2U] ; uint32_t rx_len ; }; struct __anonstruct_cont_entry_t_251 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t reserved ; uint32_t dseg_0_address ; uint32_t dseg_0_length ; uint32_t dseg_1_address ; uint32_t dseg_1_length ; uint32_t dseg_2_address ; uint32_t dseg_2_length ; uint32_t dseg_3_address ; uint32_t dseg_3_length ; uint32_t dseg_4_address ; uint32_t dseg_4_length ; uint32_t dseg_5_address ; uint32_t dseg_5_length ; uint32_t dseg_6_address ; uint32_t dseg_6_length ; }; typedef struct __anonstruct_cont_entry_t_251 cont_entry_t; struct __anonstruct_cont_a64_entry_t_252 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t dseg_0_address[2U] ; uint32_t dseg_0_length ; uint32_t dseg_1_address[2U] ; uint32_t dseg_1_length ; uint32_t dseg_2_address[2U] ; uint32_t dseg_2_length ; uint32_t dseg_3_address[2U] ; uint32_t dseg_3_length ; uint32_t dseg_4_address[2U] ; uint32_t dseg_4_length ; }; typedef struct __anonstruct_cont_a64_entry_t_252 cont_a64_entry_t; struct __anonstruct_mrk_entry_t_260 { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t sys_define_2 ; target_id_t target ; uint8_t modifier ; uint8_t reserved_1 ; uint16_t sequence_number ; uint16_t lun ; uint8_t reserved_2[48U] ; }; typedef struct __anonstruct_mrk_entry_t_260 mrk_entry_t; struct mbx_entry { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define1 ; uint8_t entry_status ; uint32_t handle ; target_id_t loop_id ; uint16_t status ; uint16_t state_flags ; uint16_t status_flags ; uint32_t sys_define2[2U] ; uint16_t mb0 ; uint16_t mb1 ; uint16_t mb2 ; uint16_t mb3 ; uint16_t mb6 ; uint16_t mb7 ; uint16_t mb9 ; uint16_t mb10 ; uint32_t reserved_2[2U] ; uint8_t node_name[8U] ; uint8_t port_name[8U] ; }; struct mrk_entry_fx00 { uint8_t 
entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t handle ; uint32_t handle_hi ; uint16_t tgt_id ; uint8_t modifier ; uint8_t reserved_1 ; uint8_t reserved_2[5U] ; uint8_t lun[8U] ; uint8_t reserved_3[36U] ; }; struct tsk_mgmt_entry_fx00 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; __le32 handle ; uint32_t handle_hi ; __le16 tgt_id ; uint16_t reserved_1 ; uint16_t delay ; __le16 timeout ; struct scsi_lun lun ; __le32 control_flags ; uint8_t reserved_2[32U] ; }; struct abort_iocb_entry_fx00 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; __le32 handle ; __le32 handle_hi ; __le16 tgt_id_sts ; __le16 options ; __le32 abort_handle ; __le32 abort_handle_hi ; __le16 req_que_no ; uint8_t reserved_1[38U] ; }; struct fxdisc_entry_fx00 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; __le32 handle ; __le32 reserved_0 ; __le16 func_num ; __le16 req_xfrcnt ; __le16 req_dsdcnt ; __le16 rsp_xfrcnt ; __le16 rsp_dsdcnt ; uint8_t flags ; uint8_t reserved_1 ; __le32 dseg_rq_address[2U] ; __le32 dseg_rq_len ; __le32 dseg_rsp_address[2U] ; __le32 dseg_rsp_len ; __le32 dataword ; __le32 adapid ; __le32 adapid_hi ; __le32 dataword_extra ; }; struct fw_dif_context { uint32_t ref_tag ; uint16_t app_tag ; uint8_t ref_tag_mask[4U] ; uint8_t app_tag_mask[2U] ; }; struct qla2_sgx { dma_addr_t dma_addr ; uint32_t dma_len ; uint32_t tot_bytes ; struct scatterlist *cur_sg ; uint32_t bytes_consumed ; uint32_t num_bytes ; uint32_t tot_partial ; uint32_t num_sg ; srb_t *sp ; }; enum hrtimer_restart; struct msix_entry { u32 vector ; u16 entry ; }; enum blk_eh_timer_return; struct els_sts_entry_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t comp_status ; uint16_t nport_handle ; uint16_t reserved_1 ; uint8_t vp_index ; uint8_t sof_type ; uint32_t rx_xchg_address ; uint16_t reserved_2 ; uint8_t opcode ; uint8_t reserved_3 ; uint8_t port_id[3U] ; uint8_t reserved_4 ; uint16_t reserved_5 ; uint16_t control_flags ; uint32_t total_byte_count ; uint32_t error_subcode_1 ; uint32_t error_subcode_2 ; }; struct sd_dif_tuple { __be16 guard_tag ; __be16 app_tag ; __be32 ref_tag ; }; struct __anonstruct_sts_cont_entry_t_257 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint8_t data[60U] ; }; typedef struct __anonstruct_sts_cont_entry_t_257 sts_cont_entry_t; struct __anonstruct_sts21_entry_t_258 { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint32_t handle[15U] ; }; typedef struct __anonstruct_sts21_entry_t_258 sts21_entry_t; struct __anonstruct_sts22_entry_t_259 { uint8_t entry_type ; uint8_t entry_count ; uint8_t handle_count ; uint8_t entry_status ; uint16_t handle[30U] ; }; typedef struct __anonstruct_sts22_entry_t_259 sts22_entry_t; struct scsi_sense_hdr { u8 response_code ; u8 sense_key ; u8 asc ; u8 ascq ; u8 byte4 ; u8 byte5 ; u8 byte6 ; u8 additional_length ; }; struct qla_init_msix_entry { char const *name ; irqreturn_t (*handler)(int , void * ) ; }; enum hrtimer_restart; enum blk_eh_timer_return; enum fc_tgtid_binding_type { FC_TGTID_BIND_NONE = 0, FC_TGTID_BIND_BY_WWPN = 1, FC_TGTID_BIND_BY_WWNN = 2, FC_TGTID_BIND_BY_ID = 3 } ; struct fc_host_attrs { u64 node_name ; u64 port_name ; u64 permanent_port_name ; u32 supported_classes ; u8 supported_fc4s[32U] ; u32 supported_speeds ; u32 maxframe_size ; u16 
max_npiv_vports ; char serial_number[80U] ; char manufacturer[80U] ; char model[256U] ; char model_description[256U] ; char hardware_version[64U] ; char driver_version[64U] ; char firmware_version[64U] ; char optionrom_version[64U] ; u32 port_id ; enum fc_port_type port_type ; enum fc_port_state port_state ; u8 active_fc4s[32U] ; u32 speed ; u64 fabric_name ; char symbolic_name[256U] ; char system_hostname[256U] ; u32 dev_loss_tmo ; enum fc_tgtid_binding_type tgtid_bind_type ; struct list_head rports ; struct list_head rport_bindings ; struct list_head vports ; u32 next_rport_number ; u32 next_target_id ; u32 next_vport_number ; u16 npiv_vports_inuse ; char work_q_name[20U] ; struct workqueue_struct *work_q ; char devloss_work_q_name[20U] ; struct workqueue_struct *devloss_work_q ; struct request_queue *rqst_q ; }; struct __va_list_tag; typedef struct __va_list_tag __va_list_tag; typedef __builtin_va_list __gnuc_va_list; typedef __gnuc_va_list va_list; struct va_format { char const *fmt ; va_list *va ; }; enum hrtimer_restart; enum blk_eh_timer_return; struct qla2xxx_fce_chain { uint32_t type ; uint32_t chain_size ; uint32_t size ; uint32_t addr_l ; uint32_t addr_h ; uint32_t eregs[8U] ; }; struct qla2xxx_mq_chain { uint32_t type ; uint32_t chain_size ; uint32_t count ; uint32_t qregs[128U] ; }; struct qla2xxx_mqueue_header { uint32_t queue ; uint32_t number ; uint32_t size ; }; struct qla2xxx_mqueue_chain { uint32_t type ; uint32_t chain_size ; }; struct __anonstruct_aq_308 { int length ; void *ring ; }; struct __anonstruct_aqp_309 { int length ; void *ring ; }; enum hrtimer_restart; enum blk_eh_timer_return; struct fc_vport_identifiers { u64 node_name ; u64 port_name ; u32 roles ; bool disable ; enum fc_port_type vport_type ; char symbolic_name[64U] ; }; struct qla_fdt_layout { uint8_t sig[4U] ; uint16_t version ; uint16_t len ; uint16_t checksum ; uint8_t unused1[2U] ; uint8_t model[16U] ; uint16_t man_id ; uint16_t id ; uint8_t flags ; uint8_t erase_cmd ; uint8_t alt_erase_cmd ; uint8_t wrt_enable_cmd ; uint8_t wrt_enable_bits ; uint8_t wrt_sts_reg_cmd ; uint8_t unprotect_sec_cmd ; uint8_t read_man_id_cmd ; uint32_t block_size ; uint32_t alt_block_size ; uint32_t flash_size ; uint32_t wrt_enable_data ; uint8_t read_id_addr_len ; uint8_t wrt_disable_bits ; uint8_t read_dev_id_len ; uint8_t chip_erase_cmd ; uint16_t read_timeout ; uint8_t protect_sec_cmd ; uint8_t unused2[65U] ; }; struct qla_flt_location { uint8_t sig[4U] ; uint16_t start_lo ; uint16_t start_hi ; uint8_t version ; uint8_t unused[5U] ; uint16_t checksum ; }; struct qla_flt_header { uint16_t version ; uint16_t length ; uint16_t checksum ; uint16_t unused ; }; struct qla_flt_region { uint32_t code ; uint32_t size ; uint32_t start ; uint32_t end ; }; struct qla_npiv_header { uint8_t sig[2U] ; uint16_t version ; uint16_t entries ; uint16_t unused[4U] ; uint16_t checksum ; }; enum hrtimer_restart; enum blk_eh_timer_return; struct fc_starget_attrs { u64 node_name ; u64 port_name ; u32 port_id ; }; struct sysfs_entry { char *name ; struct bin_attribute *attr ; int is4GBp_only ; }; enum hrtimer_restart; enum blk_eh_timer_return; enum hrtimer_restart; enum blk_eh_timer_return; enum hrtimer_restart; enum blk_eh_timer_return; struct __anonstruct_mem_208 { uint32_t start_addr ; }; struct __anonstruct_config_209 { uint32_t id ; uint32_t param0 ; uint32_t param1 ; }; struct __anonstruct_info_210 { uint32_t type ; uint32_t context ; }; union __anonunion_u_207 { struct __anonstruct_mem_208 mem ; struct __anonstruct_config_209 config ; 
struct __anonstruct_info_210 info ; }; struct qla84_mgmt_param { union __anonunion_u_207 u ; }; struct qla84_msg_mgmt { uint16_t cmd ; uint16_t rsrvd ; struct qla84_mgmt_param mgmtp ; uint32_t len ; uint8_t payload[0U] ; }; struct qla_bsg_a84_mgmt { struct qla84_msg_mgmt mgmt ; }; struct qla_scsi_addr { uint16_t bus ; uint16_t target ; }; union __anonunion_dest_addr_211 { uint8_t wwnn[8U] ; uint8_t wwpn[8U] ; uint8_t id[4U] ; struct qla_scsi_addr scsi_addr ; }; struct qla_ext_dest_addr { union __anonunion_dest_addr_211 dest_addr ; uint16_t dest_type ; uint16_t lun ; uint16_t padding[2U] ; }; struct qla_port_param { struct qla_ext_dest_addr fc_scsi_addr ; uint16_t mode ; uint16_t speed ; }; struct qla_field_address { uint16_t offset ; uint16_t device ; uint16_t option ; }; struct qla_field_info { uint8_t version[36U] ; }; struct qla_image_version { struct qla_field_address field_address ; struct qla_field_info field_info ; }; struct qla_image_version_list { uint32_t count ; struct qla_image_version version[0U] ; }; struct qla_status_reg { struct qla_field_address field_address ; uint8_t status_reg ; uint8_t reserved[7U] ; }; struct qla_i2c_access { uint16_t device ; uint16_t offset ; uint16_t option ; uint16_t length ; uint8_t buffer[64U] ; }; struct access_chip_84xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_defined ; uint8_t entry_status ; uint32_t handle ; uint16_t options ; uint16_t reserved1 ; uint16_t dseg_count ; uint16_t reserved2[3U] ; uint32_t parameter1 ; uint32_t parameter2 ; uint32_t parameter3 ; uint32_t reserved3[3U] ; uint32_t total_byte_cnt ; uint32_t reserved4 ; uint32_t dseg_address[2U] ; uint32_t dseg_length ; }; struct qla_mt_iocb_rqst_fx00 { __le32 reserved_0 ; __le16 func_type ; uint8_t flags ; uint8_t reserved_1 ; __le32 dataword ; __le32 adapid ; __le32 adapid_hi ; __le32 dataword_extra ; __le16 req_len ; __le16 reserved_2 ; __le16 rsp_len ; __le16 reserved_3 ; }; typedef __u64 __le64; enum hrtimer_restart; struct ratelimit_state { raw_spinlock_t lock ; int interval ; int burst ; int printed ; int missed ; unsigned long begin ; }; enum blk_eh_timer_return; struct crb_128M_2M_sub_block_map { unsigned int valid ; unsigned int start_128M ; unsigned int end_128M ; unsigned int start_2M ; }; struct crb_128M_2M_block_map { struct crb_128M_2M_sub_block_map sub_block[16U] ; }; struct crb_addr_pair { long addr ; long data ; }; struct qla82xx_uri_table_desc { uint32_t findex ; uint32_t num_entries ; uint32_t entry_size ; uint32_t reserved[5U] ; }; struct qla82xx_uri_data_desc { uint32_t findex ; uint32_t size ; uint32_t reserved[5U] ; }; struct qla82xx_md_template_hdr { uint32_t entry_type ; uint32_t first_entry_offset ; uint32_t size_of_template ; uint32_t capture_debug_level ; uint32_t num_of_entries ; uint32_t version ; uint32_t driver_timestamp ; uint32_t template_checksum ; uint32_t driver_capture_mask ; uint32_t driver_info[3U] ; uint32_t saved_state_array[16U] ; uint32_t capture_size_array[8U] ; uint32_t markers_array[8U] ; uint32_t num_of_free_entries ; uint32_t free_entry_offset ; uint32_t total_table_size ; uint32_t bkup_table_offset ; }; struct __anonstruct_d_ctrl_212 { uint8_t entry_capture_mask ; uint8_t entry_code ; uint8_t driver_code ; uint8_t driver_flags ; }; struct qla82xx_md_entry_hdr { uint32_t entry_type ; uint32_t entry_size ; uint32_t entry_capture_size ; struct __anonstruct_d_ctrl_212 d_ctrl ; }; typedef struct qla82xx_md_entry_hdr qla82xx_md_entry_hdr_t; struct __anonstruct_crb_strd_213 { uint8_t addr_stride ; uint8_t state_index_a ; 
uint16_t poll_timeout ; }; struct __anonstruct_crb_ctrl_214 { uint8_t opcode ; uint8_t state_index_v ; uint8_t shl ; uint8_t shr ; }; struct qla82xx_md_entry_crb { qla82xx_md_entry_hdr_t h ; uint32_t addr ; struct __anonstruct_crb_strd_213 crb_strd ; uint32_t data_size ; uint32_t op_count ; struct __anonstruct_crb_ctrl_214 crb_ctrl ; uint32_t value_1 ; uint32_t value_2 ; uint32_t value_3 ; }; struct __anonstruct_addr_ctrl_215 { uint16_t tag_value_stride ; uint16_t init_tag_value ; }; struct __anonstruct_cache_ctrl_216 { uint16_t write_value ; uint8_t poll_mask ; uint8_t poll_wait ; }; struct __anonstruct_read_ctrl_217 { uint8_t read_addr_stride ; uint8_t read_addr_cnt ; uint16_t rsvd_1 ; }; struct qla82xx_md_entry_cache { qla82xx_md_entry_hdr_t h ; uint32_t tag_reg_addr ; struct __anonstruct_addr_ctrl_215 addr_ctrl ; uint32_t data_size ; uint32_t op_count ; uint32_t control_addr ; struct __anonstruct_cache_ctrl_216 cache_ctrl ; uint32_t read_addr ; struct __anonstruct_read_ctrl_217 read_ctrl ; }; struct qla82xx_md_entry_rdocm { qla82xx_md_entry_hdr_t h ; uint32_t rsvd_0 ; uint32_t rsvd_1 ; uint32_t data_size ; uint32_t op_count ; uint32_t rsvd_2 ; uint32_t rsvd_3 ; uint32_t read_addr ; uint32_t read_addr_stride ; uint32_t read_addr_cntrl ; }; struct qla82xx_md_entry_rdmem { qla82xx_md_entry_hdr_t h ; uint32_t rsvd[6U] ; uint32_t read_addr ; uint32_t read_data_size ; }; struct qla82xx_md_entry_rdrom { qla82xx_md_entry_hdr_t h ; uint32_t rsvd[6U] ; uint32_t read_addr ; uint32_t read_data_size ; }; struct qla82xx_md_entry_mux { qla82xx_md_entry_hdr_t h ; uint32_t select_addr ; uint32_t rsvd_0 ; uint32_t data_size ; uint32_t op_count ; uint32_t select_value ; uint32_t select_value_stride ; uint32_t read_addr ; uint32_t rsvd_1 ; }; struct __anonstruct_q_strd_218 { uint16_t queue_id_stride ; uint16_t rsvd_0 ; }; struct __anonstruct_rd_strd_219 { uint8_t read_addr_stride ; uint8_t read_addr_cnt ; uint16_t rsvd_3 ; }; struct qla82xx_md_entry_queue { qla82xx_md_entry_hdr_t h ; uint32_t select_addr ; struct __anonstruct_q_strd_218 q_strd ; uint32_t data_size ; uint32_t op_count ; uint32_t rsvd_1 ; uint32_t rsvd_2 ; uint32_t read_addr ; struct __anonstruct_rd_strd_219 rd_strd ; }; struct crb_addr_pair___0 { long addr ; long data ; }; typedef __kernel_long_t __kernel_suseconds_t; struct timeval { __kernel_time_t tv_sec ; __kernel_suseconds_t tv_usec ; }; enum hrtimer_restart; enum blk_eh_timer_return; struct init_cb_fx { uint16_t version ; uint16_t reserved_1[13U] ; __le16 request_q_outpointer ; __le16 response_q_inpointer ; uint16_t reserved_2[2U] ; __le16 response_q_length ; __le16 request_q_length ; uint16_t reserved_3[2U] ; __le32 request_q_address[2U] ; __le32 response_q_address[2U] ; uint16_t reserved_4[4U] ; uint8_t response_q_msivec ; uint8_t reserved_5[19U] ; uint16_t interrupt_delay_timer ; uint16_t reserved_6 ; uint32_t fwoptions1 ; uint32_t fwoptions2 ; uint32_t fwoptions3 ; uint8_t reserved_7[24U] ; }; struct cmd_type_7_fx00 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint32_t handle_hi ; __le16 tgt_idx ; uint16_t timeout ; __le16 dseg_count ; uint16_t scsi_rsp_dsd_len ; struct scsi_lun lun ; uint8_t cntrl_flags ; uint8_t task_mgmt_flags ; uint8_t task ; uint8_t crn ; uint8_t fcp_cdb[16U] ; __le32 byte_count ; uint32_t dseg_0_address[2U] ; uint32_t dseg_0_len ; }; struct sts_entry_fx00 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint32_t handle_hi ; __le16 
comp_status ; uint16_t reserved_0 ; __le32 residual_len ; uint16_t reserved_1 ; uint16_t state_flags ; uint16_t reserved_2 ; __le16 scsi_status ; uint32_t sense_len ; uint8_t data[32U] ; }; struct multi_sts_entry_fx00 { uint8_t entry_type ; uint8_t sys_define ; uint8_t handle_count ; uint8_t entry_status ; __le32 handles[15U] ; }; struct ioctl_iocb_entry_fx00 { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint32_t reserved_0 ; uint16_t comp_func_num ; __le16 fw_iotcl_flags ; __le32 dataword_r ; uint32_t adapid ; uint32_t adapid_hi ; uint32_t reserved_1 ; __le32 seq_no ; uint8_t reserved_2[20U] ; uint32_t residuallen ; __le32 status ; }; struct qlafx00_tgt_node_info { uint8_t tgt_node_wwpn[8U] ; uint8_t tgt_node_wwnn[8U] ; uint32_t tgt_node_state ; uint8_t reserved[128U] ; uint32_t reserved_1[8U] ; uint64_t reserved_2[4U] ; }; struct port_info_data { uint8_t port_state ; uint8_t port_type ; uint16_t port_identifier ; uint32_t up_port_state ; uint8_t fw_ver_num[32U] ; uint8_t portal_attrib ; uint16_t host_option ; uint8_t reset_delay ; uint8_t pdwn_retry_cnt ; uint16_t max_luns2tgt ; uint8_t risc_ver ; uint8_t pconn_option ; uint16_t risc_option ; uint16_t max_frame_len ; uint16_t max_iocb_alloc ; uint16_t exec_throttle ; uint8_t retry_cnt ; uint8_t retry_delay ; uint8_t port_name[8U] ; uint8_t port_id[3U] ; uint8_t link_status ; uint8_t plink_rate ; uint32_t link_config ; uint16_t adap_haddr ; uint8_t tgt_disc ; uint8_t log_tout ; uint8_t node_name[8U] ; uint16_t erisc_opt1 ; uint8_t resp_acc_tmr ; uint8_t intr_del_tmr ; uint8_t erisc_opt2 ; uint8_t alt_port_name[8U] ; uint8_t alt_node_name[8U] ; uint8_t link_down_tout ; uint8_t conn_type ; uint8_t fc_fw_mode ; uint32_t uiReserved[48U] ; }; struct host_system_info { uint32_t os_type ; char sysname[128U] ; char nodename[64U] ; char release[64U] ; char version[64U] ; char machine[64U] ; char domainname[64U] ; char hostdriver[64U] ; uint32_t reserved[64U] ; }; struct register_host_info { struct host_system_info hsi ; uint64_t utc ; uint32_t reserved[64U] ; }; struct config_info_data { uint8_t product_name[256U] ; uint8_t symbolic_name[64U] ; uint8_t serial_num[32U] ; uint8_t hw_version[16U] ; uint8_t fw_version[16U] ; uint8_t uboot_version[16U] ; uint8_t fru_serial_num[32U] ; uint8_t fc_port_count ; uint8_t iscsi_port_count ; uint8_t reserved1[2U] ; uint8_t mode ; uint8_t log_level ; uint8_t reserved2[2U] ; uint32_t log_size ; uint8_t tgt_pres_mode ; uint8_t iqn_flags ; uint8_t lun_mapping ; uint64_t adapter_id ; uint32_t cluster_key_len ; uint8_t cluster_key[16U] ; uint64_t cluster_master_id ; uint64_t cluster_slave_id ; uint8_t cluster_flags ; uint32_t enabled_capabilities ; uint32_t nominal_temp_value ; }; struct qla_mt_iocb_rsp_fx00 { uint32_t reserved_1 ; uint16_t func_type ; __le16 ioctl_flags ; __le32 ioctl_data ; uint32_t adapid ; uint32_t adapid_hi ; uint32_t reserved_2 ; __le32 seq_number ; uint8_t reserved_3[20U] ; int32_t res_count ; __le32 status ; }; struct new_utsname { char sysname[65U] ; char nodename[65U] ; char release[65U] ; char version[65U] ; char machine[65U] ; char domainname[65U] ; }; struct uts_namespace { struct kref kref ; struct new_utsname name ; struct user_namespace *user_ns ; unsigned int proc_inum ; }; enum hrtimer_restart; enum blk_eh_timer_return; struct qla8044_reset_entry_hdr { uint16_t cmd ; uint16_t size ; uint16_t count ; uint16_t delay ; }; struct qla8044_poll { uint32_t test_mask ; uint32_t test_value ; }; struct qla8044_rmw { uint32_t 
test_mask ; uint32_t xor_value ; uint32_t or_value ; uint8_t shl ; uint8_t shr ; uint8_t index_a ; uint8_t rsvd ; }; struct qla8044_entry { uint32_t arg1 ; uint32_t arg2 ; }; struct qla8044_quad_entry { uint32_t dr_addr ; uint32_t dr_value ; uint32_t ar_addr ; uint32_t ar_value ; }; struct __anonstruct_d_ctrl_220 { uint8_t entry_capture_mask ; uint8_t entry_code ; uint8_t driver_code ; uint8_t driver_flags ; }; struct qla8044_minidump_entry_hdr { uint32_t entry_type ; uint32_t entry_size ; uint32_t entry_capture_size ; struct __anonstruct_d_ctrl_220 d_ctrl ; }; struct __anonstruct_crb_strd_221 { uint8_t addr_stride ; uint8_t state_index_a ; uint16_t poll_timeout ; }; struct __anonstruct_crb_ctrl_222 { uint8_t opcode ; uint8_t state_index_v ; uint8_t shl ; uint8_t shr ; }; struct qla8044_minidump_entry_crb { struct qla8044_minidump_entry_hdr h ; uint32_t addr ; struct __anonstruct_crb_strd_221 crb_strd ; uint32_t data_size ; uint32_t op_count ; struct __anonstruct_crb_ctrl_222 crb_ctrl ; uint32_t value_1 ; uint32_t value_2 ; uint32_t value_3 ; }; struct __anonstruct_addr_ctrl_223 { uint16_t tag_value_stride ; uint16_t init_tag_value ; }; struct __anonstruct_cache_ctrl_224 { uint16_t write_value ; uint8_t poll_mask ; uint8_t poll_wait ; }; struct __anonstruct_read_ctrl_225 { uint8_t read_addr_stride ; uint8_t read_addr_cnt ; uint16_t rsvd_1 ; }; struct qla8044_minidump_entry_cache { struct qla8044_minidump_entry_hdr h ; uint32_t tag_reg_addr ; struct __anonstruct_addr_ctrl_223 addr_ctrl ; uint32_t data_size ; uint32_t op_count ; uint32_t control_addr ; struct __anonstruct_cache_ctrl_224 cache_ctrl ; uint32_t read_addr ; struct __anonstruct_read_ctrl_225 read_ctrl ; }; struct qla8044_minidump_entry_rdocm { struct qla8044_minidump_entry_hdr h ; uint32_t rsvd_0 ; uint32_t rsvd_1 ; uint32_t data_size ; uint32_t op_count ; uint32_t rsvd_2 ; uint32_t rsvd_3 ; uint32_t read_addr ; uint32_t read_addr_stride ; }; struct qla8044_minidump_entry_rdmem { struct qla8044_minidump_entry_hdr h ; uint32_t rsvd[6U] ; uint32_t read_addr ; uint32_t read_data_size ; }; struct qla8044_minidump_entry_rdmem_pex_dma { struct qla8044_minidump_entry_hdr h ; uint32_t desc_card_addr ; uint16_t dma_desc_cmd ; uint8_t rsvd[2U] ; uint32_t start_dma_cmd ; uint8_t rsvd2[12U] ; uint32_t read_addr ; uint32_t read_data_size ; }; struct qla8044_minidump_entry_rdrom { struct qla8044_minidump_entry_hdr h ; uint32_t rsvd[6U] ; uint32_t read_addr ; uint32_t read_data_size ; }; struct qla8044_minidump_entry_mux { struct qla8044_minidump_entry_hdr h ; uint32_t select_addr ; uint32_t rsvd_0 ; uint32_t data_size ; uint32_t op_count ; uint32_t select_value ; uint32_t select_value_stride ; uint32_t read_addr ; uint32_t rsvd_1 ; }; struct __anonstruct_q_strd_226 { uint16_t queue_id_stride ; uint16_t rsvd_0 ; }; struct __anonstruct_rd_strd_227 { uint8_t read_addr_stride ; uint8_t read_addr_cnt ; uint16_t rsvd_3 ; }; struct qla8044_minidump_entry_queue { struct qla8044_minidump_entry_hdr h ; uint32_t select_addr ; struct __anonstruct_q_strd_226 q_strd ; uint32_t data_size ; uint32_t op_count ; uint32_t rsvd_1 ; uint32_t rsvd_2 ; uint32_t read_addr ; struct __anonstruct_rd_strd_227 rd_strd ; }; struct qla8044_minidump_entry_pollrd { struct qla8044_minidump_entry_hdr h ; uint32_t select_addr ; uint32_t read_addr ; uint32_t select_value ; uint16_t select_value_stride ; uint16_t op_count ; uint32_t poll_wait ; uint32_t poll_mask ; uint32_t data_size ; uint32_t rsvd_1 ; }; struct qla8044_minidump_entry_rdmux2 { struct qla8044_minidump_entry_hdr h 
; uint32_t select_addr_1 ; uint32_t select_addr_2 ; uint32_t select_value_1 ; uint32_t select_value_2 ; uint32_t op_count ; uint32_t select_value_mask ; uint32_t read_addr ; uint8_t select_value_stride ; uint8_t data_size ; uint8_t rsvd[2U] ; }; struct qla8044_minidump_entry_pollrdmwr { struct qla8044_minidump_entry_hdr h ; uint32_t addr_1 ; uint32_t addr_2 ; uint32_t value_1 ; uint32_t value_2 ; uint32_t poll_wait ; uint32_t poll_mask ; uint32_t modify_mask ; uint32_t data_size ; }; struct qla8044_minidump_template_hdr { uint32_t entry_type ; uint32_t first_entry_offset ; uint32_t size_of_template ; uint32_t capture_debug_level ; uint32_t num_of_entries ; uint32_t version ; uint32_t driver_timestamp ; uint32_t checksum ; uint32_t driver_capture_mask ; uint32_t driver_info_word2 ; uint32_t driver_info_word3 ; uint32_t driver_info_word4 ; uint32_t saved_state_array[16U] ; uint32_t capture_size_array[8U] ; uint32_t ocm_window_reg[16U] ; }; struct __anonstruct_cmd_228 { uint32_t read_data_size ; uint8_t rsvd[2U] ; uint16_t dma_desc_cmd ; }; struct qla8044_pex_dma_descriptor { struct __anonstruct_cmd_228 cmd ; uint64_t src_addr ; uint64_t dma_bus_addr ; uint8_t rsvd[24U] ; }; typedef __u64 __be64; struct _ddebug { char const *modname ; char const *function ; char const *filename ; char const *format ; unsigned int lineno : 18 ; unsigned char flags ; }; struct __wait_queue; typedef struct __wait_queue wait_queue_t; struct __wait_queue { unsigned int flags ; void *private ; int (*func)(wait_queue_t * , unsigned int , int , void * ) ; struct list_head task_list ; }; enum hrtimer_restart; struct se_tpg_np { struct se_portal_group *tpg_np_parent ; struct config_group tpg_np_group ; }; struct target_core_fabric_ops { struct configfs_subsystem *tf_subsys ; char *(*get_fabric_name)(void) ; u8 (*get_fabric_proto_ident)(struct se_portal_group * ) ; char *(*tpg_get_wwn)(struct se_portal_group * ) ; u16 (*tpg_get_tag)(struct se_portal_group * ) ; u32 (*tpg_get_default_depth)(struct se_portal_group * ) ; u32 (*tpg_get_pr_transport_id)(struct se_portal_group * , struct se_node_acl * , struct t10_pr_registration * , int * , unsigned char * ) ; u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group * , struct se_node_acl * , struct t10_pr_registration * , int * ) ; char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group * , char const * , u32 * , char ** ) ; int (*tpg_check_demo_mode)(struct se_portal_group * ) ; int (*tpg_check_demo_mode_cache)(struct se_portal_group * ) ; int (*tpg_check_demo_mode_write_protect)(struct se_portal_group * ) ; int (*tpg_check_prod_mode_write_protect)(struct se_portal_group * ) ; int (*tpg_check_demo_mode_login_only)(struct se_portal_group * ) ; struct se_node_acl *(*tpg_alloc_fabric_acl)(struct se_portal_group * ) ; void (*tpg_release_fabric_acl)(struct se_portal_group * , struct se_node_acl * ) ; u32 (*tpg_get_inst_index)(struct se_portal_group * ) ; int (*check_stop_free)(struct se_cmd * ) ; void (*release_cmd)(struct se_cmd * ) ; void (*put_session)(struct se_session * ) ; int (*shutdown_session)(struct se_session * ) ; void (*close_session)(struct se_session * ) ; u32 (*sess_get_index)(struct se_session * ) ; u32 (*sess_get_initiator_sid)(struct se_session * , unsigned char * , u32 ) ; int (*write_pending)(struct se_cmd * ) ; int (*write_pending_status)(struct se_cmd * ) ; void (*set_default_node_attributes)(struct se_node_acl * ) ; u32 (*get_task_tag)(struct se_cmd * ) ; int (*get_cmd_state)(struct se_cmd * ) ; int (*queue_data_in)(struct se_cmd * ) ; int 
(*queue_status)(struct se_cmd * ) ; void (*queue_tm_rsp)(struct se_cmd * ) ; struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs * , struct config_group * , char const * ) ; void (*fabric_drop_wwn)(struct se_wwn * ) ; struct se_portal_group *(*fabric_make_tpg)(struct se_wwn * , struct config_group * , char const * ) ; void (*fabric_drop_tpg)(struct se_portal_group * ) ; int (*fabric_post_link)(struct se_portal_group * , struct se_lun * ) ; void (*fabric_pre_unlink)(struct se_portal_group * , struct se_lun * ) ; struct se_tpg_np *(*fabric_make_np)(struct se_portal_group * , struct config_group * , char const * ) ; void (*fabric_drop_np)(struct se_tpg_np * ) ; struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group * , struct config_group * , char const * ) ; void (*fabric_drop_nodeacl)(struct se_node_acl * ) ; }; struct __anonstruct_isp2x_385 { uint32_t sys_define_2 ; target_id_t target ; uint8_t target_id ; uint8_t reserved_1 ; uint16_t flags ; uint16_t resp_code ; uint16_t status ; uint16_t task_flags ; uint16_t seq_id ; uint16_t srr_rx_id ; uint32_t srr_rel_offs ; uint16_t srr_ui ; uint16_t srr_flags ; uint16_t srr_reject_code ; uint8_t srr_reject_vendor_uniq ; uint8_t srr_reject_code_expl ; uint8_t reserved_2[24U] ; }; struct __anonstruct_isp24_386 { uint32_t handle ; uint16_t nport_handle ; uint16_t reserved_1 ; uint16_t flags ; uint16_t srr_rx_id ; uint16_t status ; uint8_t status_subcode ; uint8_t fw_handle ; uint32_t exchange_address ; uint32_t srr_rel_offs ; uint16_t srr_ui ; uint16_t srr_flags ; uint8_t reserved_4[19U] ; uint8_t vp_index ; uint8_t srr_reject_vendor_uniq ; uint8_t srr_reject_code_expl ; uint8_t srr_reject_code ; uint8_t reserved_5[5U] ; }; union __anonunion_u_384 { struct __anonstruct_isp2x_385 isp2x ; struct __anonstruct_isp24_386 isp24 ; }; struct nack_to_isp { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; union __anonunion_u_384 u ; uint8_t reserved[2U] ; uint16_t ox_id ; }; struct ctio_to_2xxx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; target_id_t target ; uint16_t rx_id ; uint16_t flags ; uint16_t status ; uint16_t timeout ; uint16_t dseg_count ; uint32_t relative_offset ; uint32_t residual ; uint16_t reserved_1[3U] ; uint16_t scsi_status ; uint32_t transfer_length ; uint32_t dseg_0_address ; uint32_t dseg_0_length ; uint32_t dseg_1_address ; uint32_t dseg_1_length ; uint32_t dseg_2_address ; uint32_t dseg_2_length ; }; struct __anonstruct_status0_392 { uint16_t reserved1 ; uint16_t flags ; uint32_t residual ; uint16_t ox_id ; uint16_t scsi_status ; uint32_t relative_offset ; uint32_t reserved2 ; uint32_t transfer_length ; uint32_t reserved3 ; uint32_t dseg_0_address[2U] ; uint32_t dseg_0_length ; }; struct __anonstruct_status1_393 { uint16_t sense_length ; uint16_t flags ; uint32_t residual ; uint16_t ox_id ; uint16_t scsi_status ; uint16_t response_len ; uint16_t reserved ; uint8_t sense_data[24U] ; }; union __anonunion_u_391 { struct __anonstruct_status0_392 status0 ; struct __anonstruct_status1_393 status1 ; }; struct ctio7_to_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t nport_handle ; uint16_t timeout ; uint16_t dseg_count ; uint8_t vp_index ; uint8_t add_flags ; uint8_t initiator_id[3U] ; uint8_t reserved ; uint32_t exchange_addr ; union __anonunion_u_391 u ; }; struct ctio7_from_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; 
uint8_t entry_status ; uint32_t handle ; uint16_t status ; uint16_t timeout ; uint16_t dseg_count ; uint8_t vp_index ; uint8_t reserved1[5U] ; uint32_t exchange_address ; uint16_t reserved2 ; uint16_t flags ; uint32_t residual ; uint16_t ox_id ; uint16_t reserved3 ; uint32_t relative_offset ; uint8_t reserved4[24U] ; }; struct ba_acc_le { uint16_t reserved ; uint8_t seq_id_last ; uint8_t seq_id_valid ; uint16_t rx_id ; uint16_t ox_id ; uint16_t high_seq_cnt ; uint16_t low_seq_cnt ; }; struct ba_rjt_le { uint8_t vendor_uniq ; uint8_t reason_expl ; uint8_t reason_code ; uint8_t reserved ; }; union __anonunion_payload_394 { struct ba_acc_le ba_acct ; struct ba_rjt_le ba_rjt ; }; struct abts_resp_to_24xx { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t reserved_1 ; uint16_t nport_handle ; uint16_t control_flags ; uint8_t vp_index ; unsigned char reserved_3 : 4 ; unsigned char sof_type : 4 ; uint32_t exchange_address ; struct fcp_hdr_le fcp_hdr_le ; union __anonunion_payload_394 payload ; uint32_t reserved_4 ; uint32_t exchange_addr_to_abort ; }; struct abts_resp_from_24xx_fw { uint8_t entry_type ; uint8_t entry_count ; uint8_t sys_define ; uint8_t entry_status ; uint32_t handle ; uint16_t compl_status ; uint16_t nport_handle ; uint16_t reserved_1 ; uint8_t reserved_2 ; unsigned char reserved_3 : 4 ; unsigned char sof_type : 4 ; uint32_t exchange_address ; struct fcp_hdr_le fcp_hdr_le ; uint8_t reserved_4[8U] ; uint32_t error_subcode1 ; uint32_t error_subcode2 ; uint32_t exchange_addr_to_abort ; }; union __anonunion_ldv_60976_395 { struct abts_recv_from_24xx abts ; struct imm_ntfy_from_isp tm_iocb ; struct atio_from_isp tm_iocb2 ; }; struct qla_tgt_sess_work_param { struct list_head sess_works_list_entry ; int type ; union __anonunion_ldv_60976_395 ldv_60976 ; }; struct qla_tgt_prm { struct qla_tgt_cmd *cmd ; struct qla_tgt *tgt ; void *pkt ; struct scatterlist *sg ; int seg_cnt ; int req_cnt ; uint16_t rq_result ; uint16_t scsi_status ; unsigned char *sense_buffer ; int sense_buffer_len ; int residual ; int add_status_pkt ; }; struct qla_tgt_srr_imm { struct list_head srr_list_entry ; int srr_id ; struct imm_ntfy_from_isp imm_ntfy ; }; struct qla_tgt_srr_ctio { struct list_head srr_list_entry ; int srr_id ; struct qla_tgt_cmd *cmd ; }; long ldv__builtin_expect(long exp , long c ) ; extern struct module __this_module ; extern struct pv_irq_ops pv_irq_ops ; __inline static void set_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return; } } __inline static void clear_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr)); return; } } __inline static int test_and_set_bit(long nr , unsigned long volatile *addr ) { int oldbit ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %2,%1\n\tsbb %0,%0": "=r" (oldbit), "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return (oldbit); } } __inline static int test_and_clear_bit(long nr , unsigned long volatile *addr ) { int oldbit ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %2,%1\n\tsbb %0,%0": "=r" (oldbit), "+m" (*((long volatile 
*)addr)): "Ir" (nr): "memory"); return (oldbit); } } __inline static int constant_test_bit(long nr , unsigned long const volatile *addr ) { { return ((int )((unsigned long )*(addr + (unsigned long )(nr >> 6)) >> ((int )nr & 63)) & 1); } } __inline static __u32 __arch_swab32(__u32 val ) { { __asm__ ("bswapl %0": "=r" (val): "0" (val)); return (val); } } __inline static __u32 __fswab32(__u32 val ) { __u32 tmp ; { tmp = __arch_swab32(val); return (tmp); } } extern int sprintf(char * , char const * , ...) ; extern int snprintf(char * , size_t , char const * , ...) ; extern void warn_slowpath_null(char const * , int const ) ; extern void __bad_percpu_size(void) ; extern struct task_struct *current_task ; __inline static struct task_struct *get_current(void) { struct task_struct *pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& current_task)); goto ldv_2858; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_2858; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_2858; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_2858; default: __bad_percpu_size(); } ldv_2858: ; return (pfo_ret__); } } extern void *__memcpy(void * , void const * , size_t ) ; extern void *memset(void * , int , size_t ) ; extern char *strcpy(char * , char const * ) ; extern char *strcat(char * , char const * ) ; extern int __bitmap_weight(unsigned long const * , int ) ; __inline static int bitmap_weight(unsigned long const *src , int nbits ) { int tmp___0 ; { tmp___0 = __bitmap_weight(src, nbits); return (tmp___0); } } extern int nr_cpu_ids ; extern struct cpumask const * const cpu_online_mask ; __inline static unsigned int cpumask_weight(struct cpumask const *srcp ) { int tmp ; { tmp = bitmap_weight((unsigned long const *)(& srcp->bits), nr_cpu_ids); return ((unsigned int )tmp); } } __inline static unsigned long arch_local_save_flags(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-3.12-rc1.tar.xz/linux-3.12-rc1/arch/x86/include/asm/paravirt.h"), "i" (804), "i" (12UL)); ldv_4635: ; goto ldv_4635; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (44UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static long PTR_ERR(void const *ptr ) { { return ((long )ptr); } } __inline static long IS_ERR(void const *ptr ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )ptr > 0xfffffffffffff000UL, 0L); return (tmp); } } __inline static int arch_irqs_disabled_flags(unsigned long flags ) { { return ((flags & 512UL) == 0UL); } } __inline static void rep_nop(void) { { __asm__ volatile ("rep; nop": : : "memory"); return; } } __inline static void cpu_relax(void) { { rep_nop(); return; } } extern void __xchg_wrong_size(void) ; 
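/* Editorial sketch, not part of the CIL-generated driver source: a minimal,
   hypothetical illustration of how the atomic bit helpers declared above
   (test_and_set_bit, clear_bit, constant_test_bit) are typically combined to
   claim and release a flag bit without a race between the test and the set.
   The function name ldv_demo_claim_flag and the bit index 0 are illustrative
   only and do not correspond to any qla2xxx or kernel identifier. */
__inline static int ldv_demo_claim_flag(unsigned long volatile *flags )
{ int was_set ;
  {
  /* Atomically set bit 0 and learn whether another path had already set it. */
  was_set = test_and_set_bit(0L, flags);
  if (was_set != 0) {
    /* The flag was already claimed; report contention to the caller. */
    return (1);
  } else {
  }
  /* ... work guarded by the flag would happen here ... */
  /* Drop the claim; a plain read via constant_test_bit() now observes 0. */
  clear_bit(0L, flags);
  return (constant_test_bit(0L, (unsigned long const volatile *)flags));
  }
}
/* Design note: test_and_set_bit() is preferred over a separate
   constant_test_bit() check followed by set_bit(), because the combined
   read-modify-write is a single locked instruction and cannot interleave
   with another CPU performing the same claim. */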
__inline static int atomic_read(atomic_t const *v ) { { return ((int )*((int volatile *)(& v->counter))); } } __inline static void atomic_set(atomic_t *v , int i ) { { v->counter = i; return; } } __inline static void atomic_inc(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; incl %0": "+m" (v->counter)); return; } } __inline static void atomic_dec(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0": "+m" (v->counter)); return; } } __inline static int atomic_dec_and_test(atomic_t *v ) { unsigned char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0; sete %1": "+m" (v->counter), "=qm" (c): : "memory"); return ((unsigned int )c != 0U); } } extern unsigned long kernel_stack ; __inline static struct thread_info *current_thread_info(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_5675; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_5675; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_5675; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_5675; default: __bad_percpu_size(); } ldv_5675: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } __inline static void INIT_LIST_HEAD(struct list_head *list ) { { list->next = list; list->prev = list; return; } } extern void __list_add(struct list_head * , struct list_head * , struct list_head * ) ; __inline static void list_add_tail(struct list_head *new , struct list_head *head ) { { __list_add(new, head->prev, head); return; } } extern void __list_del_entry(struct list_head * ) ; extern void list_del(struct list_head * ) ; __inline static void list_del_init(struct list_head *entry ) { { __list_del_entry(entry); INIT_LIST_HEAD(entry); return; } } __inline static int list_empty(struct list_head const *head ) { { return ((unsigned long )((struct list_head const *)head->next) == (unsigned long )head); } } __inline static void __list_splice(struct list_head const *list , struct list_head *prev , struct list_head *next ) { struct list_head *first ; struct list_head *last ; { first = list->next; last = list->prev; first->prev = prev; prev->next = first; last->next = next; next->prev = last; return; } } __inline static void list_splice(struct list_head const *list , struct list_head *head ) { int tmp ; { tmp = list_empty(list); if (tmp == 0) { __list_splice(list, head, head->next); } else { } return; } } __inline static void list_splice_init(struct list_head *list , struct list_head *head ) { int tmp ; { tmp = list_empty((struct list_head const *)list); if (tmp == 0) { __list_splice((struct list_head const *)list, head, head->next); INIT_LIST_HEAD(list); } else { } return; } } extern void lockdep_init_map(struct lockdep_map * , char const * , struct lock_class_key * , int ) ; extern void __raw_spin_lock_init(raw_spinlock_t * , char const * , struct lock_class_key * ) ; extern unsigned long _raw_spin_lock_irqsave(raw_spinlock_t * ) ; extern void _raw_spin_unlock_irqrestore(raw_spinlock_t * , unsigned long ) ; __inline static raw_spinlock_t *spinlock_check(spinlock_t *lock ) { { return (& lock->ldv_6105.rlock); } } __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) 
{ { _raw_spin_unlock_irqrestore(& lock->ldv_6105.rlock, flags); return; } } extern void __init_waitqueue_head(wait_queue_head_t * , char const * , struct lock_class_key * ) ; extern void __mutex_init(struct mutex * , char const * , struct lock_class_key * ) ; extern void mutex_lock_nested(struct mutex * , unsigned int ) ; extern void mutex_unlock(struct mutex * ) ; __inline static void init_completion(struct completion *x ) { struct lock_class_key __key ; { x->done = 0U; __init_waitqueue_head(& x->wait, "&x->wait", & __key); return; } } extern void complete(struct completion * ) ; extern unsigned long volatile jiffies ; extern int del_timer(struct timer_list * ) ; int ldv_del_timer_1(struct timer_list *ldv_func_arg1 ) ; extern int mod_timer(struct timer_list * , unsigned long ) ; int ldv_mod_timer_3(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; extern void add_timer(struct timer_list * ) ; extern int del_timer_sync(struct timer_list * ) ; int ldv_del_timer_sync_4(struct timer_list *ldv_func_arg1 ) ; extern void __init_work(struct work_struct * , int ) ; extern struct workqueue_struct *__alloc_workqueue_key(char const * , unsigned int , int , struct lock_class_key * , char const * , ...) ; extern void destroy_workqueue(struct workqueue_struct * ) ; extern bool queue_work_on(int , struct workqueue_struct * , struct work_struct * ) ; extern void flush_workqueue(struct workqueue_struct * ) ; extern bool cancel_work_sync(struct work_struct * ) ; __inline static bool queue_work(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = queue_work_on(4096, wq, work); return (tmp); } } __inline static unsigned short readw(void const volatile *addr ) { unsigned short ret ; { __asm__ volatile ("movw %1,%0": "=r" (ret): "m" (*((unsigned short volatile *)addr)): "memory"); return (ret); } } __inline static unsigned int readl(void const volatile *addr ) { unsigned int ret ; { __asm__ volatile ("movl %1,%0": "=r" (ret): "m" (*((unsigned int volatile *)addr)): "memory"); return (ret); } } __inline static void writew(unsigned short val , void volatile *addr ) { { __asm__ volatile ("movw %0,%1": : "r" (val), "m" (*((unsigned short volatile *)addr)): "memory"); return; } } __inline static void writel(unsigned int val , void volatile *addr ) { { __asm__ volatile ("movl %0,%1": : "r" (val), "m" (*((unsigned int volatile *)addr)): "memory"); return; } } extern void *ioremap_nocache(resource_size_t , unsigned long ) ; __inline static void *ioremap(resource_size_t offset , unsigned long size ) { void *tmp ; { tmp = ioremap_nocache(offset, size); return (tmp); } } extern void iounmap(void volatile * ) ; extern void vfree(void const * ) ; extern struct kmem_cache *kmem_cache_create(char const * , size_t , size_t , unsigned long , void (*)(void * ) ) ; extern void kmem_cache_destroy(struct kmem_cache * ) ; extern void kfree(void const * ) ; extern void *__kmalloc(size_t , gfp_t ) ; __inline static char const *kobject_name(struct kobject const *kobj ) { { return ((char const *)kobj->name); } } extern int kobject_uevent_env(struct kobject * , enum kobject_action , char ** ) ; __inline static void *kmalloc(size_t size , gfp_t flags ) { void *tmp___2 ; { tmp___2 = __kmalloc(size, flags); return (tmp___2); } } __inline static void *kzalloc(size_t size , gfp_t flags ) { void *tmp ; { tmp = kmalloc(size, flags | 32768U); return (tmp); } } extern void *malloc(size_t size ) ; extern void *calloc(size_t nmemb , size_t size ) ; extern int __VERIFIER_nondet_int(void) ; extern loff_t 
__VERIFIER_nondet_loff_t(void) ; extern unsigned int __VERIFIER_nondet_uint(void) ; extern unsigned long __VERIFIER_nondet_ulong(void) ; extern void *__VERIFIER_nondet_pointer(void) ; extern void __VERIFIER_assume(int expression ) ; void *ldv_malloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = malloc(size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } void *ldv_zalloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = calloc(1UL, size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } int ldv_undef_int(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); return (tmp); } } void *ldv_undef_ptr(void) { void *tmp ; { tmp = __VERIFIER_nondet_pointer(); return (tmp); } } unsigned long ldv_undef_ulong(void) { unsigned long tmp ; { tmp = __VERIFIER_nondet_ulong(); return (tmp); } } __inline static void ldv_error(void) { { ERROR: ; __VERIFIER_error(); } } __inline static void ldv_stop(void) { { LDV_STOP: ; goto LDV_STOP; } } long ldv__builtin_expect(long exp , long c ) { { return (exp); } } void ldv__builtin_trap(void) { { ldv_error(); return; } } struct file *sysfs_fw_dump_attr_group0 ; int ldv_state_variable_47 ; int ldv_state_variable_20 ; struct scsi_qla_host *qla2300_isp_ops_group0 ; int ldv_timer_state_3 = 0; int ldv_timer_state_12 = 0; struct qla_hw_data *qla24xx_isp_ops_group2 ; struct device *dev_attr_beacon_group0 ; struct scsi_qla_host *qla81xx_isp_ops_group0 ; int ldv_state_variable_54 ; int ldv_state_variable_14 ; int ldv_state_variable_37 ; int ldv_state_variable_17 ; int ldv_state_variable_51 ; struct timer_list *ldv_timer_list_11 ; struct scsi_qla_host *qlafx00_isp_ops_group0 ; int ldv_timer_state_7 = 0; int ldv_state_variable_66 ; int ldv_state_variable_19 ; int ldv_state_variable_27 ; int ldv_state_variable_9 ; int ldv_state_variable_42 ; int ldv_state_variable_7 ; struct timer_list *ldv_timer_list_1 ; struct qla_hw_data *qla83xx_isp_ops_group2 ; int ldv_state_variable_55 ; struct fc_rport *qla2xxx_transport_vport_functions_group2 ; struct timer_list *ldv_timer_list_6 ; struct fc_bsg_job *qla2xxx_transport_vport_functions_group1 ; struct Scsi_Host *qla2xxx_transport_vport_functions_group0 ; int ldv_state_variable_64 ; int ldv_state_variable_26 ; int ldv_state_variable_28 ; int LDV_IN_INTERRUPT = 1; struct timer_list *ldv_timer_list_17 ; struct qla_hw_data *qla8044_isp_ops_group2 ; int ldv_state_variable_58 ; struct kobject *sysfs_fw_dump_attr_group2 ; int ldv_state_variable_31 ; int ldv_state_variable_68 ; struct fc_rport *qla2xxx_transport_functions_group2 ; struct timer_list *ldv_timer_list_5 ; int ldv_state_variable_8 ; int ldv_state_variable_46 ; int ldv_state_variable_15 ; int ldv_timer_state_11 = 0; struct fc_port *qla8044_isp_ops_group1 ; int ldv_state_variable_21 ; int ldv_state_variable_33 ; struct qla_hw_data *qla25xx_isp_ops_group2 ; int ldv_state_variable_69 ; struct fc_port *qla81xx_isp_ops_group1 ; int ldv_timer_state_9 = 0; int ldv_state_variable_65 ; int ldv_timer_state_6 = 0; struct fc_port *qla82xx_isp_ops_group1 ; struct pci_dev *qla2xxx_pci_driver_group0 ; struct timer_list *ldv_timer_list_10 ; struct file *sysfs_optrom_attr_group0 ; int ldv_state_variable_70 ; struct fc_port *qlafx00_isp_ops_group1 ; int ldv_state_variable_62 ; int ldv_state_variable_41 ; int 
ldv_state_variable_40 ; int ldv_timer_state_17 = 0; int ldv_timer_state_10 = 0; struct device_attribute *dev_attr_zio_group1 ; struct device_attribute *dev_attr_zio_timer_group1 ; int ldv_state_variable_10 ; struct file *sysfs_vpd_attr_group0 ; struct inode *dfs_fce_ops_group1 ; int ldv_state_variable_63 ; struct file *sysfs_nvram_attr_group0 ; int ldv_state_variable_2 ; int ldv_state_variable_25 ; struct bin_attribute *sysfs_optrom_attr_group1 ; struct scsi_device *qla2xxx_driver_template_group2 ; struct timer_list *ldv_timer_list_9 ; struct pci_dev *qla2xxx_err_handler_group0 ; int ldv_state_variable_11 ; int ldv_state_variable_18 ; struct device_attribute *dev_attr_beacon_group1 ; struct timer_list *ldv_timer_list_13 ; int ldv_state_variable_32 ; int ldv_timer_state_5 = 0; struct fc_port *qla25xx_isp_ops_group1 ; struct scsi_target *qla2xxx_transport_vport_functions_group3 ; struct timer_list *ldv_timer_list_4 ; struct scsi_qla_host *qla24xx_isp_ops_group0 ; int ldv_state_variable_30 ; int ldv_state_variable_0 ; int ldv_state_variable_45 ; int ldv_state_variable_12 ; struct fc_port *qla2100_isp_ops_group1 ; int ldv_timer_state_16 = 0; int ldv_state_variable_22 ; struct timer_list *ldv_timer_list_16 ; struct qla_hw_data *qla2300_isp_ops_group2 ; int ldv_state_variable_29 ; struct qla_hw_data *qlafx00_isp_ops_group2 ; struct Scsi_Host *qla2xxx_transport_functions_group0 ; int ldv_state_variable_61 ; struct scsi_qla_host *qla82xx_isp_ops_group0 ; struct timer_list *ldv_timer_list_8 ; int ref_cnt ; int ldv_state_variable_23 ; int ldv_state_variable_72 ; int ldv_timer_state_8 = 0; int ldv_state_variable_59 ; struct bin_attribute *sysfs_vpd_attr_group1 ; int ldv_state_variable_6 ; struct timer_list *ldv_timer_list_12 ; int ldv_state_variable_50 ; int ldv_state_variable_44 ; int ldv_state_variable_38 ; int ldv_state_variable_39 ; int ldv_timer_state_2 = 0; int ldv_state_variable_56 ; int ldv_state_variable_3 ; struct bin_attribute *sysfs_nvram_attr_group1 ; int ldv_state_variable_52 ; struct scsi_target *qla2xxx_transport_functions_group3 ; int ldv_state_variable_4 ; struct file *apidev_fops_group2 ; int ldv_state_variable_36 ; int ldv_state_variable_60 ; struct device *dev_attr_zio_timer_group0 ; struct fc_port *qla83xx_isp_ops_group1 ; int ldv_state_variable_48 ; struct timer_list *ldv_timer_list_3 ; int ldv_state_variable_5 ; int ldv_state_variable_13 ; struct fc_port *qla2300_isp_ops_group1 ; struct qla_hw_data *qla81xx_isp_ops_group2 ; struct scsi_qla_host *qla2100_isp_ops_group0 ; struct scsi_cmnd *qla2xxx_driver_template_group0 ; struct fc_port *qla24xx_isp_ops_group1 ; struct qla_hw_data *qla82xx_isp_ops_group2 ; struct scsi_qla_host *qla25xx_isp_ops_group0 ; int ldv_timer_state_14 = 0; struct kobject *sysfs_optrom_attr_group2 ; struct timer_list *ldv_timer_list_15 ; int ldv_state_variable_49 ; struct device *dev_attr_zio_group0 ; int ldv_state_variable_24 ; int ldv_timer_state_4 = 0; struct file *dfs_fce_ops_group2 ; int ldv_state_variable_1 ; struct timer_list *ldv_timer_list_7 ; struct Scsi_Host *qla2xxx_driver_template_group1 ; struct timer_list *ldv_timer_list_14 ; struct scsi_qla_host *qla83xx_isp_ops_group0 ; int ldv_state_variable_71 ; struct fc_bsg_job *qla2xxx_transport_functions_group1 ; struct qla_hw_data *qla2100_isp_ops_group2 ; struct bin_attribute *sysfs_fw_dump_attr_group1 ; int ldv_state_variable_16 ; int ldv_state_variable_43 ; struct kobject *sysfs_vpd_attr_group2 ; int ldv_state_variable_57 ; struct kobject *sysfs_nvram_attr_group2 ; struct fc_vport 
*qla2xxx_transport_functions_group4 ; int ldv_state_variable_53 ; int ldv_state_variable_67 ; int ldv_timer_state_15 = 0; int ldv_timer_state_13 = 0; struct timer_list *ldv_timer_list_2 ; int ldv_timer_state_1 = 0; int ldv_state_variable_34 ; struct scsi_qla_host *qla8044_isp_ops_group0 ; void *apidev_fops_group1 ; int ldv_state_variable_35 ; int reg_timer_7(struct timer_list *timer ) ; void activate_pending_timer_2(struct timer_list *timer , unsigned long data , int pending_flag ) ; void ldv_initialize_bin_attribute_55(void) ; void ldv_initialize_bin_attribute_58(void) ; void choose_timer_1(struct timer_list *timer ) ; void choose_timer_2(struct timer_list *timer ) ; int reg_timer_2(struct timer_list *timer ) ; void ldv_initialize_isp_operations_65(void) ; void disable_suitable_timer_7(struct timer_list *timer ) ; void ldv_initialize_isp_operations_68(void) ; void ldv_initialize_isp_operations_70(void) ; void ldv_initialize_pci_error_handlers_62(void) ; void ldv_initialize_scsi_host_template_72(void) ; void ldv_initialize_isp_operations_66(void) ; void ldv_initialize_isp_operations_63(void) ; void ldv_initialize_bin_attribute_59(void) ; void activate_pending_timer_7(struct timer_list *timer , unsigned long data , int pending_flag ) ; void ldv_initialize_isp_operations_64(void) ; void ldv_initialize_fc_function_template_19(void) ; void ldv_initialize_isp_operations_71(void) ; void ldv_file_operations_60(void) ; void ldv_initialize_device_attribute_41(void) ; void ldv_initialize_fc_function_template_20(void) ; int reg_timer_1(struct timer_list *timer ) ; void disable_suitable_timer_2(struct timer_list *timer ) ; void ldv_initialize_isp_operations_67(void) ; void activate_pending_timer_1(struct timer_list *timer , unsigned long data , int pending_flag ) ; void ldv_file_operations_18(void) ; void disable_suitable_timer_1(struct timer_list *timer ) ; void ldv_initialize_pci_driver_61(void) ; void ldv_initialize_device_attribute_40(void) ; void ldv_initialize_device_attribute_39(void) ; void ldv_initialize_bin_attribute_57(void) ; void ldv_initialize_isp_operations_69(void) ; __inline static char const *dev_name(struct device const *dev ) { char const *tmp ; { if ((unsigned long )dev->init_name != (unsigned long )((char const */* const */)0)) { return ((char const *)dev->init_name); } else { } tmp = kobject_name(& dev->kobj); return (tmp); } } extern void *dev_get_drvdata(struct device const * ) ; extern int dev_set_drvdata(struct device * , void * ) ; __inline static int pci_channel_offline(struct pci_dev *pdev ) { { return (pdev->error_state != 1U); } } extern void pci_dev_put(struct pci_dev * ) ; extern struct pci_dev *pci_get_domain_bus_and_slot(int , unsigned int , unsigned int ) ; extern int pci_bus_read_config_byte(struct pci_bus * , unsigned int , int , u8 * ) ; extern int pci_bus_read_config_word(struct pci_bus * , unsigned int , int , u16 * ) ; __inline static int pci_read_config_byte(struct pci_dev const *dev , int where , u8 *val ) { int tmp ; { tmp = pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); return (tmp); } } __inline static int pci_read_config_word(struct pci_dev const *dev , int where , u16 *val ) { int tmp ; { tmp = pci_bus_read_config_word(dev->bus, dev->devfn, where, val); return (tmp); } } extern int pci_enable_device(struct pci_dev * ) ; extern int pci_enable_device_mem(struct pci_dev * ) ; extern void pci_disable_device(struct pci_dev * ) ; extern int pci_select_bars(struct pci_dev * , unsigned long ) ; extern int pci_save_state(struct pci_dev * ) ; extern 
void pci_restore_state(struct pci_dev * ) ; extern int pci_request_selected_regions(struct pci_dev * , int , char const * ) ; extern void pci_release_selected_regions(struct pci_dev * , int ) ; extern int __pci_register_driver(struct pci_driver * , struct module * , char const * ) ; extern void pci_unregister_driver(struct pci_driver * ) ; extern struct dma_pool *dma_pool_create(char const * , struct device * , size_t , size_t , size_t ) ; extern void dma_pool_destroy(struct dma_pool * ) ; extern void *dma_pool_alloc(struct dma_pool * , gfp_t , dma_addr_t * ) ; extern void dma_pool_free(struct dma_pool * , void * , dma_addr_t ) ; __inline static int pci_domain_nr(struct pci_bus *bus ) { struct pci_sysdata *sd ; { sd = (struct pci_sysdata *)bus->sysdata; return (sd->domain); } } __inline static int valid_dma_direction(int dma_direction ) { { return ((dma_direction == 0 || dma_direction == 1) || dma_direction == 2); } } __inline static int is_device_dma_capable(struct device *dev ) { { return ((unsigned long )dev->dma_mask != (unsigned long )((u64 *)0ULL) && *(dev->dma_mask) != 0ULL); } } extern void debug_dma_unmap_sg(struct device * , struct scatterlist * , int , int ) ; extern void debug_dma_alloc_coherent(struct device * , size_t , dma_addr_t , void * ) ; extern void debug_dma_free_coherent(struct device * , size_t , void * , dma_addr_t ) ; extern struct device x86_dma_fallback_dev ; extern struct dma_map_ops *dma_ops ; __inline static struct dma_map_ops *get_dma_ops(struct device *dev ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )dev == (unsigned long )((struct device *)0), 0L); if (tmp != 0L || (unsigned long )dev->archdata.dma_ops == (unsigned long )((struct dma_map_ops *)0)) { return (dma_ops); } else { return (dev->archdata.dma_ops); } } } __inline static void dma_unmap_sg_attrs(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (65), "i" (12UL)); ldv_21314: ; goto ldv_21314; } else { } debug_dma_unmap_sg(dev, sg, nents, (int )dir); if ((unsigned long )ops->unmap_sg != (unsigned long )((void (*)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_sg))(dev, sg, nents, dir, attrs); } else { } return; } } extern int dma_supported(struct device * , u64 ) ; extern int dma_set_mask(struct device * , u64 ) ; __inline static unsigned long dma_alloc_coherent_mask(struct device *dev , gfp_t gfp ) { unsigned long dma_mask ; { dma_mask = 0UL; dma_mask = (unsigned long )dev->coherent_dma_mask; if (dma_mask == 0UL) { dma_mask = (int )gfp & 1 ? 
16777215UL : 4294967295UL; } else { } return (dma_mask); } } __inline static gfp_t dma_alloc_coherent_gfp_flags(struct device *dev , gfp_t gfp ) { unsigned long dma_mask ; unsigned long tmp ; { tmp = dma_alloc_coherent_mask(dev, gfp); dma_mask = tmp; if ((unsigned long long )dma_mask <= 16777215ULL) { gfp = gfp | 1U; } else { } if ((unsigned long long )dma_mask <= 4294967295ULL && (gfp & 1U) == 0U) { gfp = gfp | 4U; } else { } return (gfp); } } __inline static void *dma_alloc_attrs(struct device *dev , size_t size , dma_addr_t *dma_handle , gfp_t gfp , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; void *memory ; int tmp___0 ; gfp_t tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; gfp = gfp & 4294967288U; if ((unsigned long )dev == (unsigned long )((struct device *)0)) { dev = & x86_dma_fallback_dev; } else { } tmp___0 = is_device_dma_capable(dev); if (tmp___0 == 0) { return ((void *)0); } else { } if ((unsigned long )ops->alloc == (unsigned long )((void *(*)(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ))0)) { return ((void *)0); } else { } tmp___1 = dma_alloc_coherent_gfp_flags(dev, gfp); memory = (*(ops->alloc))(dev, size, dma_handle, tmp___1, attrs); debug_dma_alloc_coherent(dev, size, *dma_handle, memory); return (memory); } } __inline static void dma_free_attrs(struct device *dev , size_t size , void *vaddr , dma_addr_t bus , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int __ret_warn_on ; unsigned long _flags ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; _flags = arch_local_save_flags(); tmp___0 = arch_irqs_disabled_flags(_flags); __ret_warn_on = tmp___0 != 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_null("/work/ldvuser/mutilin/launch/inst/current/envs/linux-3.12-rc1.tar.xz/linux-3.12-rc1/arch/x86/include/asm/dma-mapping.h", 166); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); debug_dma_free_coherent(dev, size, vaddr, bus); if ((unsigned long )ops->free != (unsigned long )((void (*)(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ))0)) { (*(ops->free))(dev, size, vaddr, bus, attrs); } else { } return; } } __inline static int dma_set_coherent_mask(struct device *dev , u64 mask ) { int tmp ; { tmp = dma_supported(dev, mask); if (tmp == 0) { return (-5); } else { } dev->coherent_dma_mask = mask; return (0); } } extern u64 dma_get_required_mask(struct device * ) ; __inline static int pci_set_consistent_dma_mask(struct pci_dev *dev , u64 mask ) { int tmp ; { tmp = dma_set_coherent_mask(& dev->dev, mask); return (tmp); } } __inline static void *pci_get_drvdata(struct pci_dev *pdev ) { void *tmp ; { tmp = dev_get_drvdata((struct device const *)(& pdev->dev)); return (tmp); } } __inline static void pci_set_drvdata(struct pci_dev *pdev , void *data ) { { dev_set_drvdata(& pdev->dev, data); return; } } __inline static char const *pci_name(struct pci_dev const *pdev ) { char const *tmp ; { tmp = dev_name(& pdev->dev); return (tmp); } } __inline static int pci_pcie_cap(struct pci_dev *dev ) { { return ((int )dev->pcie_cap); } } extern void schedule(void) ; extern void set_user_nice(struct task_struct * , long ) ; extern int wake_up_process(struct task_struct * ) ; extern mempool_t *mempool_create(int , mempool_alloc_t * , mempool_free_t * , void * ) ; extern void mempool_destroy(mempool_t * ) ; extern void *mempool_alloc(mempool_t * , gfp_t ) ; extern void mempool_free(void * , mempool_t * ) ; extern void 
*mempool_alloc_slab(gfp_t , void * ) ; extern void mempool_free_slab(void * , void * ) ; __inline static mempool_t *mempool_create_slab_pool(int min_nr , struct kmem_cache *kc ) { mempool_t *tmp ; { tmp = mempool_create(min_nr, & mempool_alloc_slab, & mempool_free_slab, (void *)kc); return (tmp); } } extern int request_firmware(struct firmware const ** , char const * , struct device * ) ; extern void release_firmware(struct firmware const * ) ; extern int pci_enable_pcie_error_reporting(struct pci_dev * ) ; extern int pci_disable_pcie_error_reporting(struct pci_dev * ) ; extern int pci_cleanup_aer_uncorrect_error_status(struct pci_dev * ) ; __inline static void *shost_priv(struct Scsi_Host *shost ) { { return ((void *)(& shost->hostdata)); } } extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template * , int ) ; struct Scsi_Host *ldv_scsi_host_alloc_6(struct scsi_host_template *sht , int privsize ) ; extern int scsi_add_host_with_dma(struct Scsi_Host * , struct device * , struct device * ) ; int ldv_scsi_add_host_with_dma_2(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; extern void scsi_scan_host(struct Scsi_Host * ) ; extern void scsi_remove_host(struct Scsi_Host * ) ; void ldv_scsi_remove_host_5(struct Scsi_Host *shost ) ; extern struct Scsi_Host *scsi_host_get(struct Scsi_Host * ) ; extern void scsi_host_put(struct Scsi_Host * ) ; __inline static int scsi_add_host(struct Scsi_Host *host , struct device *dev ) { int tmp ; { tmp = ldv_scsi_add_host_with_dma_2(host, dev, dev); return (tmp); } } __inline static void scsi_host_set_prot(struct Scsi_Host *shost , unsigned int mask ) { { shost->prot_capabilities = mask; return; } } __inline static void scsi_host_set_guard(struct Scsi_Host *shost , unsigned char type ) { { shost->prot_guard_type = type; return; } } extern int __register_chrdev(unsigned int , unsigned int , unsigned int , char const * , struct file_operations const * ) ; extern void __unregister_chrdev(unsigned int , unsigned int , unsigned int , char const * ) ; __inline static int register_chrdev(unsigned int major , char const *name , struct file_operations const *fops ) { int tmp ; { tmp = __register_chrdev(major, 0U, 256U, name, fops); return (tmp); } } __inline static int ldv_register_chrdev_7(unsigned int major , char const *name , struct file_operations const *fops ) ; __inline static void unregister_chrdev(unsigned int major , char const *name ) { { __unregister_chrdev(major, 0U, 256U, name); return; } } __inline static void ldv_unregister_chrdev_8(unsigned int major , char const *name ) ; extern loff_t noop_llseek(struct file * , loff_t , int ) ; extern void blk_queue_update_dma_alignment(struct request_queue * , int ) ; extern int blk_queue_init_tags(struct request_queue * , int , struct blk_queue_tag * ) ; extern void blk_queue_free_tags(struct request_queue * ) ; __inline static struct scsi_target *scsi_target(struct scsi_device *sdev ) { struct device const *__mptr ; { __mptr = (struct device const *)sdev->sdev_gendev.parent; return ((struct scsi_target *)__mptr + 0xffffffffffffffd8UL); } } extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host * , struct scsi_device * ) ; extern void scsi_adjust_queue_depth(struct scsi_device * , int , int ) ; extern int scsi_track_queue_full(struct scsi_device * , int ) ; extern void scsi_dma_unmap(struct scsi_cmnd * ) ; __inline static unsigned char scsi_get_prot_op(struct scsi_cmnd *scmd ) { { return (scmd->prot_op); } } __inline static unsigned int scsi_prot_sg_count(struct 
scsi_cmnd *cmd ) { { return ((unsigned long )cmd->prot_sdb != (unsigned long )((struct scsi_data_buffer *)0) ? (cmd->prot_sdb)->table.nents : 0U); } } __inline static struct scatterlist *scsi_prot_sglist(struct scsi_cmnd *cmd ) { { return ((unsigned long )cmd->prot_sdb != (unsigned long )((struct scsi_data_buffer *)0) ? (cmd->prot_sdb)->table.sgl : (struct scatterlist *)0); } } __inline static int fc_remote_port_chkready(struct fc_rport *rport ) { int result ; { switch ((unsigned int )rport->port_state) { case 2U: ; if ((int )rport->roles & 1) { result = 0; } else if ((int )rport->flags & 1) { result = 786432; } else { result = 65536; } goto ldv_38675; case 4U: ; if (((int )rport->flags & 4) != 0) { result = 983040; } else { result = 786432; } goto ldv_38675; default: result = 65536; goto ldv_38675; } ldv_38675: ; return (result); } } extern struct scsi_transport_template *fc_attach_transport(struct fc_function_template * ) ; extern void fc_release_transport(struct scsi_transport_template * ) ; extern void fc_remove_host(struct Scsi_Host * ) ; extern void fc_remote_port_delete(struct fc_rport * ) ; extern int scsi_is_fc_rport(struct device const * ) ; extern u32 fc_get_event_number(void) ; extern void fc_host_post_event(struct Scsi_Host * , u32 , enum fc_host_event_code , u32 ) ; extern int fc_vport_terminate(struct fc_vport * ) ; extern int fc_block_scsi_eh(struct scsi_cmnd * ) ; static char const * const port_state_str[5U] = { "Unknown", "UNCONFIGURED", "DEAD", "LOST", "ONLINE"}; int qla2x00_initialize_adapter(scsi_qla_host_t *vha ) ; int qla2100_pci_config(struct scsi_qla_host *vha ) ; int qla2300_pci_config(struct scsi_qla_host *vha ) ; int qla24xx_pci_config(scsi_qla_host_t *vha ) ; int qla25xx_pci_config(scsi_qla_host_t *vha ) ; void qla2x00_reset_chip(struct scsi_qla_host *vha ) ; void qla24xx_reset_chip(struct scsi_qla_host *vha ) ; int qla2x00_chip_diag(struct scsi_qla_host *vha ) ; int qla24xx_chip_diag(struct scsi_qla_host *vha ) ; void qla2x00_config_rings(struct scsi_qla_host *vha ) ; void qla24xx_config_rings(struct scsi_qla_host *vha ) ; void qla2x00_reset_adapter(struct scsi_qla_host *vha ) ; void qla24xx_reset_adapter(struct scsi_qla_host *vha ) ; int qla2x00_nvram_config(struct scsi_qla_host *vha ) ; int qla24xx_nvram_config(struct scsi_qla_host *vha ) ; int qla81xx_nvram_config(struct scsi_qla_host *vha ) ; void qla2x00_update_fw_options(struct scsi_qla_host *vha ) ; void qla24xx_update_fw_options(scsi_qla_host_t *vha ) ; void qla81xx_update_fw_options(scsi_qla_host_t *vha ) ; int qla2x00_load_risc(struct scsi_qla_host *vha , uint32_t *srisc_addr ) ; int qla24xx_load_risc(scsi_qla_host_t *vha , uint32_t *srisc_addr ) ; int qla81xx_load_risc(scsi_qla_host_t *vha , uint32_t *srisc_addr ) ; int qla2x00_perform_loop_resync(scsi_qla_host_t *ha ) ; int qla2x00_loop_resync(scsi_qla_host_t *vha ) ; int qla2x00_find_new_loop_id(scsi_qla_host_t *vha , fc_port_t *dev ) ; int qla2x00_fabric_login(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t *next_loopid ) ; int qla2x00_local_device_login(scsi_qla_host_t *vha , fc_port_t *fcport ) ; void qla2x00_update_fcports(scsi_qla_host_t *base_vha ) ; int qla2x00_abort_isp(scsi_qla_host_t *vha ) ; void qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha ) ; void qla2x00_quiesce_io(scsi_qla_host_t *vha ) ; void qla2x00_update_fcport(scsi_qla_host_t *vha , fc_port_t *fcport ) ; void qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha ) ; void qla84xx_put_chip(struct scsi_qla_host *vha ) ; int qla2x00_async_login(struct scsi_qla_host *vha , 
fc_port_t *fcport , uint16_t *data ) ; int qla2x00_async_logout(struct scsi_qla_host *vha , fc_port_t *fcport ) ; int qla2x00_async_adisc(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; void qla2x00_async_login_done(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; void qla2x00_async_logout_done(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; void qla2x00_async_adisc_done(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; int __qla83xx_set_idc_control(scsi_qla_host_t *vha , uint32_t idc_control ) ; int __qla83xx_get_idc_control(scsi_qla_host_t *vha , uint32_t *idc_control ) ; void qla83xx_idc_audit(scsi_qla_host_t *vha , int audit_type ) ; int qla83xx_nic_core_reset(scsi_qla_host_t *vha ) ; void qla83xx_reset_ownership(scsi_qla_host_t *vha ) ; int qla2xxx_mctp_dump(scsi_qla_host_t *vha ) ; char qla2x00_version_str[40U] ; int ql2xlogintimeout ; int qlport_down_retry ; int ql2xplogiabsentdevice ; int ql2xloginretrycount ; int ql2xfdmienable ; int ql2xmaxqdepth ; int ql2xallocfwdump ; int ql2xextended_error_logging ; int ql2xiidmaenable ; int ql2xmaxqueues ; int ql2xmultique_tag ; int ql2xfwloadbin ; int ql2xetsenable ; int ql2xshiftctondsd ; int ql2xdbwr ; int ql2xasynctmfenable ; int ql2xgffidenable ; int ql2xenabledif ; int ql2xenablehba_err_chk ; int ql2xtargetreset ; int ql2xdontresethba ; unsigned int ql2xmaxlun ; int ql2xmdcapmask ; int ql2xmdenable ; int qla2x00_loop_reset(scsi_qla_host_t *vha ) ; void qla2x00_abort_all_cmds(scsi_qla_host_t *vha , int res ) ; int qla2x00_post_aen_work(struct scsi_qla_host *vha , enum fc_host_event_code code , u32 data ) ; int qla2x00_post_idc_ack_work(struct scsi_qla_host *vha , uint16_t *mb ) ; int qla2x00_post_async_login_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; int qla2x00_post_async_login_done_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; int qla2x00_post_async_logout_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; int qla2x00_post_async_logout_done_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; int qla2x00_post_async_adisc_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) ; struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht , struct qla_hw_data *ha ) ; void qla2x00_relogin(struct scsi_qla_host *vha ) ; void qla2x00_do_work(struct scsi_qla_host *vha ) ; void qla2x00_free_fcports(struct scsi_qla_host *vha ) ; void qla83xx_schedule_work(scsi_qla_host_t *base_vha , int work_code ) ; void qla83xx_service_idc_aen(struct work_struct *work ) ; void qla83xx_nic_core_unrecoverable_work(struct work_struct *work ) ; void qla83xx_idc_state_handler_work(struct work_struct *work ) ; void qla83xx_nic_core_reset_work(struct work_struct *work ) ; void qla83xx_idc_lock(scsi_qla_host_t *base_vha , uint16_t requester_id ) ; void qla83xx_idc_unlock(scsi_qla_host_t *base_vha , uint16_t requester_id ) ; int qla83xx_idc_state_handler(scsi_qla_host_t *base_vha ) ; int qla83xx_set_drv_presence(scsi_qla_host_t *vha ) ; int __qla83xx_set_drv_presence(scsi_qla_host_t *vha ) ; int qla83xx_clear_drv_presence(scsi_qla_host_t *vha ) ; int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha ) ; int qla2x00_post_uevent_work(struct scsi_qla_host *vha , u32 code ) ; struct scsi_host_template qla2xxx_driver_template ; struct scsi_transport_template 
*qla2xxx_transport_vport_template ; void qla2x00_timer(scsi_qla_host_t *vha ) ; __inline void qla2x00_start_timer(scsi_qla_host_t *vha , void *func , unsigned long interval ) ; int qla2x00_send_change_request(scsi_qla_host_t *vha , uint16_t format , uint16_t vp_idx ) ; void qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha ) ; void qla2x00_sp_free_dma(void *vha , void *ptr ) ; void qla2x00_mark_device_lost(scsi_qla_host_t *vha , fc_port_t *fcport , int do_login , int defer ) ; void qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha , int defer ) ; struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *vha ) ; int qla2x00_wait_for_hba_online(scsi_qla_host_t *vha ) ; int qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha ) ; void qla2xxx_wake_dpc(struct scsi_qla_host *vha ) ; int qla2x00_vp_abort_isp(scsi_qla_host_t *vha ) ; uint16_t qla2x00_calc_iocbs_32(uint16_t dsds ) ; uint16_t qla2x00_calc_iocbs_64(uint16_t dsds ) ; void qla2x00_build_scsi_iocbs_32(srb_t *sp , cmd_entry_t *cmd_pkt , uint16_t tot_dsds ) ; void qla2x00_build_scsi_iocbs_64(srb_t *sp , cmd_entry_t *cmd_pkt , uint16_t tot_dsds ) ; int qla2x00_start_scsi(srb_t *sp ) ; int qla24xx_start_scsi(srb_t *sp ) ; int qla24xx_dif_start_scsi(srb_t *sp ) ; int qla2x00_abort_command(srb_t *sp ) ; int qla2x00_abort_target(struct fc_port *fcport , unsigned int l , int tag ) ; int qla2x00_lun_reset(struct fc_port *fcport , unsigned int l , int tag ) ; int qla2x00_get_port_database(scsi_qla_host_t *vha , fc_port_t *fcport , uint8_t opt ) ; int qla2x00_lip_reset(scsi_qla_host_t *vha ) ; int qla2x00_login_fabric(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa , uint16_t *mb , uint8_t opt ) ; int qla24xx_login_fabric(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa , uint16_t *mb , uint8_t opt ) ; int qla2x00_fabric_logout(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa ) ; int qla24xx_fabric_logout(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa ) ; int qla2x00_full_login_lip(scsi_qla_host_t *vha ) ; int qla24xx_abort_command(srb_t *sp ) ; int qla24xx_abort_target(struct fc_port *fcport , unsigned int l , int tag ) ; int qla24xx_lun_reset(struct fc_port *fcport , unsigned int l , int tag ) ; int qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha , unsigned int t , unsigned int l , enum nexus_wait_type type ) ; int qla2x00_disable_eft_trace(scsi_qla_host_t *vha ) ; int qla2x00_disable_fce_trace(scsi_qla_host_t *vha , uint64_t *wr , uint64_t *rd ) ; int qla81xx_idc_ack(scsi_qla_host_t *vha , uint16_t *mb ) ; irqreturn_t qla2100_intr_handler(int irq , void *dev_id ) ; irqreturn_t qla2300_intr_handler(int irq , void *dev_id ) ; irqreturn_t qla24xx_intr_handler(int irq , void *dev_id ) ; int qla2x00_request_irqs(struct qla_hw_data *ha , struct rsp_que *rsp ) ; void qla2x00_free_irqs(scsi_qla_host_t *vha ) ; uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) ; uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) ; int qla2x00_write_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) ; int qla24xx_write_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) ; uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) ; int qla25xx_write_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , 
uint32_t naddr , uint32_t bytes ) ; int qla2x00_beacon_on(struct scsi_qla_host *vha ) ; int qla2x00_beacon_off(struct scsi_qla_host *vha ) ; void qla2x00_beacon_blink(struct scsi_qla_host *vha ) ; int qla24xx_beacon_on(struct scsi_qla_host *vha ) ; int qla24xx_beacon_off(struct scsi_qla_host *vha ) ; void qla24xx_beacon_blink(struct scsi_qla_host *vha ) ; void qla83xx_beacon_blink(struct scsi_qla_host *vha ) ; int qla82xx_beacon_on(struct scsi_qla_host *vha ) ; int qla82xx_beacon_off(struct scsi_qla_host *vha ) ; int qla83xx_wr_reg(scsi_qla_host_t *vha , uint32_t reg , uint32_t data ) ; int qla83xx_rd_reg(scsi_qla_host_t *vha , uint32_t reg , uint32_t *data ) ; int qla83xx_restart_nic_firmware(scsi_qla_host_t *vha ) ; int qla83xx_access_control(scsi_qla_host_t *vha , uint16_t options , uint32_t start_addr , uint32_t end_addr , uint16_t *sector_size ) ; uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; int qla2x00_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; int qla24xx_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; void qla8044_watchdog(struct scsi_qla_host *vha ) ; int qla2x00_get_flash_version(scsi_qla_host_t *vha , void *mbuf ) ; int qla24xx_get_flash_version(scsi_qla_host_t *vha , void *mbuf ) ; int qla82xx_get_flash_version(scsi_qla_host_t *vha , void *mbuf ) ; void qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha ) ; void qla2100_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void qla2300_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void qla24xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void qla25xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void qla81xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void *qla2x00_prep_ms_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) ; void *qla24xx_prep_ms_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) ; void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) ; void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) ; struct device_attribute *qla2x00_host_attrs[31U] ; struct fc_function_template qla2xxx_transport_functions ; struct fc_function_template qla2xxx_transport_vport_functions ; void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha ) ; void qla2x00_free_sysfs_attr(scsi_qla_host_t *vha ) ; void qla2x00_init_host_attr(scsi_qla_host_t *vha ) ; int qla2x00_dfs_setup(scsi_qla_host_t *vha ) ; int qla2x00_dfs_remove(scsi_qla_host_t *vha ) ; int qla25xx_create_req_que(struct qla_hw_data *ha , uint16_t options , uint8_t vp_idx , uint16_t rid , int rsp_que , uint8_t qos ) ; int qla25xx_create_rsp_que(struct qla_hw_data *ha , uint16_t options , uint8_t vp_idx , uint16_t rid , int req ) ; int qla25xx_delete_queues(struct scsi_qla_host *vha ) ; int qlafx00_pci_config(struct scsi_qla_host *vha ) ; int qlafx00_initialize_adapter(struct scsi_qla_host *vha ) ; void qlafx00_soft_reset(scsi_qla_host_t *vha ) ; int qlafx00_chip_diag(scsi_qla_host_t *vha ) ; void qlafx00_config_rings(struct scsi_qla_host *vha ) ; char *qlafx00_pci_info_str(struct scsi_qla_host *vha , char *str ) ; char 
*qlafx00_fw_version_str(struct scsi_qla_host *vha , char *str ) ; irqreturn_t qlafx00_intr_handler(int irq , void *dev_id ) ; void qlafx00_enable_intrs(struct qla_hw_data *ha ) ; void qlafx00_disable_intrs(struct qla_hw_data *ha ) ; int qlafx00_abort_command(srb_t *sp ) ; int qlafx00_abort_target(fc_port_t *fcport , unsigned int l , int tag ) ; int qlafx00_lun_reset(fc_port_t *fcport , unsigned int l , int tag ) ; int qlafx00_start_scsi(srb_t *sp ) ; int qlafx00_abort_isp(scsi_qla_host_t *vha ) ; int qlafx00_iospace_config(struct qla_hw_data *ha ) ; int qlafx00_driver_shutdown(scsi_qla_host_t *vha , int tmo ) ; int qlafx00_reset_initialize(scsi_qla_host_t *vha ) ; int qlafx00_fx_disc(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t fx_type ) ; int qlafx00_process_aen(struct scsi_qla_host *vha , struct qla_work_evt *evt ) ; int qlafx00_post_aenfx_work(struct scsi_qla_host *vha , uint32_t evtcode , uint32_t *data , int cnt ) ; void qlafx00_timer_routine(scsi_qla_host_t *vha ) ; int qlafx00_rescan_isp(scsi_qla_host_t *vha ) ; int qlafx00_loop_reset(scsi_qla_host_t *vha ) ; int qla82xx_pci_config(struct scsi_qla_host *vha ) ; int qla82xx_iospace_config(struct qla_hw_data *ha ) ; void qla82xx_reset_chip(struct scsi_qla_host *vha ) ; void qla82xx_config_rings(struct scsi_qla_host *vha ) ; void qla82xx_watchdog(scsi_qla_host_t *vha ) ; int qla82xx_start_firmware(scsi_qla_host_t *vha ) ; int qla82xx_load_risc(scsi_qla_host_t *vha , uint32_t *srisc_addr ) ; uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; int qla82xx_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; int qla82xx_abort_isp(scsi_qla_host_t *vha ) ; int qla82xx_restart_isp(scsi_qla_host_t *vha ) ; int qla82xx_start_scsi(srb_t *sp ) ; irqreturn_t qla82xx_intr_handler(int irq , void *dev_id ) ; void qla82xx_enable_intrs(struct qla_hw_data *ha ) ; void qla82xx_disable_intrs(struct qla_hw_data *ha ) ; void qla82xx_init_flags(struct qla_hw_data *ha ) ; void qla82xx_set_drv_active(scsi_qla_host_t *vha ) ; int qla82xx_wr_32(struct qla_hw_data *ha , ulong off , u32 data ) ; int qla82xx_rd_32(struct qla_hw_data *ha , ulong off ) ; void qla82xx_clear_drv_active(struct qla_hw_data *ha ) ; int qla82xx_idc_lock(struct qla_hw_data *ha ) ; void qla82xx_idc_unlock(struct qla_hw_data *ha ) ; int qla82xx_device_state_handler(scsi_qla_host_t *vha ) ; void qla8xxx_dev_failed_handler(scsi_qla_host_t *vha ) ; void qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha ) ; int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha ) ; void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha ) ; void qla83xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) ; void qla82xx_md_free(scsi_qla_host_t *vha ) ; int qla8044_idc_lock(struct qla_hw_data *ha ) ; void qla8044_idc_unlock(struct qla_hw_data *ha ) ; void qla8044_wr_direct(struct scsi_qla_host *vha , uint32_t const crb_reg , uint32_t const value ) ; int qla8044_device_state_handler(struct scsi_qla_host *vha ) ; void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha ) ; void qla8044_clear_drv_active(struct scsi_qla_host *vha ) ; int qla8044_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; irqreturn_t qla8044_intr_handler(int irq , void *dev_id ) ; int qla8044_abort_isp(scsi_qla_host_t *vha ) ; int ql_errlev ; void ql_dbg(uint32_t level , scsi_qla_host_t *vha , int32_t id , char const *fmt , ...) 
; void ql_dbg_pci(uint32_t level , struct pci_dev *pdev , int32_t id , char const *fmt , ...) ; void ql_log(uint32_t level , scsi_qla_host_t *vha , int32_t id , char const *fmt , ...) ; void ql_log_pci(uint32_t level , struct pci_dev *pdev , int32_t id , char const *fmt , ...) ; __inline static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha ) { int i ; { if ((ha->device_type & 134217728U) != 0U) { return; } else { } i = 0; goto ldv_43381; ldv_43380: set_bit((long )i, (unsigned long volatile *)ha->loop_id_map); i = i + 1; ldv_43381: ; if (i <= 128) { goto ldv_43380; } else { } set_bit(254L, (unsigned long volatile *)ha->loop_id_map); set_bit(255L, (unsigned long volatile *)ha->loop_id_map); return; } } __inline static int qla2x00_is_reserved_id(scsi_qla_host_t *vha , uint16_t loop_id ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((ha->device_type & 134217728U) != 0U) { return ((unsigned int )loop_id > 2031U); } else { } return ((((int )ha->max_loop_id < (int )loop_id && (unsigned int )loop_id <= 128U) || (unsigned int )loop_id == 254U) || (unsigned int )loop_id == 255U); } } __inline static void qla2x00_clear_loop_id(fc_port_t *fcport ) { struct qla_hw_data *ha ; int tmp ; { ha = (fcport->vha)->hw; if ((unsigned int )fcport->loop_id == 4096U) { return; } else { tmp = qla2x00_is_reserved_id(fcport->vha, (int )fcport->loop_id); if (tmp != 0) { return; } else { } } clear_bit((long )fcport->loop_id, (unsigned long volatile *)ha->loop_id_map); fcport->loop_id = 4096U; return; } } __inline static void qla2x00_clean_dsd_pool(struct qla_hw_data *ha , srb_t *sp ) { struct dsd_dma *dsd_ptr ; struct dsd_dma *tdsd_ptr ; struct crc_context *ctx ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { ctx = (struct crc_context *)sp->u.scmd.ctx; __mptr = (struct list_head const *)ctx->dsd_list.next; dsd_ptr = (struct dsd_dma *)__mptr; __mptr___0 = (struct list_head const *)dsd_ptr->list.next; tdsd_ptr = (struct dsd_dma *)__mptr___0; goto ldv_43406; ldv_43405: dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); list_del(& dsd_ptr->list); kfree((void const *)dsd_ptr); dsd_ptr = tdsd_ptr; __mptr___1 = (struct list_head const *)tdsd_ptr->list.next; tdsd_ptr = (struct dsd_dma *)__mptr___1; ldv_43406: ; if ((unsigned long )(& dsd_ptr->list) != (unsigned long )(& ctx->dsd_list)) { goto ldv_43405; } else { } INIT_LIST_HEAD(& ctx->dsd_list); return; } } __inline static void qla2x00_set_fcport_state(fc_port_t *fcport , int state ) { int old_state ; { old_state = atomic_read((atomic_t const *)(& fcport->state)); atomic_set(& fcport->state, state); if (old_state != 0 && old_state != state) { ql_dbg(268435456U, fcport->vha, 8317, "FCPort state transitioned from %s to %s - portid=%02x%02x%02x.\n", port_state_str[old_state], port_state_str[state], (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { } return; } } __inline static srb_t *qla2x00_get_sp(scsi_qla_host_t *vha , fc_port_t *fcport , gfp_t flag ) { srb_t *sp ; struct qla_hw_data *ha ; uint8_t bail ; long tmp ; void *tmp___0 ; { sp = (srb_t *)0; ha = vha->hw; atomic_inc(& vha->vref_count); __asm__ volatile ("mfence": : : "memory"); if (*((unsigned long *)vha + 19UL) != 0UL) { atomic_dec(& vha->vref_count); bail = 1U; } else { bail = 0U; } tmp = ldv__builtin_expect((unsigned int )bail != 0U, 0L); if (tmp != 0L) { return ((srb_t *)0); } else { } tmp___0 = mempool_alloc(ha->srb_mempool, flag); sp = (srb_t *)tmp___0; if ((unsigned long 
)sp == (unsigned long )((srb_t *)0)) { goto done; } else { } memset((void *)sp, 0, 376UL); sp->fcport = fcport; sp->iocbs = 1; done: ; if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { atomic_dec(& vha->vref_count); } else { } return (sp); } } __inline static void qla2x00_rel_sp(scsi_qla_host_t *vha , srb_t *sp ) { { mempool_free((void *)sp, (vha->hw)->srb_mempool); atomic_dec(& vha->vref_count); return; } } __inline static int qla2x00_gid_list_size(struct qla_hw_data *ha ) { { if ((ha->device_type & 131072U) != 0U) { return (128); } else { return ((int )((unsigned int )ha->max_fibre_devices * 8U)); } } } extern void msleep(unsigned int ) ; extern struct task_struct *kthread_create_on_node(int (*)(void * ) , void * , int , char const * , ...) ; extern int kthread_stop(struct task_struct * ) ; extern bool kthread_should_stop(void) ; __inline static int scsi_get_tag_type(struct scsi_device *sdev ) { { if ((unsigned int )*((unsigned char *)sdev + 297UL) == 0U) { return (0); } else { } if ((unsigned int )*((unsigned char *)sdev + 297UL) != 0U) { return (34); } else { } if ((unsigned int )*((unsigned char *)sdev + 297UL) != 0U) { return (32); } else { } return (0); } } __inline static void scsi_set_tag_type(struct scsi_device *sdev , int tag ) { { switch (tag) { case 34: sdev->ordered_tags = 1U; case 32: sdev->simple_tags = 1U; goto ldv_43566; case 0: ; default: sdev->ordered_tags = 0U; sdev->simple_tags = 0U; goto ldv_43566; } ldv_43566: ; return; } } __inline static void scsi_activate_tcq(struct scsi_device *sdev , int depth ) { int tmp ; int tmp___0 ; { if ((unsigned int )*((unsigned char *)sdev + 297UL) == 0U) { return; } else { } tmp = constant_test_bit(1L, (unsigned long const volatile *)(& (sdev->request_queue)->queue_flags)); if (tmp == 0) { blk_queue_init_tags(sdev->request_queue, depth, (sdev->host)->bqt); } else { } tmp___0 = scsi_get_tag_type(sdev); scsi_adjust_queue_depth(sdev, tmp___0, depth); return; } } __inline static void scsi_deactivate_tcq(struct scsi_device *sdev , int depth ) { int tmp ; { tmp = constant_test_bit(1L, (unsigned long const volatile *)(& (sdev->request_queue)->queue_flags)); if (tmp != 0) { blk_queue_free_tags(sdev->request_queue); } else { } scsi_adjust_queue_depth(sdev, 0, depth); return; } } int qlt_add_target(struct qla_hw_data *ha , struct scsi_qla_host *base_vha ) ; int qlt_remove_target(struct qla_hw_data *ha , struct scsi_qla_host *vha ) ; void qlt_fc_port_deleted(struct scsi_qla_host *vha , fc_port_t *fcport ) ; int qlt_init(void) ; void qlt_exit(void) ; __inline static bool qla_ini_mode_enabled(struct scsi_qla_host *ha ) { { return (((int )(ha->host)->active_mode & 1) != 0); } } void qlt_probe_one_stage1(struct scsi_qla_host *base_vha , struct qla_hw_data *ha ) ; int qlt_mem_alloc(struct qla_hw_data *ha ) ; void qlt_mem_free(struct qla_hw_data *ha ) ; void qlt_83xx_iospace_config(struct qla_hw_data *ha ) ; static int apidev_major ; static struct kmem_cache *srb_cachep ; static struct kmem_cache *ctx_cachep ; int ql_errlev = 3; static int ql2xenableclass2 ; int ql2xlogintimeout = 20; int ql2xloginretrycount = 0; int ql2xallocfwdump = 1; int ql2xshiftctondsd = 6; int ql2xfdmienable = 1; int ql2xmaxqdepth = 32; int ql2xenabledif = 2; int ql2xenablehba_err_chk = 2; int ql2xiidmaenable = 1; int ql2xmaxqueues = 1; int ql2xdbwr = 1; int ql2xtargetreset = 1; unsigned int ql2xmaxlun = 65535U; int ql2xmdcapmask = 31; int ql2xmdenable = 1; static int qla2xxx_slave_configure(struct scsi_device *sdev ) ; static int qla2xxx_slave_alloc(struct scsi_device 
*sdev ) ; static int qla2xxx_scan_finished(struct Scsi_Host *shost , unsigned long time ) ; static void qla2xxx_scan_start(struct Scsi_Host *shost ) ; static void qla2xxx_slave_destroy(struct scsi_device *sdev ) ; static int qla2xxx_queuecommand(struct Scsi_Host *host , struct scsi_cmnd *cmd ) ; static int qla2xxx_eh_abort(struct scsi_cmnd *cmd ) ; static int qla2xxx_eh_device_reset(struct scsi_cmnd *cmd ) ; static int qla2xxx_eh_target_reset(struct scsi_cmnd *cmd ) ; static int qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd ) ; static int qla2xxx_eh_host_reset(struct scsi_cmnd *cmd ) ; static int qla2x00_change_queue_depth(struct scsi_device *sdev , int qdepth , int reason ) ; static int qla2x00_change_queue_type(struct scsi_device *sdev , int tag_type ) ; static void qla2x00_free_device(scsi_qla_host_t *vha ) ; struct scsi_host_template qla2xxx_driver_template = {& __this_module, "qla2xxx", 0, 0, 0, 0, 0, & qla2xxx_queuecommand, 0, & qla2xxx_eh_abort, & qla2xxx_eh_device_reset, & qla2xxx_eh_target_reset, & qla2xxx_eh_bus_reset, & qla2xxx_eh_host_reset, & qla2xxx_slave_alloc, & qla2xxx_slave_configure, & qla2xxx_slave_destroy, 0, 0, & qla2xxx_scan_finished, & qla2xxx_scan_start, & qla2x00_change_queue_depth, & qla2x00_change_queue_type, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 128U, (unsigned short)0, 65535U, 0UL, 3, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, 0U, (struct device_attribute **)(& qla2x00_host_attrs), 0, {0, 0}, 0ULL}; static struct scsi_transport_template *qla2xxx_transport_template = (struct scsi_transport_template *)0; struct scsi_transport_template *qla2xxx_transport_vport_template = (struct scsi_transport_template *)0; __inline void qla2x00_start_timer(scsi_qla_host_t *vha , void *func , unsigned long interval ) { { reg_timer_7(& vha->timer); vha->timer.expires = interval * 250UL + (unsigned long )jiffies; vha->timer.data = (unsigned long )vha; vha->timer.function = (void (*)(unsigned long ))func; add_timer(& vha->timer); vha->timer_active = 1U; return; } } __inline static void qla2x00_restart_timer(scsi_qla_host_t *vha , unsigned long interval ) { { if ((vha->device_flags & 32U) != 0U) { ql_dbg(16777216U, vha, 24589, "Device in a failed state, returning.\n"); return; } else { } ldv_mod_timer_3(& vha->timer, interval * 250UL + (unsigned long )jiffies); return; } } __inline static void qla2x00_stop_timer(scsi_qla_host_t *vha ) { { ldv_del_timer_sync_4(& vha->timer); vha->timer_active = 0U; return; } } static int qla2x00_do_dpc(void *data ) ; static void qla2x00_rst_aen(scsi_qla_host_t *vha ) ; static int qla2x00_mem_alloc(struct qla_hw_data *ha , uint16_t req_len , uint16_t rsp_len , struct req_que **req , struct rsp_que **rsp ) ; static void qla2x00_free_fw_dump(struct qla_hw_data *ha ) ; static void qla2x00_mem_free(struct qla_hw_data *ha ) ; static int qla2x00_alloc_queues(struct qla_hw_data *ha , struct req_que *req , struct rsp_que *rsp ) { scsi_qla_host_t *vha ; void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = kzalloc((unsigned long )ha->max_req_queues * 8UL, 208U); ha->req_q_map = (struct req_que **)tmp___0; if ((unsigned long )ha->req_q_map == (unsigned long )((struct req_que **)0)) { ql_log(0U, vha, 59, "Unable to allocate memory for request queue ptrs.\n"); goto fail_req_map; } else { } tmp___1 = kzalloc((unsigned long )ha->max_rsp_queues * 8UL, 208U); ha->rsp_q_map = (struct rsp_que **)tmp___1; if ((unsigned long )ha->rsp_q_map == (unsigned long 
)((struct rsp_que **)0)) { ql_log(0U, vha, 60, "Unable to allocate memory for response queue ptrs.\n"); goto fail_rsp_map; } else { } *(ha->rsp_q_map) = rsp; *(ha->req_q_map) = req; set_bit(0L, (unsigned long volatile *)(& ha->rsp_qid_map)); set_bit(0L, (unsigned long volatile *)(& ha->req_qid_map)); return (1); fail_rsp_map: kfree((void const *)ha->req_q_map); ha->req_q_map = (struct req_que **)0; fail_req_map: ; return (-12); } } static void qla2x00_free_req_que(struct qla_hw_data *ha , struct req_que *req ) { { if ((ha->device_type & 131072U) != 0U) { if ((unsigned long )req != (unsigned long )((struct req_que *)0) && (unsigned long )req->ring_fx00 != (unsigned long )((request_t *)0)) { dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )req->length_fx00 + 1) * 64UL, (void *)req->ring_fx00, req->dma_fx00, (struct dma_attrs *)0); } else { } } else if ((unsigned long )req != (unsigned long )((struct req_que *)0) && (unsigned long )req->ring != (unsigned long )((request_t *)0)) { dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )req->length + 1) * 64UL, (void *)req->ring, req->dma, (struct dma_attrs *)0); } else { } if ((unsigned long )req != (unsigned long )((struct req_que *)0)) { kfree((void const *)req->outstanding_cmds); } else { } kfree((void const *)req); req = (struct req_que *)0; return; } } static void qla2x00_free_rsp_que(struct qla_hw_data *ha , struct rsp_que *rsp ) { { if ((ha->device_type & 131072U) != 0U) { if ((unsigned long )rsp != (unsigned long )((struct rsp_que *)0) && (unsigned long )rsp->ring != (unsigned long )((response_t *)0)) { dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )rsp->length_fx00 + 1) * 64UL, (void *)rsp->ring_fx00, rsp->dma_fx00, (struct dma_attrs *)0); } else { } } else if ((unsigned long )rsp != (unsigned long )((struct rsp_que *)0) && (unsigned long )rsp->ring != (unsigned long )((response_t *)0)) { dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )rsp->length + 1) * 64UL, (void *)rsp->ring, rsp->dma, (struct dma_attrs *)0); } else { } kfree((void const *)rsp); rsp = (struct rsp_que *)0; return; } } static void qla2x00_free_queues(struct qla_hw_data *ha ) { struct req_que *req ; struct rsp_que *rsp ; int cnt ; { cnt = 0; goto ldv_61453; ldv_61452: req = *(ha->req_q_map + (unsigned long )cnt); qla2x00_free_req_que(ha, req); cnt = cnt + 1; ldv_61453: ; if ((int )ha->max_req_queues > cnt) { goto ldv_61452; } else { } kfree((void const *)ha->req_q_map); ha->req_q_map = (struct req_que **)0; cnt = 0; goto ldv_61456; ldv_61455: rsp = *(ha->rsp_q_map + (unsigned long )cnt); qla2x00_free_rsp_que(ha, rsp); cnt = cnt + 1; ldv_61456: ; if ((int )ha->max_rsp_queues > cnt) { goto ldv_61455; } else { } kfree((void const *)ha->rsp_q_map); ha->rsp_q_map = (struct rsp_que **)0; return; } } static int qla25xx_setup_mode(struct scsi_qla_host *vha ) { uint16_t options ; int ques ; int req ; int ret ; struct qla_hw_data *ha ; struct lock_class_key __key ; char const *__lock_name ; struct workqueue_struct *tmp ; uint8_t tmp___0 ; { options = 0U; ha = vha->hw; if (((int )ha->fw_attributes & 64) == 0) { ql_log(1U, vha, 216, "Firmware is not multi-queue capable.\n"); goto fail; } else { } if (ql2xmultique_tag != 0) { options = (uint16_t )((unsigned int )options | 128U); req = qla25xx_create_req_que(ha, (int )options, 0, 0, -1, 5); if (req == 0) { ql_log(1U, vha, 224, "Failed to create request queue.\n"); goto fail; } else { } __lock_name = "qla2xxx_wq"; tmp = __alloc_workqueue_key("qla2xxx_wq", 8U, 1, & __key, __lock_name); ha->wq = tmp; 
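/* One request queue was created above; the loop below creates the remaining response
 * queues and enables CPU-affinity mode. On failure, control jumps to fail2/fail, which
 * delete the extra queues, destroy the workqueue and fall back to a single
 * request/response queue pair (mqenable cleared, max_req_queues = max_rsp_queues = 1). */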
vha->req = *(ha->req_q_map + (unsigned long )req); options = (uint16_t )((unsigned int )options | 2U); ques = 1; goto ldv_61472; ldv_61471: ret = qla25xx_create_rsp_que(ha, (int )options, 0, 0, req); if (ret == 0) { ql_log(1U, vha, 232, "Failed to create response queue.\n"); goto fail2; } else { } ques = ques + 1; ldv_61472: ; if ((int )ha->max_rsp_queues > ques) { goto ldv_61471; } else { } ha->flags.cpu_affinity_enabled = 1U; ql_dbg(1048576U, vha, 49159, "CPU affinity mode enalbed, no. of response queues:%d no. of request queues:%d.\n", (int )ha->max_rsp_queues, (int )ha->max_req_queues); ql_dbg(1073741824U, vha, 233, "CPU affinity mode enalbed, no. of response queues:%d no. of request queues:%d.\n", (int )ha->max_rsp_queues, (int )ha->max_req_queues); } else { } return (0); fail2: qla25xx_delete_queues(vha); destroy_workqueue(ha->wq); ha->wq = (struct workqueue_struct *)0; vha->req = *(ha->req_q_map); fail: ha->mqenable = 0U; kfree((void const *)ha->req_q_map); kfree((void const *)ha->rsp_q_map); tmp___0 = 1U; ha->max_rsp_queues = tmp___0; ha->max_req_queues = tmp___0; return (1); } } static char *qla2x00_pci_info_str(struct scsi_qla_host *vha , char *str ) { struct qla_hw_data *ha ; char *pci_bus_modes[4U] ; uint16_t pci_bus ; { ha = vha->hw; pci_bus_modes[0] = (char *)"33"; pci_bus_modes[1] = (char *)"66"; pci_bus_modes[2] = (char *)"100"; pci_bus_modes[3] = (char *)"133"; strcpy(str, "PCI"); pci_bus = (uint16_t )((ha->pci_attr & 1536U) >> 9); if ((unsigned int )pci_bus != 0U) { strcat(str, "-X ("); strcat(str, (char const *)pci_bus_modes[(int )pci_bus]); } else { pci_bus = (uint16_t )((ha->pci_attr & 256U) >> 8); strcat(str, " ("); strcat(str, (char const *)pci_bus_modes[(int )pci_bus]); } strcat(str, " MHz)"); return (str); } } static char *qla24xx_pci_info_str(struct scsi_qla_host *vha , char *str ) { char *pci_bus_modes[4U] ; struct qla_hw_data *ha ; uint32_t pci_bus ; int pcie_reg ; char lwstr[6U] ; uint16_t pcie_lstat ; uint16_t lspeed ; uint16_t lwidth ; { pci_bus_modes[0] = (char *)"33"; pci_bus_modes[1] = (char *)"66"; pci_bus_modes[2] = (char *)"100"; pci_bus_modes[3] = (char *)"133"; ha = vha->hw; pcie_reg = pci_pcie_cap(ha->pdev); if (pcie_reg != 0) { pcie_reg = pcie_reg + 12; pci_read_config_word((struct pci_dev const *)ha->pdev, pcie_reg, & pcie_lstat); lspeed = (unsigned int )pcie_lstat & 15U; lwidth = (uint16_t )(((int )pcie_lstat & 1008) >> 4); strcpy(str, "PCIe ("); switch ((int )lspeed) { case 1: strcat(str, "2.5GT/s "); goto ldv_61494; case 2: strcat(str, "5.0GT/s "); goto ldv_61494; case 3: strcat(str, "8.0GT/s "); goto ldv_61494; default: strcat(str, " "); goto ldv_61494; } ldv_61494: snprintf((char *)(& lwstr), 6UL, "x%d)", (int )lwidth); strcat(str, (char const *)(& lwstr)); return (str); } else { } strcpy(str, "PCI"); pci_bus = (ha->pci_attr & 3840U) >> 8; if (pci_bus == 0U || pci_bus == 8U) { strcat(str, " ("); strcat(str, (char const *)pci_bus_modes[pci_bus >> 3]); } else { strcat(str, "-X "); if ((pci_bus & 4U) != 0U) { strcat(str, "Mode 2"); } else { strcat(str, "Mode 1"); } strcat(str, " ("); strcat(str, (char const *)pci_bus_modes[pci_bus & 4294967291U]); } strcat(str, " MHz)"); return (str); } } static char *qla2x00_fw_version_str(struct scsi_qla_host *vha , char *str ) { char un_str[10U] ; struct qla_hw_data *ha ; { ha = vha->hw; sprintf(str, "%d.%02d.%02d ", (int )ha->fw_major_version, (int )ha->fw_minor_version, (int )ha->fw_subminor_version); if (((int )ha->fw_attributes & 512) != 0) { strcat(str, "FLX"); return (str); } else { } switch ((int 
)ha->fw_attributes & 255) { case 7: strcat(str, "EF"); goto ldv_61505; case 23: strcat(str, "TP"); goto ldv_61505; case 55: strcat(str, "IP"); goto ldv_61505; case 119: strcat(str, "VI"); goto ldv_61505; default: sprintf((char *)(& un_str), "(%x)", (int )ha->fw_attributes); strcat(str, (char const *)(& un_str)); goto ldv_61505; } ldv_61505: ; if (((int )ha->fw_attributes & 256) != 0) { strcat(str, "X"); } else { } return (str); } } static char *qla24xx_fw_version_str(struct scsi_qla_host *vha , char *str ) { struct qla_hw_data *ha ; { ha = vha->hw; sprintf(str, "%d.%02d.%02d (%x)", (int )ha->fw_major_version, (int )ha->fw_minor_version, (int )ha->fw_subminor_version, (int )ha->fw_attributes); return (str); } } void qla2x00_sp_free_dma(void *vha , void *ptr ) { srb_t *sp ; struct scsi_cmnd *cmd ; struct qla_hw_data *ha ; void *ctx ; unsigned int tmp ; struct scatterlist *tmp___0 ; struct ct6_dsd *ctx1 ; { sp = (srb_t *)ptr; cmd = sp->u.scmd.cmd; ha = ((sp->fcport)->vha)->hw; ctx = sp->u.scmd.ctx; if ((int )sp->flags & 1) { scsi_dma_unmap(cmd); sp->flags = (unsigned int )sp->flags & 65534U; } else { } if (((int )sp->flags & 16) != 0) { tmp = scsi_prot_sg_count(cmd); tmp___0 = scsi_prot_sglist(cmd); dma_unmap_sg_attrs(& (ha->pdev)->dev, tmp___0, (int )tmp, cmd->sc_data_direction, (struct dma_attrs *)0); sp->flags = (unsigned int )sp->flags & 65519U; } else { } if (((int )sp->flags & 32) != 0) { qla2x00_clean_dsd_pool(ha, sp); sp->flags = (unsigned int )sp->flags & 65503U; } else { } if (((int )sp->flags & 4) != 0) { dma_pool_free(ha->dl_dma_pool, ctx, ((struct crc_context *)ctx)->crc_ctx_dma); sp->flags = (unsigned int )sp->flags & 65531U; } else { } if (((int )sp->flags & 4096) != 0) { ctx1 = (struct ct6_dsd *)ctx; dma_pool_free(ha->fcp_cmnd_dma_pool, (void *)ctx1->fcp_cmnd, ctx1->fcp_cmnd_dma); list_splice((struct list_head const *)(& ctx1->dsd_list), & ha->gbl_dsd_list); ha->gbl_dsd_inuse = (int )ha->gbl_dsd_inuse - (int )((uint16_t )ctx1->dsd_use_cnt); ha->gbl_dsd_avail = (int )ha->gbl_dsd_avail + (int )((uint16_t )ctx1->dsd_use_cnt); mempool_free((void *)ctx1, ha->ctx_mempool); ctx1 = (struct ct6_dsd *)0; } else { } cmd->SCp.ptr = (char *)0; qla2x00_rel_sp((sp->fcport)->vha, sp); return; } } static void qla2x00_sp_compl(void *data , void *ptr , int res ) { struct qla_hw_data *ha ; srb_t *sp ; struct scsi_cmnd *cmd ; int tmp ; int tmp___0 ; { ha = (struct qla_hw_data *)data; sp = (srb_t *)ptr; cmd = sp->u.scmd.cmd; cmd->result = res; tmp = atomic_read((atomic_t const *)(& sp->ref_count)); if (tmp == 0) { ql_dbg(134217728U, (sp->fcport)->vha, 12309, "SP reference-count to ZERO -- sp=%p cmd=%p.\n", sp, sp->u.scmd.cmd); if ((ql2xextended_error_logging & 134217728) != 0) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_os.o.c.prepared"), "i" (960), "i" (12UL)); ldv_61532: ; goto ldv_61532; } else { } return; } else { } tmp___0 = atomic_dec_and_test(& sp->ref_count); if (tmp___0 == 0) { return; } else { } qla2x00_sp_free_dma((void *)ha, (void *)sp); (*(cmd->scsi_done))(cmd); return; } } static int qla2xxx_queuecommand(struct Scsi_Host *host , struct scsi_cmnd *cmd ) { scsi_qla_host_t *vha ; void *tmp ; fc_port_t *fcport ; struct fc_rport *rport ; struct device const 
*__mptr ; struct scsi_target *tmp___2 ; struct fc_rport *tmp___3 ; struct scsi_target *tmp___4 ; int tmp___5 ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp___6 ; srb_t *sp ; int rval ; unsigned char tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; { tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; fcport = (fc_port_t *)(cmd->device)->hostdata; tmp___4 = scsi_target(cmd->device); tmp___5 = scsi_is_fc_rport((struct device const *)tmp___4->dev.parent); if (tmp___5 != 0) { tmp___2 = scsi_target(cmd->device); __mptr = (struct device const *)tmp___2->dev.parent; tmp___3 = (struct fc_rport *)__mptr + 0xffffffffffffffa0UL; } else { tmp___3 = (struct fc_rport *)0; } rport = tmp___3; ha = vha->hw; tmp___6 = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp___6; if (*((unsigned long *)ha + 2UL) != 0UL) { if (*((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(2097152U, vha, 36880, "PCI Channel IO permanent failure, exiting cmd=%p.\n", cmd); cmd->result = 65536; } else { ql_dbg(2097152U, vha, 36881, "EEH_Busy, Requeuing the cmd=%p.\n", cmd); cmd->result = 851968; } goto qc24_fail_command; } else { } rval = fc_remote_port_chkready(rport); if (rval != 0) { cmd->result = rval; ql_dbg(134250496U, vha, 12291, "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", cmd, rval); goto qc24_fail_command; } else { } if (*((unsigned long *)vha + 19UL) == 0UL) { tmp___7 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___7 != 0U) { ql_dbg(134217728U, vha, 12292, "DIF Cap not reg, fail DIF capable cmd\'s:%p.\n", cmd); cmd->result = 65536; goto qc24_fail_command; } else { } } else { } if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { cmd->result = 65536; goto qc24_fail_command; } else { } tmp___12 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___12 != 4) { tmp___10 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___10 == 2) { tmp___8 = atomic_read((atomic_t const *)(& base_vha->loop_state)); tmp___9 = atomic_read((atomic_t const *)(& fcport->state)); ql_dbg(134217728U, vha, 12293, "Returning DNC, fcport_state=%d loop_state=%d.\n", tmp___9, tmp___8); cmd->result = 65536; goto qc24_fail_command; } else { tmp___11 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___11 == 6) { tmp___8 = atomic_read((atomic_t const *)(& base_vha->loop_state)); tmp___9 = atomic_read((atomic_t const *)(& fcport->state)); ql_dbg(134217728U, vha, 12293, "Returning DNC, fcport_state=%d loop_state=%d.\n", tmp___9, tmp___8); cmd->result = 65536; goto qc24_fail_command; } else { } } goto qc24_target_busy; } else { } sp = qla2x00_get_sp(vha, fcport, 32U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { set_bit(22L, (unsigned long volatile *)(& vha->dpc_flags)); goto qc24_host_busy; } else { } sp->u.scmd.cmd = cmd; sp->type = 8U; atomic_set(& sp->ref_count, 1); cmd->SCp.ptr = (char *)sp; sp->free = & qla2x00_sp_free_dma; sp->done = & qla2x00_sp_compl; rval = (*((ha->isp_ops)->start_scsi))(sp); if (rval != 0) { ql_dbg(134250496U, vha, 12307, "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); set_bit(22L, (unsigned long volatile *)(& vha->dpc_flags)); goto qc24_host_busy_free_sp; } else { } return (0); qc24_host_busy_free_sp: qla2x00_sp_free_dma((void *)ha, (void *)sp); qc24_host_busy: ; return (4181); qc24_target_busy: ; return (4184); qc24_fail_command: (*(cmd->scsi_done))(cmd); return (0); } } static int qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd ) { unsigned long wait_iter ; scsi_qla_host_t *vha ; 
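/* qla2x00_eh_wait_on_command (local declarations continue below): polls once per
 * second, for at most ten iterations, until cmd->SCp.ptr is cleared by command
 * completion; it returns 0 when the command has completed, 258 when it is still
 * outstanding, and returns early without waiting when the PCI channel is offline. */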
void *tmp ; struct qla_hw_data *ha ; int ret ; int tmp___0 ; long tmp___1 ; unsigned long tmp___2 ; { wait_iter = 10UL; tmp = shost_priv((cmd->device)->host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; ret = 0; tmp___0 = pci_channel_offline(ha->pdev); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 0L); if (tmp___1 != 0L || *((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(4194304U, vha, 32773, "Return:eh_wait.\n"); return (ret); } else { } goto ldv_61558; ldv_61557: msleep(1000U); ldv_61558: ; if ((unsigned long )cmd->SCp.ptr != (unsigned long )((char *)0)) { tmp___2 = wait_iter; wait_iter = wait_iter - 1UL; if (tmp___2 != 0UL) { goto ldv_61557; } else { goto ldv_61559; } } else { } ldv_61559: ; if ((unsigned long )cmd->SCp.ptr != (unsigned long )((char *)0)) { ret = 258; } else { } return (ret); } } int qla2x00_wait_for_hba_online(scsi_qla_host_t *vha ) { int return_status ; unsigned long wait_online ; struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; wait_online = (unsigned long )jiffies + 75000UL; goto ldv_61574; ldv_61573: msleep(1000U); ldv_61574: tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___0 != 0) { goto _L; } else { tmp___1 = constant_test_bit(3L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___1 != 0) { goto _L; } else { tmp___2 = constant_test_bit(10L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___2 != 0) { goto _L; } else if ((unsigned int )ha->dpc_active != 0U) { _L: /* CIL Label */ if ((long )((unsigned long )jiffies - wait_online) < 0L) { goto ldv_61573; } else { goto ldv_61575; } } else { goto ldv_61575; } } } ldv_61575: ; if (*((unsigned long *)base_vha + 19UL) != 0UL) { return_status = 0; } else { return_status = 258; } return (return_status); } } static int qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha ) { int return_status ; unsigned long wait_online ; struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; wait_online = (unsigned long )jiffies + 75000UL; goto ldv_61590; ldv_61589: msleep(1000U); ldv_61590: tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___0 != 0) { goto _L; } else { tmp___1 = constant_test_bit(3L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___1 != 0) { goto _L; } else { tmp___2 = constant_test_bit(10L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___2 != 0) { goto _L; } else if (ha->optrom_state != 0) { goto _L; } else if ((unsigned int )ha->dpc_active != 0U) { _L: /* CIL Label */ if ((long )((unsigned long )jiffies - wait_online) < 0L) { goto ldv_61589; } else { goto ldv_61591; } } else { goto ldv_61591; } } } ldv_61591: ; if (*((unsigned long *)base_vha + 19UL) != 0UL && ha->optrom_state == 0) { return_status = 0; } else { return_status = 258; } ql_dbg(4194304U, vha, 32793, "%s return status=%d.\n", "qla2x00_wait_for_reset_ready", return_status); return (return_status); } } int qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha ) { int return_status ; unsigned long wait_reset ; struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; wait_reset = 
(unsigned long )jiffies + 75000UL; goto ldv_61608; ldv_61607: msleep(1000U); tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___0 == 0 && *((unsigned long *)ha + 2UL) != 0UL) { goto ldv_61606; } else { } ldv_61608: tmp___1 = constant_test_bit(2L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___1 != 0) { goto _L; } else { tmp___2 = constant_test_bit(3L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___2 != 0) { goto _L; } else { tmp___3 = constant_test_bit(10L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___3 != 0) { goto _L; } else if ((unsigned int )ha->dpc_active != 0U) { _L: /* CIL Label */ if ((long )((unsigned long )jiffies - wait_reset) < 0L) { goto ldv_61607; } else { goto ldv_61606; } } else { goto ldv_61606; } } } ldv_61606: ; if (*((unsigned long *)ha + 2UL) != 0UL) { return_status = 0; } else { return_status = 258; } return (return_status); } } static void sp_get(struct srb *sp ) { { atomic_inc(& sp->ref_count); return; } } static int qla2xxx_eh_abort(struct scsi_cmnd *cmd ) { scsi_qla_host_t *vha ; void *tmp ; srb_t *sp ; int ret ; unsigned int id ; unsigned int lun ; unsigned long flags ; int wait ; struct qla_hw_data *ha ; raw_spinlock_t *tmp___0 ; int tmp___1 ; raw_spinlock_t *tmp___2 ; int tmp___3 ; { tmp = shost_priv((cmd->device)->host); vha = (scsi_qla_host_t *)tmp; wait = 0; ha = vha->hw; if ((unsigned long )cmd->SCp.ptr == (unsigned long )((char *)0)) { return (8194); } else { } ret = fc_block_scsi_eh(cmd); if (ret != 0) { return (ret); } else { } ret = 8194; id = (cmd->device)->id; lun = (cmd->device)->lun; tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); sp = (srb_t *)cmd->SCp.ptr; if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { spin_unlock_irqrestore(& ha->hardware_lock, flags); return (8194); } else { } ql_dbg(4194304U, vha, 32770, "Aborting from RISC nexus=%ld:%d:%d sp=%p cmd=%p\n", vha->host_no, id, lun, sp, cmd); sp_get(sp); spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___1 = (*((ha->isp_ops)->abort_command))(sp); if (tmp___1 != 0) { ret = 8195; ql_dbg(4194304U, vha, 32771, "Abort command mbx failed cmd=%p.\n", cmd); } else { ql_dbg(4194304U, vha, 32772, "Abort command mbx success cmd=%p.\n", cmd); wait = 1; } tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); (*(sp->done))((void *)ha, (void *)sp, 0); spin_unlock_irqrestore(& ha->hardware_lock, flags); if (ret == 8195 && (unsigned long )cmd->SCp.ptr == (unsigned long )((char *)0)) { ret = 8194; } else { } if (wait != 0) { tmp___3 = qla2x00_eh_wait_on_command(cmd); if (tmp___3 != 0) { ql_log(1U, vha, 32774, "Abort handler timed out cmd=%p.\n", cmd); ret = 8195; } else { } } else { } ql_log(2U, vha, 32796, "Abort command issued nexus=%ld:%d:%d -- %d %x.\n", vha->host_no, id, lun, wait, ret); return (ret); } } int qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha , unsigned int t , unsigned int l , enum nexus_wait_type type ) { int cnt ; int match ; int status ; unsigned long flags ; struct qla_hw_data *ha ; struct req_que *req ; srb_t *sp ; struct scsi_cmnd *cmd ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; { ha = vha->hw; status = 0; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); req = vha->req; cnt = 1; goto ldv_61655; ldv_61654: sp = *(req->outstanding_cmds + (unsigned long )cnt); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto ldv_61646; } else { } 
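/* Scan of req->outstanding_cmds, slot 1 upward, in qla2x00_eh_wait_for_pending_commands:
 * the code below skips SRBs that are not SCSI commands (type != 8) or belong to another
 * vport, then matches the nexus per the wait type (0 matches any command, 1 matches the
 * target id, 2 matches both id and lun). For each match the hardware lock is dropped
 * while qla2x00_eh_wait_on_command waits for that command, then re-taken before the
 * scan continues. */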
if ((unsigned int )sp->type != 8U) { goto ldv_61646; } else { } if ((int )vha->vp_idx != (int )((sp->fcport)->vha)->vp_idx) { goto ldv_61646; } else { } match = 0; cmd = sp->u.scmd.cmd; switch ((unsigned int )type) { case 0U: match = 1; goto ldv_61648; case 1U: match = (cmd->device)->id == t; goto ldv_61648; case 2U: match = (cmd->device)->id == t && (cmd->device)->lun == l; goto ldv_61648; } ldv_61648: ; if (match == 0) { goto ldv_61646; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); status = qla2x00_eh_wait_on_command(cmd); tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); ldv_61646: cnt = cnt + 1; ldv_61655: ; if (status == 0 && (int )req->num_outstanding_cmds > cnt) { goto ldv_61654; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (status); } } static char *reset_errors[4U] = { (char *)"HBA not online", (char *)"HBA not ready", (char *)"Task management failed", (char *)"Waiting for command completions"}; static int __qla2xxx_eh_generic_reset(char *name , enum nexus_wait_type type , struct scsi_cmnd *cmd , int (*do_reset)(struct fc_port * , unsigned int , int ) ) { scsi_qla_host_t *vha ; void *tmp ; fc_port_t *fcport ; int err ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = shost_priv((cmd->device)->host); vha = (scsi_qla_host_t *)tmp; fcport = (fc_port_t *)(cmd->device)->hostdata; if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { return (8195); } else { } err = fc_block_scsi_eh(cmd); if (err != 0) { return (err); } else { } ql_log(2U, vha, 32777, "%s RESET ISSUED nexus=%ld:%d:%d cmd=%p.\n", name, vha->host_no, (cmd->device)->id, (cmd->device)->lun, cmd); err = 0; tmp___0 = qla2x00_wait_for_hba_online(vha); if (tmp___0 != 0) { ql_log(1U, vha, 32778, "Wait for hba online failed for cmd=%p.\n", cmd); goto eh_reset_failed; } else { } err = 2; tmp___1 = (*do_reset)(fcport, (cmd->device)->lun, (cmd->request)->cpu + 1); if (tmp___1 != 0) { ql_log(1U, vha, 32780, "do_reset failed for cmd=%p.\n", cmd); goto eh_reset_failed; } else { } err = 3; tmp___2 = qla2x00_eh_wait_for_pending_commands(vha, (cmd->device)->id, (cmd->device)->lun, type); if (tmp___2 != 0) { ql_log(1U, vha, 32781, "wait for pending cmds failed for cmd=%p.\n", cmd); goto eh_reset_failed; } else { } ql_log(2U, vha, 32782, "%s RESET SUCCEEDED nexus:%ld:%d:%d cmd=%p.\n", name, vha->host_no, (cmd->device)->id, (cmd->device)->lun, cmd); return (8194); eh_reset_failed: ql_log(2U, vha, 32783, "%s RESET FAILED: %s nexus=%ld:%d:%d cmd=%p.\n", name, reset_errors[err], vha->host_no, (cmd->device)->id, (cmd->device)->lun, cmd); return (8195); } } static int qla2xxx_eh_device_reset(struct scsi_cmnd *cmd ) { scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { tmp = shost_priv((cmd->device)->host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = __qla2xxx_eh_generic_reset((char *)"DEVICE", 2, cmd, (ha->isp_ops)->lun_reset); return (tmp___0); } } static int qla2xxx_eh_target_reset(struct scsi_cmnd *cmd ) { scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { tmp = shost_priv((cmd->device)->host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = __qla2xxx_eh_generic_reset((char *)"TARGET", 1, cmd, (ha->isp_ops)->target_reset); return (tmp___0); } } static int qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd ) { scsi_qla_host_t *vha ; void *tmp ; fc_port_t *fcport ; int ret ; unsigned int id ; unsigned int lun ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = shost_priv((cmd->device)->host); vha = 
(scsi_qla_host_t *)tmp; fcport = (fc_port_t *)(cmd->device)->hostdata; ret = 8195; id = (cmd->device)->id; lun = (cmd->device)->lun; if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { return (ret); } else { } ret = fc_block_scsi_eh(cmd); if (ret != 0) { return (ret); } else { } ret = 8195; ql_log(2U, vha, 32786, "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun); tmp___0 = qla2x00_wait_for_hba_online(vha); if (tmp___0 != 0) { ql_log(0U, vha, 32787, "Wait for hba online failed board disabled.\n"); goto eh_bus_reset_done; } else { } tmp___1 = qla2x00_loop_reset(vha); if (tmp___1 == 0) { ret = 8194; } else { } if (ret == 8195) { goto eh_bus_reset_done; } else { } tmp___2 = qla2x00_eh_wait_for_pending_commands(vha, 0U, 0U, 0); if (tmp___2 != 0) { ql_log(1U, vha, 32788, "Wait for pending commands failed.\n"); ret = 8195; } else { } eh_bus_reset_done: ql_log(1U, vha, 32811, "BUS RESET %s nexus=%ld:%d:%d.\n", ret == 8195 ? (char *)"FAILED" : (char *)"SUCCEEDED", vha->host_no, id, lun); return (ret); } } static int qla2xxx_eh_host_reset(struct scsi_cmnd *cmd ) { scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int ret ; unsigned int id ; unsigned int lun ; scsi_qla_host_t *base_vha ; void *tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; { tmp = shost_priv((cmd->device)->host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; ret = 8195; tmp___0 = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp___0; id = (cmd->device)->id; lun = (cmd->device)->lun; ql_log(2U, vha, 32792, "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun); tmp___1 = qla2x00_wait_for_reset_ready(vha); if (tmp___1 != 0) { goto eh_host_reset_lock; } else { } if ((unsigned long )vha != (unsigned long )base_vha) { tmp___2 = qla2x00_vp_abort_isp(vha); if (tmp___2 != 0) { goto eh_host_reset_lock; } else { } } else { if (((vha->hw)->device_type & 16384U) != 0U || ((vha->hw)->device_type & 262144U) != 0U) { tmp___3 = qla82xx_fcoe_ctx_reset(vha); if (tmp___3 == 0) { ret = 8194; goto eh_host_reset_lock; } else { } } else { } if ((unsigned long )ha->wq != (unsigned long )((struct workqueue_struct *)0)) { flush_workqueue(ha->wq); } else { } set_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); tmp___5 = (*((ha->isp_ops)->abort_isp))(base_vha); if (tmp___5 != 0) { clear_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); set_bit(2L, (unsigned long volatile *)(& base_vha->dpc_flags)); tmp___4 = qla2x00_wait_for_hba_online(vha); if (tmp___4 != 0) { ql_log(1U, vha, 32810, "wait for hba online failed.\n"); goto eh_host_reset_lock; } else { } } else { } clear_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); } tmp___6 = qla2x00_eh_wait_for_pending_commands(vha, 0U, 0U, 0); if (tmp___6 == 0) { ret = 8194; } else { } eh_host_reset_lock: ql_log(2U, vha, 32791, "ADAPTER RESET %s nexus=%ld:%d:%d.\n", ret == 8195 ? 
(char *)"FAILED" : (char *)"SUCCEEDED", vha->host_no, id, lun); return (ret); } } int qla2x00_loop_reset(scsi_qla_host_t *vha ) { int ret ; struct fc_port *fcport ; struct qla_hw_data *ha ; int tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { ha = vha->hw; if ((ha->device_type & 131072U) != 0U) { tmp = qlafx00_loop_reset(vha); return (tmp); } else { } if (ql2xtargetreset == 1 && *((unsigned long *)ha + 2UL) != 0UL) { __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (struct fc_port *)__mptr; goto ldv_61712; ldv_61711: ; if ((unsigned int )fcport->port_type != 5U) { goto ldv_61710; } else { } ret = (*((ha->isp_ops)->target_reset))(fcport, 0U, 0); if (ret != 0) { ql_dbg(4194304U, vha, 32812, "Bus Reset failed: Reset=%d d_id=%x.\n", ret, (int )fcport->d_id.b24); } else { } ldv_61710: __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (struct fc_port *)__mptr___0; ldv_61712: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_61711; } else { } } else { } if (*((unsigned long *)ha + 2UL) != 0UL && ((((ha->device_type & 8192U) == 0U && (ha->device_type & 16384U) == 0U) && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U)) { atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); qla2x00_mark_all_devices_lost(vha, 0); ret = qla2x00_full_login_lip(vha); if (ret != 0) { ql_dbg(4194304U, vha, 32813, "full_login_lip=%d.\n", ret); } else { } } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { ret = qla2x00_lip_reset(vha); if (ret != 0) { ql_dbg(4194304U, vha, 32814, "lip_reset failed (%d).\n", ret); } else { } } else { } vha->marker_needed = 1U; return (0); } } void qla2x00_abort_all_cmds(scsi_qla_host_t *vha , int res ) { int que ; int cnt ; unsigned long flags ; srb_t *sp ; struct qla_hw_data *ha ; struct req_que *req ; raw_spinlock_t *tmp ; { ha = vha->hw; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); que = 0; goto ldv_61732; ldv_61731: req = *(ha->req_q_map + (unsigned long )que); if ((unsigned long )req == (unsigned long )((struct req_que *)0)) { goto ldv_61727; } else { } if ((unsigned long )req->outstanding_cmds == (unsigned long )((srb_t **)0)) { goto ldv_61727; } else { } cnt = 1; goto ldv_61729; ldv_61728: sp = *(req->outstanding_cmds + (unsigned long )cnt); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { *(req->outstanding_cmds + (unsigned long )cnt) = (srb_t *)0; (*(sp->done))((void *)vha, (void *)sp, res); } else { } cnt = cnt + 1; ldv_61729: ; if ((int )req->num_outstanding_cmds > cnt) { goto ldv_61728; } else { } ldv_61727: que = que + 1; ldv_61732: ; if ((int )ha->max_req_queues > que) { goto ldv_61731; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static int qla2xxx_slave_alloc(struct scsi_device *sdev ) { struct fc_rport *rport ; struct device const *__mptr ; struct scsi_target *tmp___1 ; struct fc_rport *tmp___2 ; struct scsi_target *tmp___3 ; int tmp___4 ; int tmp___5 ; { tmp___3 = scsi_target(sdev); tmp___4 = scsi_is_fc_rport((struct device const *)tmp___3->dev.parent); if (tmp___4 != 0) { tmp___1 = scsi_target(sdev); __mptr = (struct device const *)tmp___1->dev.parent; tmp___2 = (struct fc_rport *)__mptr + 0xffffffffffffffa0UL; } else { tmp___2 = (struct fc_rport *)0; } rport = tmp___2; if ((unsigned long )rport == (unsigned long )((struct fc_rport *)0)) { return (-6); } else { tmp___5 = fc_remote_port_chkready(rport); if (tmp___5 != 0) { return (-6); } else { } } 
sdev->hostdata = (void *)*((fc_port_t **)rport->dd_data); return (0); } } static int qla2xxx_slave_configure(struct scsi_device *sdev ) { scsi_qla_host_t *vha ; void *tmp ; struct req_que *req ; { tmp = shost_priv(sdev->host); vha = (scsi_qla_host_t *)tmp; req = vha->req; if (((vha->hw)->device_type & 33554432U) != 0U) { blk_queue_update_dma_alignment(sdev->request_queue, 7); } else { } if ((unsigned int )*((unsigned char *)sdev + 297UL) != 0U) { scsi_activate_tcq(sdev, req->max_q_depth); } else { scsi_deactivate_tcq(sdev, req->max_q_depth); } return (0); } } static void qla2xxx_slave_destroy(struct scsi_device *sdev ) { { sdev->hostdata = (void *)0; return; } } static void qla2x00_handle_queue_full(struct scsi_device *sdev , int qdepth ) { fc_port_t *fcport ; int tmp ; { fcport = (fc_port_t *)sdev->hostdata; tmp = scsi_track_queue_full(sdev, qdepth); if (tmp == 0) { return; } else { } ql_dbg(134217728U, fcport->vha, 12329, "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n", (int )sdev->queue_depth, (fcport->vha)->host_no, sdev->id, sdev->lun); return; } } static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev , int qdepth ) { fc_port_t *fcport ; struct scsi_qla_host *vha ; struct req_que *req ; { fcport = (fc_port_t *)sdev->hostdata; vha = fcport->vha; req = (struct req_que *)0; req = vha->req; if ((unsigned long )req == (unsigned long )((struct req_que *)0)) { return; } else { } if (req->max_q_depth <= (int )sdev->queue_depth || req->max_q_depth < qdepth) { return; } else { } if ((unsigned int )*((unsigned char *)sdev + 297UL) != 0U) { scsi_adjust_queue_depth(sdev, 34, qdepth); } else { scsi_adjust_queue_depth(sdev, 32, qdepth); } ql_dbg(134217728U, vha, 12330, "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n", (int )sdev->queue_depth, (fcport->vha)->host_no, sdev->id, sdev->lun); return; } } static int qla2x00_change_queue_depth(struct scsi_device *sdev , int qdepth , int reason ) { int tmp ; { switch (reason) { case 0: tmp = scsi_get_tag_type(sdev); scsi_adjust_queue_depth(sdev, tmp, qdepth); goto ldv_61766; case 1: qla2x00_handle_queue_full(sdev, qdepth); goto ldv_61766; case 2: qla2x00_adjust_sdev_qdepth_up(sdev, qdepth); goto ldv_61766; default: ; return (-95); } ldv_61766: ; return ((int )sdev->queue_depth); } } static int qla2x00_change_queue_type(struct scsi_device *sdev , int tag_type ) { { if ((unsigned int )*((unsigned char *)sdev + 297UL) != 0U) { scsi_set_tag_type(sdev, tag_type); if (tag_type != 0) { scsi_activate_tcq(sdev, (int )sdev->queue_depth); } else { scsi_deactivate_tcq(sdev, (int )sdev->queue_depth); } } else { tag_type = 0; } return (tag_type); } } static void qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha ) { scsi_qla_host_t *vp ; struct Scsi_Host *shost ; struct scsi_device *sdev ; struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { ha = vha->hw; ha->host_last_rampdown_time = jiffies; if (ha->cfg_lun_q_depth <= (int )(vha->host)->cmd_per_lun) { return; } else { } if (ha->cfg_lun_q_depth / 2 < (int )(vha->host)->cmd_per_lun) { ha->cfg_lun_q_depth = (int )(vha->host)->cmd_per_lun; } else { ha->cfg_lun_q_depth = ha->cfg_lun_q_depth / 2; } tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)ha->vp_list.next; vp = (scsi_qla_host_t *)__mptr; goto ldv_61794; ldv_61793: shost = vp->host; sdev = __scsi_iterate_devices(shost, (struct scsi_device *)0); goto ldv_61791; ldv_61790: ; if ((int 
)sdev->queue_depth > (int )shost->cmd_per_lun) { if ((int )sdev->queue_depth < ha->cfg_lun_q_depth) { goto ldv_61789; } else { } ql_dbg(134217728U, vp, 12337, "%ld:%d:%d: Ramping down queue depth to %d", vp->host_no, sdev->id, sdev->lun, ha->cfg_lun_q_depth); qla2x00_change_queue_depth(sdev, ha->cfg_lun_q_depth, 0); } else { } ldv_61789: sdev = __scsi_iterate_devices(shost, sdev); ldv_61791: ; if ((unsigned long )sdev != (unsigned long )((struct scsi_device *)0)) { goto ldv_61790; } else { } __mptr___0 = (struct list_head const *)vp->list.next; vp = (scsi_qla_host_t *)__mptr___0; ldv_61794: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_61793; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); return; } } static void qla2x00_host_ramp_up_queuedepth(scsi_qla_host_t *vha ) { scsi_qla_host_t *vp ; struct Scsi_Host *shost ; struct scsi_device *sdev ; struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { ha = vha->hw; ha->host_last_rampup_time = jiffies; ha->cfg_lun_q_depth = ha->cfg_lun_q_depth + 1; tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)ha->vp_list.next; vp = (scsi_qla_host_t *)__mptr; goto ldv_61816; ldv_61815: shost = vp->host; sdev = __scsi_iterate_devices(shost, (struct scsi_device *)0); goto ldv_61813; ldv_61812: ; if ((int )sdev->queue_depth > ha->cfg_lun_q_depth) { goto ldv_61811; } else { } qla2x00_change_queue_depth(sdev, ha->cfg_lun_q_depth, 2); ldv_61811: sdev = __scsi_iterate_devices(shost, sdev); ldv_61813: ; if ((unsigned long )sdev != (unsigned long )((struct scsi_device *)0)) { goto ldv_61812; } else { } __mptr___0 = (struct list_head const *)vp->list.next; vp = (scsi_qla_host_t *)__mptr___0; ldv_61816: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_61815; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); return; } } static void qla2x00_config_dma_addressing(struct qla_hw_data *ha ) { u64 tmp ; int tmp___0 ; int tmp___1 ; { ha->flags.enable_64bit_addressing = 0U; tmp___1 = dma_set_mask(& (ha->pdev)->dev, 0xffffffffffffffffULL); if (tmp___1 == 0) { tmp = dma_get_required_mask(& (ha->pdev)->dev); if ((unsigned int )(tmp >> 32ULL) != 0U) { tmp___0 = pci_set_consistent_dma_mask(ha->pdev, 0xffffffffffffffffULL); if (tmp___0 == 0) { ha->flags.enable_64bit_addressing = 1U; (ha->isp_ops)->calc_req_entries = & qla2x00_calc_iocbs_64; (ha->isp_ops)->build_iocbs = & qla2x00_build_scsi_iocbs_64; return; } else { } } else { } } else { } dma_set_mask(& (ha->pdev)->dev, 4294967295ULL); pci_set_consistent_dma_mask(ha->pdev, 4294967295ULL); return; } } static void qla2x00_enable_intrs(struct qla_hw_data *ha ) { unsigned long flags ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; { flags = 0UL; reg = & (ha->iobase)->isp; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ha->interrupts_on = 1U; writew(32776, (void volatile *)(& reg->ictrl)); readw((void const volatile *)(& reg->ictrl)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static void qla2x00_disable_intrs(struct qla_hw_data *ha ) { unsigned long flags ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; { flags = 0UL; reg = & (ha->iobase)->isp; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ha->interrupts_on = 0U; writew(0, (void volatile *)(& reg->ictrl)); readw((void const volatile *)(& reg->ictrl)); 
spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static void qla24xx_enable_intrs(struct qla_hw_data *ha ) { unsigned long flags ; struct device_reg_24xx *reg ; raw_spinlock_t *tmp ; { flags = 0UL; reg = & (ha->iobase)->isp24; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ha->interrupts_on = 1U; writel(8U, (void volatile *)(& reg->ictrl)); readl((void const volatile *)(& reg->ictrl)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static void qla24xx_disable_intrs(struct qla_hw_data *ha ) { unsigned long flags ; struct device_reg_24xx *reg ; raw_spinlock_t *tmp ; { flags = 0UL; reg = & (ha->iobase)->isp24; if ((((ha->device_type & 2048U) != 0U || (ha->device_type & 8192U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) && *((unsigned long *)ha + 2UL) != 0UL) { return; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ha->interrupts_on = 0U; writel(0U, (void volatile *)(& reg->ictrl)); readl((void const volatile *)(& reg->ictrl)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static int qla2x00_iospace_config(struct qla_hw_data *ha ) { resource_size_t pio ; uint16_t msix ; int cpus ; char const *tmp ; int tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; char const *tmp___4 ; void *tmp___5 ; char const *tmp___6 ; uint8_t tmp___7 ; void *tmp___8 ; unsigned int tmp___9 ; { tmp___0 = pci_request_selected_regions(ha->pdev, ha->bars, "qla2xxx"); if (tmp___0 != 0) { tmp = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 17, "Failed to reserve PIO/MMIO regions (%s), aborting.\n", tmp); goto iospace_error_exit; } else { } if ((ha->bars & 1) == 0) { goto skip_pio; } else { } pio = (ha->pdev)->resource[0].start; if (((ha->pdev)->resource[0].flags & 256UL) != 0UL) { if (((ha->pdev)->resource[0].start == 0ULL && (ha->pdev)->resource[0].end == (ha->pdev)->resource[0].start) || ((ha->pdev)->resource[0].end - (ha->pdev)->resource[0].start) + 1ULL <= 255ULL) { tmp___1 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 18, "Invalid pci I/O region size (%s).\n", tmp___1); pio = 0ULL; } else { } } else { tmp___2 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 19, "Region #0 no a PIO resource (%s).\n", tmp___2); pio = 0ULL; } ha->pio_address = pio; ql_dbg_pci(1073741824U, ha->pdev, 20, "PIO address=%llu.\n", ha->pio_address); skip_pio: ; if (((ha->pdev)->resource[1].flags & 512UL) == 0UL) { tmp___3 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 21, "Region #1 not an MMIO resource (%s), aborting.\n", tmp___3); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[1].start == 0ULL && (ha->pdev)->resource[1].end == (ha->pdev)->resource[1].start) || ((ha->pdev)->resource[1].end - (ha->pdev)->resource[1].start) + 1ULL <= 255ULL) { tmp___4 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 22, "Invalid PCI mem region size (%s), aborting.\n", tmp___4); goto iospace_error_exit; } else { } tmp___5 = ioremap((ha->pdev)->resource[1].start, 256UL); ha->iobase = (device_reg_t *)tmp___5; if ((unsigned long )ha->iobase == (unsigned long )((device_reg_t *)0)) { tmp___6 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 23, "Cannot remap MMIO (%s), aborting.\n", tmp___6); goto iospace_error_exit; } else { } tmp___7 = 1U; ha->max_rsp_queues = tmp___7; ha->max_req_queues = tmp___7; if (((ql2xmaxqueues <= 1 && 
ql2xmultique_tag == 0) || (ql2xmaxqueues > 1 && ql2xmultique_tag != 0)) || ((ha->device_type & 2048U) == 0U && (ha->device_type & 8192U) == 0U)) { goto mqiobase_exit; } else { } tmp___8 = ioremap((ha->pdev)->resource[3].start, (ha->pdev)->resource[3].start != 0ULL || (ha->pdev)->resource[3].end != (ha->pdev)->resource[3].start ? (unsigned long )(((ha->pdev)->resource[3].end - (ha->pdev)->resource[3].start) + 1ULL) : 0UL); ha->mqiobase = (device_reg_t *)tmp___8; if ((unsigned long )ha->mqiobase != (unsigned long )((device_reg_t *)0)) { ql_dbg_pci(1073741824U, ha->pdev, 24, "MQIO Base=%p.\n", ha->mqiobase); pci_read_config_word((struct pci_dev const *)ha->pdev, 162, & msix); ha->msix_count = msix; if (ql2xmultique_tag != 0) { tmp___9 = cpumask_weight(cpu_online_mask); cpus = (int )tmp___9; ha->max_rsp_queues = (int )ha->msix_count + -1 > cpus ? (unsigned int )((uint8_t )cpus) + 1U : (unsigned int )((uint8_t )ha->msix_count) + 255U; ha->max_req_queues = 2U; } else if (ql2xmaxqueues > 1) { ha->max_req_queues = (uint8_t )(32 < ql2xmaxqueues ? 32 : ql2xmaxqueues); ql_dbg_pci(1048576U, ha->pdev, 49160, "QoS mode set, max no of request queues:%d.\n", (int )ha->max_req_queues); ql_dbg_pci(1073741824U, ha->pdev, 25, "QoS mode set, max no of request queues:%d.\n", (int )ha->max_req_queues); } else { } ql_log_pci(2U, ha->pdev, 26, "MSI-X vector count: %d.\n", (int )msix); } else { ql_log_pci(2U, ha->pdev, 27, "BAR 3 not enabled.\n"); } mqiobase_exit: ha->msix_count = (unsigned int )((uint16_t )ha->max_rsp_queues) + 1U; ql_dbg_pci(1073741824U, ha->pdev, 28, "MSIX Count:%d.\n", (int )ha->msix_count); return (0); iospace_error_exit: ; return (-12); } } static int qla83xx_iospace_config(struct qla_hw_data *ha ) { uint16_t msix ; int cpus ; char const *tmp ; int tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; void *tmp___3 ; char const *tmp___4 ; uint8_t tmp___5 ; void *tmp___6 ; void *tmp___7 ; unsigned int tmp___8 ; { tmp___0 = pci_request_selected_regions(ha->pdev, ha->bars, "qla2xxx"); if (tmp___0 != 0) { tmp = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 279, "Failed to reserve PIO/MMIO regions (%s), aborting.\n", tmp); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[0].flags & 512UL) == 0UL) { tmp___1 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 280, "Invalid pci I/O region size (%s).\n", tmp___1); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[0].start == 0ULL && (ha->pdev)->resource[0].end == (ha->pdev)->resource[0].start) || ((ha->pdev)->resource[0].end - (ha->pdev)->resource[0].start) + 1ULL <= 255ULL) { tmp___2 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 281, "Invalid PCI mem region size (%s), aborting\n", tmp___2); goto iospace_error_exit; } else { } tmp___3 = ioremap((ha->pdev)->resource[0].start, 256UL); ha->iobase = (device_reg_t *)tmp___3; if ((unsigned long )ha->iobase == (unsigned long )((device_reg_t *)0)) { tmp___4 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 282, "Cannot remap MMIO (%s), aborting.\n", tmp___4); goto iospace_error_exit; } else { } tmp___5 = 1U; ha->max_rsp_queues = tmp___5; ha->max_req_queues = tmp___5; tmp___6 = ioremap((ha->pdev)->resource[4].start, (ha->pdev)->resource[4].start != 0ULL || (ha->pdev)->resource[4].end != (ha->pdev)->resource[4].start ? 
(unsigned long )(((ha->pdev)->resource[4].end - (ha->pdev)->resource[4].start) + 1ULL) : 0UL); ha->mqiobase = (device_reg_t *)tmp___6; if ((unsigned long )ha->mqiobase == (unsigned long )((device_reg_t *)0)) { ql_log_pci(0U, ha->pdev, 285, "BAR2/region4 not enabled\n"); goto mqiobase_exit; } else { } tmp___7 = ioremap((ha->pdev)->resource[2].start, (ha->pdev)->resource[2].start != 0ULL || (ha->pdev)->resource[2].end != (ha->pdev)->resource[2].start ? (unsigned long )(((ha->pdev)->resource[2].end - (ha->pdev)->resource[2].start) + 1ULL) : 0UL); ha->msixbase = (device_reg_t *)tmp___7; if ((unsigned long )ha->msixbase != (unsigned long )((device_reg_t *)0)) { pci_read_config_word((struct pci_dev const *)ha->pdev, 146, & msix); ha->msix_count = msix; if (ql2xmultique_tag != 0) { tmp___8 = cpumask_weight(cpu_online_mask); cpus = (int )tmp___8; ha->max_rsp_queues = (int )ha->msix_count + -1 > cpus ? (unsigned int )((uint8_t )cpus) + 1U : (unsigned int )((uint8_t )ha->msix_count) + 255U; ha->max_req_queues = 2U; } else if (ql2xmaxqueues > 1) { ha->max_req_queues = (uint8_t )(32 < ql2xmaxqueues ? 32 : ql2xmaxqueues); ql_dbg_pci(1048576U, ha->pdev, 49164, "QoS mode set, max no of request queues:%d.\n", (int )ha->max_req_queues); ql_dbg_pci(1073741824U, ha->pdev, 283, "QoS mode set, max no of request queues:%d.\n", (int )ha->max_req_queues); } else { } ql_log_pci(2U, ha->pdev, 284, "MSI-X vector count: %d.\n", (int )msix); } else { ql_log_pci(2U, ha->pdev, 286, "BAR 1 not enabled.\n"); } mqiobase_exit: ha->msix_count = (unsigned int )((uint16_t )ha->max_rsp_queues) + 1U; qlt_83xx_iospace_config(ha); ql_dbg_pci(1073741824U, ha->pdev, 287, "MSIX Count:%d.\n", (int )ha->msix_count); return (0); iospace_error_exit: ; return (-12); } } static struct isp_operations qla2100_isp_ops = {& qla2100_pci_config, & qla2x00_reset_chip, & qla2x00_chip_diag, & qla2x00_config_rings, & qla2x00_reset_adapter, & qla2x00_nvram_config, & qla2x00_update_fw_options, & qla2x00_load_risc, & qla2x00_pci_info_str, & qla2x00_fw_version_str, & qla2100_intr_handler, & qla2x00_enable_intrs, & qla2x00_disable_intrs, & qla2x00_abort_command, & qla2x00_abort_target, & qla2x00_lun_reset, & qla2x00_login_fabric, & qla2x00_fabric_logout, & qla2x00_calc_iocbs_32, & qla2x00_build_scsi_iocbs_32, & qla2x00_prep_ms_iocb, & qla2x00_prep_ms_fdmi_iocb, & qla2x00_read_nvram_data, & qla2x00_write_nvram_data, & qla2100_fw_dump, (int (*)(struct scsi_qla_host * ))0, (int (*)(struct scsi_qla_host * ))0, (void (*)(struct scsi_qla_host * ))0, & qla2x00_read_optrom_data, & qla2x00_write_optrom_data, & qla2x00_get_flash_version, & qla2x00_start_scsi, & qla2x00_abort_isp, & qla2x00_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla2300_isp_ops = {& qla2300_pci_config, & qla2x00_reset_chip, & qla2x00_chip_diag, & qla2x00_config_rings, & qla2x00_reset_adapter, & qla2x00_nvram_config, & qla2x00_update_fw_options, & qla2x00_load_risc, & qla2x00_pci_info_str, & qla2x00_fw_version_str, & qla2300_intr_handler, & qla2x00_enable_intrs, & qla2x00_disable_intrs, & qla2x00_abort_command, & qla2x00_abort_target, & qla2x00_lun_reset, & qla2x00_login_fabric, & qla2x00_fabric_logout, & qla2x00_calc_iocbs_32, & qla2x00_build_scsi_iocbs_32, & qla2x00_prep_ms_iocb, & qla2x00_prep_ms_fdmi_iocb, & qla2x00_read_nvram_data, & qla2x00_write_nvram_data, & qla2300_fw_dump, & qla2x00_beacon_on, & qla2x00_beacon_off, & qla2x00_beacon_blink, & qla2x00_read_optrom_data, & qla2x00_write_optrom_data, & qla2x00_get_flash_version, & qla2x00_start_scsi, & 
qla2x00_abort_isp, & qla2x00_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla24xx_isp_ops = {& qla24xx_pci_config, & qla24xx_reset_chip, & qla24xx_chip_diag, & qla24xx_config_rings, & qla24xx_reset_adapter, & qla24xx_nvram_config, & qla24xx_update_fw_options, & qla24xx_load_risc, & qla24xx_pci_info_str, & qla24xx_fw_version_str, & qla24xx_intr_handler, & qla24xx_enable_intrs, & qla24xx_disable_intrs, & qla24xx_abort_command, & qla24xx_abort_target, & qla24xx_lun_reset, & qla24xx_login_fabric, & qla24xx_fabric_logout, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, & qla24xx_read_nvram_data, & qla24xx_write_nvram_data, & qla24xx_fw_dump, & qla24xx_beacon_on, & qla24xx_beacon_off, & qla24xx_beacon_blink, & qla24xx_read_optrom_data, & qla24xx_write_optrom_data, & qla24xx_get_flash_version, & qla24xx_start_scsi, & qla2x00_abort_isp, & qla2x00_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla25xx_isp_ops = {& qla25xx_pci_config, & qla24xx_reset_chip, & qla24xx_chip_diag, & qla24xx_config_rings, & qla24xx_reset_adapter, & qla24xx_nvram_config, & qla24xx_update_fw_options, & qla24xx_load_risc, & qla24xx_pci_info_str, & qla24xx_fw_version_str, & qla24xx_intr_handler, & qla24xx_enable_intrs, & qla24xx_disable_intrs, & qla24xx_abort_command, & qla24xx_abort_target, & qla24xx_lun_reset, & qla24xx_login_fabric, & qla24xx_fabric_logout, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, & qla25xx_read_nvram_data, & qla25xx_write_nvram_data, & qla25xx_fw_dump, & qla24xx_beacon_on, & qla24xx_beacon_off, & qla24xx_beacon_blink, & qla25xx_read_optrom_data, & qla24xx_write_optrom_data, & qla24xx_get_flash_version, & qla24xx_dif_start_scsi, & qla2x00_abort_isp, & qla2x00_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla81xx_isp_ops = {& qla25xx_pci_config, & qla24xx_reset_chip, & qla24xx_chip_diag, & qla24xx_config_rings, & qla24xx_reset_adapter, & qla81xx_nvram_config, & qla81xx_update_fw_options, & qla81xx_load_risc, & qla24xx_pci_info_str, & qla24xx_fw_version_str, & qla24xx_intr_handler, & qla24xx_enable_intrs, & qla24xx_disable_intrs, & qla24xx_abort_command, & qla24xx_abort_target, & qla24xx_lun_reset, & qla24xx_login_fabric, & qla24xx_fabric_logout, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, (uint8_t *(*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0, (int (*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0, & qla81xx_fw_dump, & qla24xx_beacon_on, & qla24xx_beacon_off, & qla83xx_beacon_blink, & qla25xx_read_optrom_data, & qla24xx_write_optrom_data, & qla24xx_get_flash_version, & qla24xx_dif_start_scsi, & qla2x00_abort_isp, & qla2x00_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla82xx_isp_ops = {& qla82xx_pci_config, & qla82xx_reset_chip, & qla24xx_chip_diag, & qla82xx_config_rings, & qla24xx_reset_adapter, & qla81xx_nvram_config, & qla24xx_update_fw_options, & qla82xx_load_risc, & qla24xx_pci_info_str, & qla24xx_fw_version_str, & qla82xx_intr_handler, & qla82xx_enable_intrs, & qla82xx_disable_intrs, & qla24xx_abort_command, & qla24xx_abort_target, & qla24xx_lun_reset, & qla24xx_login_fabric, & qla24xx_fabric_logout, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & 
qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, & qla24xx_read_nvram_data, & qla24xx_write_nvram_data, & qla24xx_fw_dump, & qla82xx_beacon_on, & qla82xx_beacon_off, (void (*)(struct scsi_qla_host * ))0, & qla82xx_read_optrom_data, & qla82xx_write_optrom_data, & qla82xx_get_flash_version, & qla82xx_start_scsi, & qla82xx_abort_isp, & qla82xx_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla8044_isp_ops = {& qla82xx_pci_config, & qla82xx_reset_chip, & qla24xx_chip_diag, & qla82xx_config_rings, & qla24xx_reset_adapter, & qla81xx_nvram_config, & qla24xx_update_fw_options, & qla82xx_load_risc, & qla24xx_pci_info_str, & qla24xx_fw_version_str, & qla8044_intr_handler, & qla82xx_enable_intrs, & qla82xx_disable_intrs, & qla24xx_abort_command, & qla24xx_abort_target, & qla24xx_lun_reset, & qla24xx_login_fabric, & qla24xx_fabric_logout, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, (uint8_t *(*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0, (int (*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0, & qla24xx_fw_dump, & qla82xx_beacon_on, & qla82xx_beacon_off, (void (*)(struct scsi_qla_host * ))0, & qla82xx_read_optrom_data, & qla8044_write_optrom_data, & qla82xx_get_flash_version, & qla82xx_start_scsi, & qla8044_abort_isp, & qla82xx_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qla83xx_isp_ops = {& qla25xx_pci_config, & qla24xx_reset_chip, & qla24xx_chip_diag, & qla24xx_config_rings, & qla24xx_reset_adapter, & qla81xx_nvram_config, & qla81xx_update_fw_options, & qla81xx_load_risc, & qla24xx_pci_info_str, & qla24xx_fw_version_str, & qla24xx_intr_handler, & qla24xx_enable_intrs, & qla24xx_disable_intrs, & qla24xx_abort_command, & qla24xx_abort_target, & qla24xx_lun_reset, & qla24xx_login_fabric, & qla24xx_fabric_logout, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, (uint8_t *(*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0, (int (*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0, & qla83xx_fw_dump, & qla24xx_beacon_on, & qla24xx_beacon_off, & qla83xx_beacon_blink, & qla25xx_read_optrom_data, & qla24xx_write_optrom_data, & qla24xx_get_flash_version, & qla24xx_dif_start_scsi, & qla2x00_abort_isp, & qla83xx_iospace_config, & qla2x00_initialize_adapter}; static struct isp_operations qlafx00_isp_ops = {& qlafx00_pci_config, & qlafx00_soft_reset, & qlafx00_chip_diag, & qlafx00_config_rings, & qlafx00_soft_reset, (int (*)(struct scsi_qla_host * ))0, (void (*)(struct scsi_qla_host * ))0, (int (*)(struct scsi_qla_host * , uint32_t * ))0, & qlafx00_pci_info_str, & qlafx00_fw_version_str, & qlafx00_intr_handler, & qlafx00_enable_intrs, & qlafx00_disable_intrs, & qlafx00_abort_command, & qlafx00_abort_target, & qlafx00_lun_reset, (int (*)(struct scsi_qla_host * , uint16_t , uint8_t , uint8_t , uint8_t , uint16_t * , uint8_t ))0, (int (*)(struct scsi_qla_host * , uint16_t , uint8_t , uint8_t , uint8_t ))0, (uint16_t (*)(uint16_t ))0, (void (*)(srb_t * , cmd_entry_t * , uint16_t ))0, & qla24xx_prep_ms_iocb, & qla24xx_prep_ms_fdmi_iocb, & qla24xx_read_nvram_data, & qla24xx_write_nvram_data, (void (*)(struct scsi_qla_host * , int ))0, & qla24xx_beacon_on, & qla24xx_beacon_off, (void (*)(struct scsi_qla_host * ))0, & qla24xx_read_optrom_data, & qla24xx_write_optrom_data, & qla24xx_get_flash_version, & qlafx00_start_scsi, & 
qlafx00_abort_isp, & qlafx00_iospace_config, & qlafx00_initialize_adapter}; __inline static void qla2x00_set_isp_flags(struct qla_hw_data *ha ) { { ha->device_type = 2147483648U; switch ((int )(ha->pdev)->device) { case 8448: ha->device_type = ha->device_type | 1U; ha->device_type = ha->device_type & 2147483647U; ha->fw_srisc_address = 4096U; goto ldv_61882; case 8704: ha->device_type = ha->device_type | 2U; ha->device_type = ha->device_type & 2147483647U; ha->fw_srisc_address = 4096U; goto ldv_61882; case 8960: ha->device_type = ha->device_type | 4U; ha->device_type = ha->device_type | 268435456U; ha->fw_srisc_address = 2048U; goto ldv_61882; case 8978: ha->device_type = ha->device_type | 8U; ha->device_type = ha->device_type | 268435456U; ha->fw_srisc_address = 2048U; goto ldv_61882; case 8994: ha->device_type = ha->device_type | 16U; ha->device_type = ha->device_type | 268435456U; if ((unsigned int )(ha->pdev)->subsystem_vendor == 4136U && (unsigned int )(ha->pdev)->subsystem_device == 368U) { ha->device_type = ha->device_type | 536870912U; } else { } ha->fw_srisc_address = 2048U; goto ldv_61882; case 25362: ha->device_type = ha->device_type | 32U; ha->fw_srisc_address = 2048U; goto ldv_61882; case 25378: ha->device_type = ha->device_type | 64U; ha->fw_srisc_address = 2048U; goto ldv_61882; case 9250: ha->device_type = ha->device_type | 128U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->fw_srisc_address = 1048576U; goto ldv_61882; case 9266: ha->device_type = ha->device_type | 256U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->fw_srisc_address = 1048576U; goto ldv_61882; case 33842: ha->device_type = ha->device_type | 4096U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->fw_srisc_address = 1048576U; goto ldv_61882; case 21538: ha->device_type = ha->device_type | 512U; ha->device_type = ha->device_type | 134217728U; ha->fw_srisc_address = 1048576U; goto ldv_61882; case 21554: ha->device_type = ha->device_type | 1024U; ha->device_type = ha->device_type | 134217728U; ha->fw_srisc_address = 1048576U; goto ldv_61882; case 9522: ha->device_type = ha->device_type | 2048U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->fw_srisc_address = 1048576U; goto ldv_61882; case 32769: ha->device_type = ha->device_type | 8192U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->fw_srisc_address = 1048576U; goto ldv_61882; case 32801: ha->device_type = ha->device_type | 16384U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->fw_srisc_address = 1048576U; qla82xx_init_flags(ha); goto ldv_61882; case 32836: ha->device_type = ha->device_type | 262144U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->fw_srisc_address = 1048576U; qla82xx_init_flags(ha); goto ldv_61882; case 8241: ha->device_type = ha->device_type | 32768U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->device_type = ha->device_type | 33554432U; 
ha->fw_srisc_address = 1048576U; goto ldv_61882; case 32817: ha->device_type = ha->device_type | 65536U; ha->device_type = ha->device_type | 268435456U; ha->device_type = ha->device_type | 134217728U; ha->device_type = ha->device_type | 67108864U; ha->device_type = ha->device_type | 33554432U; ha->fw_srisc_address = 1048576U; goto ldv_61882; case 61441: ha->device_type = ha->device_type | 131072U; goto ldv_61882; } ldv_61882: ; if ((ha->device_type & 16384U) != 0U) { ha->port_no = ((int )ha->portnum & 1) == 0; } else { pci_read_config_byte((struct pci_dev const *)ha->pdev, 61, & ha->port_no); } if ((int )ha->port_no & 1) { ha->flags.port0 = 1U; } else { ha->flags.port0 = 0U; } ql_dbg_pci(1073741824U, ha->pdev, 11, "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", ha->device_type, (int )ha->flags.port0, ha->fw_srisc_address); return; } } static void qla2xxx_scan_start(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; if (*((unsigned long *)vha->hw + 2UL) != 0UL) { return; } else { } set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(7L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(16L, (unsigned long volatile *)(& vha->dpc_flags)); return; } } static int qla2xxx_scan_finished(struct Scsi_Host *shost , unsigned long time ) { scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; if ((unsigned long )vha->host == (unsigned long )((struct Scsi_Host *)0)) { return (1); } else { } if ((unsigned long )((int )(vha->hw)->loop_reset_delay * 250) < time) { return (1); } else { } tmp___0 = atomic_read((atomic_t const *)(& vha->loop_state)); return (tmp___0 == 5); } } static int qla2x00_probe_one(struct pci_dev *pdev , struct pci_device_id const *id ) { int ret ; struct Scsi_Host *host ; scsi_qla_host_t *base_vha ; struct qla_hw_data *ha ; char pci_info[30U] ; char fw_str[30U] ; char wq_name[30U] ; struct scsi_host_template *sht ; int bars ; int mem_only ; uint16_t req_length ; uint16_t rsp_length ; struct req_que *req ; struct rsp_que *rsp ; int tmp ; int tmp___0 ; void *tmp___1 ; struct lock_class_key __key ; struct lock_class_key __key___0 ; struct lock_class_key __key___1 ; struct lock_class_key __key___2 ; struct scsi_qla_host *tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; long tmp___6 ; long tmp___7 ; struct lock_class_key __key___3 ; char const *__lock_name ; struct workqueue_struct *tmp___8 ; struct lock_class_key __key___4 ; atomic_long_t __constr_expr_0 ; struct lock_class_key __key___5 ; char const *__lock_name___0 ; struct workqueue_struct *tmp___9 ; struct lock_class_key __key___6 ; atomic_long_t __constr_expr_1 ; struct lock_class_key __key___7 ; atomic_long_t __constr_expr_2 ; struct lock_class_key __key___8 ; atomic_long_t __constr_expr_3 ; int prot ; int guard ; bool tmp___10 ; char *tmp___11 ; char const *tmp___12 ; char *tmp___13 ; uint8_t tmp___14 ; struct task_struct *t ; { ret = -19; base_vha = (scsi_qla_host_t *)0; mem_only = 0; req_length = 0U; rsp_length = 0U; req = (struct req_que *)0; rsp = (struct rsp_que *)0; bars = pci_select_bars(pdev, 768UL); sht = & qla2xxx_driver_template; if ((((((((((((unsigned int )pdev->device == 9250U || (unsigned int )pdev->device == 9266U) || (unsigned int )pdev->device == 33842U) || (unsigned int )pdev->device == 21538U) || (unsigned int )pdev->device == 21554U) || (unsigned int )pdev->device == 9522U) || (unsigned int )pdev->device 
== 32769U) || (unsigned int )pdev->device == 32801U) || (unsigned int )pdev->device == 8241U) || (unsigned int )pdev->device == 32817U) || (unsigned int )pdev->device == 61441U) || (unsigned int )pdev->device == 32836U) { bars = pci_select_bars(pdev, 512UL); mem_only = 1; ql_dbg_pci(1073741824U, pdev, 7, "Mem only adapter.\n"); } else { } ql_dbg_pci(1073741824U, pdev, 8, "Bars=%d.\n", bars); if (mem_only != 0) { tmp = pci_enable_device_mem(pdev); if (tmp != 0) { goto probe_out; } else { } } else { tmp___0 = pci_enable_device(pdev); if (tmp___0 != 0) { goto probe_out; } else { } } pci_enable_pcie_error_reporting(pdev); tmp___1 = kzalloc(12480UL, 208U); ha = (struct qla_hw_data *)tmp___1; if ((unsigned long )ha == (unsigned long )((struct qla_hw_data *)0)) { ql_log_pci(0U, pdev, 9, "Unable to allocate memory for ha.\n"); goto probe_out; } else { } ql_dbg_pci(1073741824U, pdev, 10, "Memory allocated for ha=%p.\n", ha); ha->pdev = pdev; ha->tgt.enable_class_2 = (unsigned char )ql2xenableclass2; ha->bars = bars; ha->mem_only = mem_only; spinlock_check(& ha->hardware_lock); __raw_spin_lock_init(& ha->hardware_lock.ldv_6105.rlock, "&(&ha->hardware_lock)->rlock", & __key); spinlock_check(& ha->vport_slock); __raw_spin_lock_init(& ha->vport_slock.ldv_6105.rlock, "&(&ha->vport_slock)->rlock", & __key___0); __mutex_init(& ha->selflogin_lock, "&ha->selflogin_lock", & __key___1); qla2x00_set_isp_flags(ha); if (((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || (ha->device_type & 2048U) != 0U) || (ha->device_type & 8192U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { pdev->needs_freset = 1U; } else { } ha->prev_topology = 0U; ha->init_cb_size = 96; ha->link_data_rate = 65535U; ha->optrom_size = 131072U; ha->cfg_lun_q_depth = ql2xmaxqdepth; if ((int )ha->device_type & 1) { ha->max_fibre_devices = 512U; ha->mbx_count = 8U; req_length = 128U; rsp_length = 64U; ha->max_loop_id = 254U; ha->gid_list_info_size = 4; ha->flash_conf_off = 4294967295U; ha->flash_data_off = 4294967295U; ha->nvram_conf_off = 4294967295U; ha->nvram_data_off = 4294967295U; ha->isp_ops = & qla2100_isp_ops; } else if ((ha->device_type & 2U) != 0U) { ha->max_fibre_devices = 512U; ha->mbx_count = 24U; req_length = 2048U; rsp_length = 64U; ha->max_loop_id = 254U; ha->gid_list_info_size = 4; ha->flash_conf_off = 4294967295U; ha->flash_data_off = 4294967295U; ha->nvram_conf_off = 4294967295U; ha->nvram_data_off = 4294967295U; ha->isp_ops = & qla2100_isp_ops; } else if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { ha->max_fibre_devices = 512U; ha->mbx_count = 32U; req_length = 2048U; rsp_length = 512U; ha->max_loop_id = 2047U; ha->gid_list_info_size = 6; if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { ha->optrom_size = 1048576U; } else { } ha->flash_conf_off = 4294967295U; ha->flash_data_off = 4294967295U; ha->nvram_conf_off = 4294967295U; ha->nvram_data_off = 4294967295U; ha->isp_ops = & qla2300_isp_ops; } else if ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) { ha->max_fibre_devices = 2048U; ha->mbx_count = 32U; req_length = 2048U; rsp_length = 512U; ha->tgt.atio_q_length = 4096U; ha->max_loop_id = 2047U; ha->init_cb_size = 5252; ha->gid_list_info_size = 8; ha->optrom_size = 1048576U; ha->nvram_npiv_size = 128U; 
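/*
 * This if/else-if ladder sizes the HBA per ISP family: request/response ring
 * lengths, mailbox count, loop-ID limit, init-CB size, option-ROM size,
 * flash/NVRAM region offsets, and the isp_ops jump table (qla24xx_isp_ops
 * here, with the qla25xx/81xx/82xx/8044/83xx/fx00 variants in the branches
 * that follow).
 */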
ha->isp_ops = & qla24xx_isp_ops; ha->flash_conf_off = 2147287040U; ha->flash_data_off = 2146435072U; ha->nvram_conf_off = 2147418112U; ha->nvram_data_off = 2147352576U; } else if ((ha->device_type & 2048U) != 0U) { ha->max_fibre_devices = 2048U; ha->mbx_count = 32U; req_length = 2048U; rsp_length = 512U; ha->tgt.atio_q_length = 4096U; ha->max_loop_id = 2047U; ha->init_cb_size = 5252; ha->gid_list_info_size = 8; ha->optrom_size = 2097152U; ha->nvram_npiv_size = 256U; ha->isp_ops = & qla25xx_isp_ops; ha->flash_conf_off = 2147287040U; ha->flash_data_off = 2146435072U; ha->nvram_conf_off = 2147418112U; ha->nvram_data_off = 2147352576U; } else if ((ha->device_type & 8192U) != 0U) { ha->max_fibre_devices = 2048U; ha->mbx_count = 32U; req_length = 2048U; rsp_length = 512U; ha->tgt.atio_q_length = 4096U; ha->max_loop_id = 2047U; ha->init_cb_size = 5252; ha->gid_list_info_size = 8; ha->optrom_size = 4194304U; ha->nvram_npiv_size = 256U; ha->isp_ops = & qla81xx_isp_ops; ha->flash_conf_off = 2147287040U; ha->flash_data_off = 2139095040U; ha->nvram_conf_off = 4294967295U; ha->nvram_data_off = 4294967295U; } else if ((ha->device_type & 16384U) != 0U) { ha->max_fibre_devices = 2048U; ha->mbx_count = 32U; req_length = 128U; rsp_length = 128U; ha->max_loop_id = 2047U; ha->init_cb_size = 5252; ha->gid_list_info_size = 8; ha->optrom_size = 8388608U; ha->nvram_npiv_size = 256U; ha->isp_ops = & qla82xx_isp_ops; ha->flash_conf_off = 2147287040U; ha->flash_data_off = 2146435072U; ha->nvram_conf_off = 2147418112U; ha->nvram_data_off = 2147352576U; } else if ((ha->device_type & 262144U) != 0U) { ha->max_fibre_devices = 2048U; ha->mbx_count = 32U; req_length = 128U; rsp_length = 128U; ha->max_loop_id = 2047U; ha->init_cb_size = 5252; ha->gid_list_info_size = 8; ha->optrom_size = 16777216U; ha->nvram_npiv_size = 256U; ha->isp_ops = & qla8044_isp_ops; ha->flash_conf_off = 2147287040U; ha->flash_data_off = 2146435072U; ha->nvram_conf_off = 2147418112U; ha->nvram_data_off = 2147352576U; } else if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { ha->portnum = (unsigned int )((uint16_t )(ha->pdev)->devfn) & 7U; ha->max_fibre_devices = 2048U; ha->mbx_count = 32U; req_length = 2048U; rsp_length = 512U; ha->tgt.atio_q_length = 4096U; ha->max_loop_id = 2047U; ha->init_cb_size = 5252; ha->gid_list_info_size = 8; ha->optrom_size = 16777216U; ha->nvram_npiv_size = 256U; ha->isp_ops = & qla83xx_isp_ops; ha->flash_conf_off = 2147287040U; ha->flash_data_off = 2139095040U; ha->nvram_conf_off = 4294967295U; ha->nvram_data_off = 4294967295U; } else if ((ha->device_type & 131072U) != 0U) { ha->max_fibre_devices = 512U; ha->mbx_count = 16U; ha->aen_mbx_count = 8U; req_length = 512U; rsp_length = 256U; ha->init_cb_size = 128; ha->isp_ops = & qlafx00_isp_ops; ha->port_down_retry_count = 30; ha->mr.fw_hbt_cnt = 6U; ha->mr.fw_reset_timer_tick = 120U; ha->mr.fw_critemp_timer_tick = 60U; ha->mr.fw_hbt_en = 1U; } else { } ql_dbg_pci(1073741824U, pdev, 30, "mbx_count=%d, req_length=%d, rsp_length=%d, max_loop_id=%d, init_cb_size=%d, gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, max_fibre_devices=%d.\n", (int )ha->mbx_count, (int )req_length, (int )rsp_length, (int )ha->max_loop_id, ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, (int )ha->nvram_npiv_size, (int )ha->max_fibre_devices); ql_dbg_pci(1073741824U, pdev, 31, "isp_ops=%p, flash_conf_off=%d, flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, ha->nvram_conf_off, 
ha->nvram_data_off); ret = (*((ha->isp_ops)->iospace_config))(ha); if (ret != 0) { goto iospace_config_failed; } else { } ql_log_pci(2U, pdev, 29, "Found an ISP%04X irq %d iobase 0x%p.\n", (int )pdev->device, pdev->irq, ha->iobase); __mutex_init(& ha->vport_lock, "&ha->vport_lock", & __key___2); init_completion(& ha->mbx_cmd_comp); complete(& ha->mbx_cmd_comp); init_completion(& ha->mbx_intr_comp); init_completion(& ha->dcbx_comp); init_completion(& ha->lb_portup_comp); set_bit(0L, (unsigned long volatile *)(& ha->vp_idx_map)); qla2x00_config_dma_addressing(ha); ql_dbg_pci(1073741824U, pdev, 32, "64 Bit addressing is %s.\n", *((unsigned long *)ha + 2UL) != 0UL ? (char *)"enable" : (char *)"disable"); ret = qla2x00_mem_alloc(ha, (int )req_length, (int )rsp_length, & req, & rsp); if (ret == 0) { ql_log_pci(0U, pdev, 49, "Failed to allocate memory for adapter, aborting.\n"); goto probe_hw_failed; } else { } req->max_q_depth = 32; if (ql2xmaxqdepth != 0 && (unsigned int )ql2xmaxqdepth <= 65535U) { req->max_q_depth = ql2xmaxqdepth; } else { } tmp___2 = qla2x00_create_host(sht, ha); base_vha = tmp___2; if ((unsigned long )base_vha == (unsigned long )((scsi_qla_host_t *)0)) { ret = -12; qla2x00_mem_free(ha); qla2x00_free_req_que(ha, req); qla2x00_free_rsp_que(ha, rsp); goto probe_hw_failed; } else { } pci_set_drvdata(pdev, (void *)base_vha); host = base_vha->host; base_vha->req = req; if ((ha->device_type & 131072U) != 0U) { host->can_queue = 1024; } else { host->can_queue = (int )req->length + 128; } if ((((((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) || (ha->device_type & 8192U) != 0U) || (ha->device_type & 16384U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || (ha->device_type & 262144U) != 0U) { base_vha->mgmt_svr_loop_id = (unsigned int )base_vha->vp_idx + 10U; } else { base_vha->mgmt_svr_loop_id = (unsigned int )base_vha->vp_idx + 254U; } ha->mr.fcport.vha = base_vha; ha->mr.fcport.port_type = 0; ha->mr.fcport.loop_id = 4096U; qla2x00_set_fcport_state(& ha->mr.fcport, 1); ha->mr.fcport.supported_classes = 0U; ha->mr.fcport.scan_state = 1U; if ((ha->device_type & 134217728U) == 0U) { if ((int )ha->device_type & 1) { host->sg_tablesize = 32U; } else { } } else if ((ha->device_type & 16384U) == 0U) { host->sg_tablesize = 1024U; } else { } ql_dbg(1073741824U, base_vha, 50, "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", host->can_queue, base_vha->req, (int )base_vha->mgmt_svr_loop_id, (int )host->sg_tablesize); host->max_id = (unsigned int )ha->max_fibre_devices; host->cmd_per_lun = 3; host->unique_id = host->host_no; if ((ha->device_type & 33554432U) != 0U && ql2xenabledif != 0) { host->max_cmd_len = 32U; } else { host->max_cmd_len = 16U; } host->max_channel = 0U; host->max_lun = ql2xmaxlun; host->transportt = qla2xxx_transport_template; sht->vendor_id = 72057594037932151ULL; ql_dbg(1073741824U, base_vha, 51, "max_id=%d this_id=%d cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d max_lun=%d transportt=%p, vendor_id=%llu.\n", host->max_id, host->this_id, (int )host->cmd_per_lun, host->unique_id, (int )host->max_cmd_len, host->max_channel, host->max_lun, host->transportt, sht->vendor_id); que_init: tmp___3 = qla2x00_alloc_queues(ha, req, rsp); if (tmp___3 == 0) { ql_log(0U, base_vha, 61, "Failed to allocate memory for queue pointers...aborting.\n"); goto probe_init_failed; } else { } qlt_probe_one_stage1(base_vha, ha); ret = 
qla2x00_request_irqs(ha, rsp); if (ret != 0) { goto probe_init_failed; } else { } pci_save_state(pdev); rsp->req = req; req->rsp = rsp; if ((ha->device_type & 131072U) != 0U) { *(ha->rsp_q_map) = rsp; *(ha->req_q_map) = req; set_bit(0L, (unsigned long volatile *)(& ha->req_qid_map)); set_bit(0L, (unsigned long volatile *)(& ha->rsp_qid_map)); } else { } req->req_q_in = & (ha->iobase)->isp24.req_q_in; req->req_q_out = & (ha->iobase)->isp24.req_q_out; rsp->rsp_q_in = & (ha->iobase)->isp24.rsp_q_in; rsp->rsp_q_out = & (ha->iobase)->isp24.rsp_q_out; if ((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { req->req_q_in = & (ha->mqiobase)->isp25mq.req_q_in; req->req_q_out = & (ha->mqiobase)->isp25mq.req_q_out; rsp->rsp_q_in = & (ha->mqiobase)->isp25mq.rsp_q_in; rsp->rsp_q_out = & (ha->mqiobase)->isp25mq.rsp_q_out; } else { } if ((ha->device_type & 131072U) != 0U) { req->req_q_in = & (ha->iobase)->ispfx00.req_q_in; req->req_q_out = & (ha->iobase)->ispfx00.req_q_out; rsp->rsp_q_in = & (ha->iobase)->ispfx00.rsp_q_in; rsp->rsp_q_out = & (ha->iobase)->ispfx00.rsp_q_out; } else { } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { req->req_q_out = (uint32_t *)(& (ha->iobase)->isp82.req_q_out); rsp->rsp_q_in = (uint32_t *)(& (ha->iobase)->isp82.rsp_q_in); rsp->rsp_q_out = (uint32_t *)(& (ha->iobase)->isp82.rsp_q_out); } else { } ql_dbg(1048576U, base_vha, 49161, "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); ql_dbg(1048576U, base_vha, 49162, "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); ql_dbg(1073741824U, base_vha, 62, "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); ql_dbg(1073741824U, base_vha, 63, "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); tmp___4 = (*((ha->isp_ops)->initialize_adapter))(base_vha); if (tmp___4 != 0) { ql_log(0U, base_vha, 214, "Failed to initialize adapter - Adapter flags %x.\n", base_vha->device_flags); if ((ha->device_type & 16384U) != 0U) { qla82xx_idc_lock(ha); qla82xx_wr_32(ha, 136323392UL, 6U); qla82xx_idc_unlock(ha); ql_log(0U, base_vha, 215, "HW State: FAILED.\n"); } else if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); qla8044_wr_direct(base_vha, 4U, 6U); qla8044_idc_unlock(ha); ql_log(0U, base_vha, 336, "HW State: FAILED.\n"); } else { } ret = -19; goto probe_failed; } else { } if ((unsigned int )ha->mqenable != 0U) { tmp___5 = qla25xx_setup_mode(base_vha); if (tmp___5 != 0) { ql_log(1U, base_vha, 236, "Failed to create queues, falling back to single queue mode.\n"); goto que_init; } else { } } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { goto skip_dpc; } else { } ha->dpc_thread = kthread_create_on_node(& qla2x00_do_dpc, (void *)ha, -1, "%s_dpc", (uint8_t *)(& base_vha->host_str)); tmp___7 = IS_ERR((void const *)ha->dpc_thread); if (tmp___7 != 0L) { ql_log(0U, base_vha, 237, "Failed to start DPC thread.\n"); tmp___6 = PTR_ERR((void const *)ha->dpc_thread); ret = (int )tmp___6; goto probe_failed; } else { } ql_dbg(1073741824U, base_vha, 238, "DPC thread started successfully.\n"); qla2xxx_wake_dpc(base_vha); if ((ha->device_type & 65536U) != 0U || ((ha->device_type & 32768U) != 0U && (int )ha->fw_attributes_ext[0] & 1)) { sprintf((char *)(& wq_name), 
"qla2xxx_%lu_dpc_lp_wq", base_vha->host_no); __lock_name = "%s"; tmp___8 = __alloc_workqueue_key("%s", 10U, 1, & __key___3, __lock_name, (char *)(& wq_name)); ha->dpc_lp_wq = tmp___8; __init_work(& ha->idc_aen, 0); __constr_expr_0.counter = 137438953408L; ha->idc_aen.data = __constr_expr_0; lockdep_init_map(& ha->idc_aen.lockdep_map, "(&ha->idc_aen)", & __key___4, 0); INIT_LIST_HEAD(& ha->idc_aen.entry); ha->idc_aen.func = & qla83xx_service_idc_aen; sprintf((char *)(& wq_name), "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no); __lock_name___0 = "%s"; tmp___9 = __alloc_workqueue_key("%s", 10U, 1, & __key___5, __lock_name___0, (char *)(& wq_name)); ha->dpc_hp_wq = tmp___9; __init_work(& ha->nic_core_reset, 0); __constr_expr_1.counter = 137438953408L; ha->nic_core_reset.data = __constr_expr_1; lockdep_init_map(& ha->nic_core_reset.lockdep_map, "(&ha->nic_core_reset)", & __key___6, 0); INIT_LIST_HEAD(& ha->nic_core_reset.entry); ha->nic_core_reset.func = & qla83xx_nic_core_reset_work; __init_work(& ha->idc_state_handler, 0); __constr_expr_2.counter = 137438953408L; ha->idc_state_handler.data = __constr_expr_2; lockdep_init_map(& ha->idc_state_handler.lockdep_map, "(&ha->idc_state_handler)", & __key___7, 0); INIT_LIST_HEAD(& ha->idc_state_handler.entry); ha->idc_state_handler.func = & qla83xx_idc_state_handler_work; __init_work(& ha->nic_core_unrecoverable, 0); __constr_expr_3.counter = 137438953408L; ha->nic_core_unrecoverable.data = __constr_expr_3; lockdep_init_map(& ha->nic_core_unrecoverable.lockdep_map, "(&ha->nic_core_unrecoverable)", & __key___8, 0); INIT_LIST_HEAD(& ha->nic_core_unrecoverable.entry); ha->nic_core_unrecoverable.func = & qla83xx_nic_core_unrecoverable_work; } else { } skip_dpc: list_add_tail(& base_vha->list, & ha->vp_list); (base_vha->host)->irq = (ha->pdev)->irq; qla2x00_start_timer(base_vha, (void *)(& qla2x00_timer), 1UL); ql_dbg(1073741824U, base_vha, 239, "Started qla2x00_timer with interval=%d.\n", 1); ql_dbg(1073741824U, base_vha, 240, "Detected hba at address=%p.\n", ha); if ((ha->device_type & 33554432U) != 0U && ql2xenabledif != 0) { if (((int )ha->fw_attributes & 16) != 0) { prot = 0; base_vha->flags.difdix_supported = 1U; ql_dbg(1073741824U, base_vha, 241, "Registering for DIF/DIX type 1 and 3 protection.\n"); if (ql2xenabledif == 1) { prot = 8; } else { } scsi_host_set_prot(host, (unsigned int )(prot | 119)); guard = 1; if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) && ql2xenabledif > 1) { guard = guard | 2; } else { } scsi_host_set_guard(host, (int )((unsigned char )guard)); } else { base_vha->flags.difdix_supported = 0U; } } else { } (*((ha->isp_ops)->enable_intrs))(ha); if ((ha->device_type & 131072U) != 0U) { ret = qlafx00_fx_disc(base_vha, & (base_vha->hw)->mr.fcport, 1); host->sg_tablesize = (int )ha->mr.extended_io_enabled ? 
1024U : 128U; } else { } ret = scsi_add_host(host, & pdev->dev); if (ret != 0) { goto probe_failed; } else { } base_vha->flags.init_done = 1U; base_vha->flags.online = 1U; ql_dbg(1073741824U, base_vha, 242, "Init done and hba is online.\n"); tmp___10 = qla_ini_mode_enabled(base_vha); if ((int )tmp___10) { scsi_scan_host(host); } else { ql_dbg(1073741824U, base_vha, 290, "skipping scsi_scan_host() for non-initiator port\n"); } qla2x00_alloc_sysfs_attr(base_vha); if ((ha->device_type & 131072U) != 0U) { ret = qlafx00_fx_disc(base_vha, & (base_vha->hw)->mr.fcport, 2); ret = qlafx00_fx_disc(base_vha, & (base_vha->hw)->mr.fcport, 153); } else { } qla2x00_init_host_attr(base_vha); qla2x00_dfs_setup(base_vha); ql_log(2U, base_vha, 251, "QLogic %s - %s.\n", (uint8_t *)(& ha->model_number), (char *)(& ha->model_desc)); tmp___11 = (*((ha->isp_ops)->fw_version_str))(base_vha, (char *)(& fw_str)); tmp___12 = pci_name((struct pci_dev const *)pdev); tmp___13 = (*((ha->isp_ops)->pci_info_str))(base_vha, (char *)(& pci_info)); ql_log(2U, base_vha, 252, "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", (int )pdev->device, tmp___13, tmp___12, *((unsigned long *)ha + 2UL) != 0UL ? 43 : 45, base_vha->host_no, tmp___11); qlt_add_target(ha, base_vha); return (0); probe_init_failed: qla2x00_free_req_que(ha, req); *(ha->req_q_map) = (struct req_que *)0; clear_bit(0L, (unsigned long volatile *)(& ha->req_qid_map)); qla2x00_free_rsp_que(ha, rsp); *(ha->rsp_q_map) = (struct rsp_que *)0; clear_bit(0L, (unsigned long volatile *)(& ha->rsp_qid_map)); tmp___14 = 0U; ha->max_rsp_queues = tmp___14; ha->max_req_queues = tmp___14; probe_failed: ; if (base_vha->timer_active != 0U) { qla2x00_stop_timer(base_vha); } else { } base_vha->flags.online = 0U; if ((unsigned long )ha->dpc_thread != (unsigned long )((struct task_struct *)0)) { t = ha->dpc_thread; ha->dpc_thread = (struct task_struct *)0; kthread_stop(t); } else { } qla2x00_free_device(base_vha); scsi_host_put(base_vha->host); probe_hw_failed: ; if ((ha->device_type & 16384U) != 0U) { qla82xx_idc_lock(ha); qla82xx_clear_drv_active(ha); qla82xx_idc_unlock(ha); } else { } if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); qla8044_clear_drv_active(base_vha); qla8044_idc_unlock(ha); } else { } iospace_config_failed: ; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { if (ha->nx_pcibase == 0UL) { iounmap((void volatile *)ha->nx_pcibase); } else { } if (ql2xdbwr == 0) { iounmap((void volatile *)ha->nxdb_wr_ptr); } else { } } else { if ((unsigned long )ha->iobase != (unsigned long )((device_reg_t *)0)) { iounmap((void volatile *)ha->iobase); } else { } if ((unsigned long )ha->cregbase != (unsigned long )((void *)0)) { iounmap((void volatile *)ha->cregbase); } else { } } pci_release_selected_regions(ha->pdev, ha->bars); kfree((void const *)ha); ha = (struct qla_hw_data *)0; probe_out: pci_disable_device(pdev); return (ret); } } static void qla2x00_stop_dpc_thread(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct task_struct *t ; { ha = vha->hw; t = ha->dpc_thread; if ((unsigned long )ha->dpc_thread == (unsigned long )((struct task_struct *)0)) { return; } else { } ha->dpc_thread = (struct task_struct *)0; kthread_stop(t); return; } } static void qla2x00_shutdown(struct pci_dev *pdev ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; int tmp ; void *tmp___0 ; { tmp = atomic_read((atomic_t const *)(& pdev->enable_cnt)); if (tmp == 0) { return; } else { } tmp___0 = pci_get_drvdata(pdev); vha = (scsi_qla_host_t *)tmp___0; ha = vha->hw; if 
((ha->device_type & 131072U) != 0U) { qlafx00_driver_shutdown(vha, 20); } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { qla2x00_disable_fce_trace(vha, (uint64_t *)0ULL, (uint64_t *)0ULL); ha->flags.fce_enabled = 0U; } else { } if ((unsigned long )ha->eft != (unsigned long )((void *)0)) { qla2x00_disable_eft_trace(vha); } else { } qla2x00_try_to_stop_firmware(vha); vha->flags.online = 0U; if ((unsigned int )ha->interrupts_on != 0U) { vha->flags.init_done = 0U; (*((ha->isp_ops)->disable_intrs))(ha); } else { } qla2x00_free_irqs(vha); qla2x00_free_fw_dump(ha); return; } } static void qla2x00_remove_one(struct pci_dev *pdev ) { scsi_qla_host_t *base_vha ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; unsigned long flags ; int tmp ; void *tmp___0 ; raw_spinlock_t *tmp___1 ; long tmp___2 ; struct list_head const *__mptr ; int tmp___3 ; struct task_struct *t ; { tmp = atomic_read((atomic_t const *)(& pdev->enable_cnt)); if (tmp == 0) { return; } else { } tmp___0 = pci_get_drvdata(pdev); base_vha = (scsi_qla_host_t *)tmp___0; ha = base_vha->hw; ha->flags.host_shutting_down = 1U; set_bit(15L, (unsigned long volatile *)(& base_vha->dpc_flags)); if ((ha->device_type & 131072U) != 0U) { qlafx00_driver_shutdown(base_vha, 20); } else { } mutex_lock_nested(& ha->vport_lock, 0U); goto ldv_61980; ldv_61979: tmp___1 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___1); tmp___2 = ldv__builtin_expect((unsigned long )base_vha->list.next == (unsigned long )(& ha->vp_list), 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_os.o.c.prepared"), "i" (3372), "i" (12UL)); ldv_61976: ; goto ldv_61976; } else { } __mptr = (struct list_head const *)base_vha->list.next; vha = (scsi_qla_host_t *)__mptr; scsi_host_get(vha->host); spin_unlock_irqrestore(& ha->vport_slock, flags); mutex_unlock(& ha->vport_lock); fc_vport_terminate(vha->fc_vport); scsi_host_put(vha->host); mutex_lock_nested(& ha->vport_lock, 0U); ldv_61980: ; if (ha->cur_vport_count != 0) { goto ldv_61979; } else { } mutex_unlock(& ha->vport_lock); if ((ha->device_type & 65536U) != 0U) { ql_dbg(524288U, base_vha, 45182, "Clearing fcoe driver presence.\n"); tmp___3 = qla83xx_clear_drv_presence(base_vha); if (tmp___3 != 0) { ql_dbg(524288U, base_vha, 45177, "Error while clearing DRV-Presence.\n"); } else { } } else { } qla2x00_abort_all_cmds(base_vha, 65536); qla2x00_dfs_remove(base_vha); qla84xx_put_chip(base_vha); if (base_vha->timer_active != 0U) { qla2x00_stop_timer(base_vha); } else { } base_vha->flags.online = 0U; if ((unsigned long )ha->wq != (unsigned long )((struct workqueue_struct *)0)) { flush_workqueue(ha->wq); destroy_workqueue(ha->wq); ha->wq = (struct workqueue_struct *)0; } else { } if ((unsigned long )ha->dpc_lp_wq != (unsigned long )((struct workqueue_struct *)0)) { cancel_work_sync(& ha->idc_aen); destroy_workqueue(ha->dpc_lp_wq); ha->dpc_lp_wq = (struct workqueue_struct *)0; } else { } if ((unsigned long )ha->dpc_hp_wq != (unsigned long )((struct workqueue_struct *)0)) { cancel_work_sync(& ha->nic_core_reset); cancel_work_sync(& ha->idc_state_handler); cancel_work_sync(& ha->nic_core_unrecoverable); destroy_workqueue(ha->dpc_hp_wq); ha->dpc_hp_wq = (struct 
workqueue_struct *)0; } else { } if ((unsigned long )ha->dpc_thread != (unsigned long )((struct task_struct *)0)) { t = ha->dpc_thread; ha->dpc_thread = (struct task_struct *)0; kthread_stop(t); } else { } qlt_remove_target(ha, base_vha); qla2x00_free_sysfs_attr(base_vha); fc_remove_host(base_vha->host); ldv_scsi_remove_host_5(base_vha->host); qla2x00_free_device(base_vha); scsi_host_put(base_vha->host); if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); qla8044_clear_drv_active(base_vha); qla8044_idc_unlock(ha); } else { } if ((ha->device_type & 16384U) != 0U) { qla82xx_idc_lock(ha); qla82xx_clear_drv_active(ha); qla82xx_idc_unlock(ha); iounmap((void volatile *)ha->nx_pcibase); if (ql2xdbwr == 0) { iounmap((void volatile *)ha->nxdb_wr_ptr); } else { } } else { if ((unsigned long )ha->iobase != (unsigned long )((device_reg_t *)0)) { iounmap((void volatile *)ha->iobase); } else { } if ((unsigned long )ha->cregbase != (unsigned long )((void *)0)) { iounmap((void volatile *)ha->cregbase); } else { } if ((unsigned long )ha->mqiobase != (unsigned long )((device_reg_t *)0)) { iounmap((void volatile *)ha->mqiobase); } else { } if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) && (unsigned long )ha->msixbase != (unsigned long )((device_reg_t *)0)) { iounmap((void volatile *)ha->msixbase); } else { } } pci_release_selected_regions(ha->pdev, ha->bars); kfree((void const *)ha); ha = (struct qla_hw_data *)0; pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, (void *)0); return; } } static void qla2x00_free_device(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; qla2x00_abort_all_cmds(vha, 65536); if (vha->timer_active != 0U) { qla2x00_stop_timer(vha); } else { } qla2x00_stop_dpc_thread(vha); qla25xx_delete_queues(vha); if (*((unsigned long *)ha + 2UL) != 0UL) { qla2x00_disable_fce_trace(vha, (uint64_t *)0ULL, (uint64_t *)0ULL); } else { } if ((unsigned long )ha->eft != (unsigned long )((void *)0)) { qla2x00_disable_eft_trace(vha); } else { } qla2x00_try_to_stop_firmware(vha); vha->flags.online = 0U; if ((unsigned int )ha->interrupts_on != 0U) { vha->flags.init_done = 0U; (*((ha->isp_ops)->disable_intrs))(ha); } else { } qla2x00_free_irqs(vha); qla2x00_free_fcports(vha); qla2x00_mem_free(ha); qla82xx_md_free(vha); qla2x00_free_queues(ha); return; } } void qla2x00_free_fcports(struct scsi_qla_host *vha ) { fc_port_t *fcport ; fc_port_t *tfcport ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; __mptr___0 = (struct list_head const *)fcport->list.next; tfcport = (fc_port_t *)__mptr___0; goto ldv_61999; ldv_61998: list_del(& fcport->list); qla2x00_clear_loop_id(fcport); kfree((void const *)fcport); fcport = (fc_port_t *)0; fcport = tfcport; __mptr___1 = (struct list_head const *)tfcport->list.next; tfcport = (fc_port_t *)__mptr___1; ldv_61999: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_61998; } else { } return; } } __inline static void qla2x00_schedule_rport_del(struct scsi_qla_host *vha , fc_port_t *fcport , int defer ) { struct fc_rport *rport ; scsi_qla_host_t *base_vha ; unsigned long flags ; void *tmp ; raw_spinlock_t *tmp___0 ; { if ((unsigned long )fcport->rport == (unsigned long )((struct fc_rport *)0)) { return; } else { } rport = fcport->rport; if (defer != 0) { tmp = pci_get_drvdata((vha->hw)->pdev); base_vha = 
(scsi_qla_host_t *)tmp; tmp___0 = spinlock_check((vha->host)->host_lock); flags = _raw_spin_lock_irqsave(tmp___0); fcport->drport = rport; spin_unlock_irqrestore((vha->host)->host_lock, flags); set_bit(13L, (unsigned long volatile *)(& base_vha->dpc_flags)); qla2xxx_wake_dpc(base_vha); } else { fc_remote_port_delete(rport); qlt_fc_port_deleted(vha, fcport); } return; } } void qla2x00_mark_device_lost(scsi_qla_host_t *vha , fc_port_t *fcport , int do_login , int defer ) { int tmp ; int tmp___0 ; { if (((vha->hw)->device_type & 131072U) != 0U) { qla2x00_set_fcport_state(fcport, 3); qla2x00_schedule_rport_del(vha, fcport, defer); return; } else { } tmp = atomic_read((atomic_t const *)(& fcport->state)); if (tmp == 4 && (int )vha->vp_idx == (int )(fcport->vha)->vp_idx) { qla2x00_set_fcport_state(fcport, 3); qla2x00_schedule_rport_del(vha, fcport, defer); } else { } tmp___0 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___0 != 2) { qla2x00_set_fcport_state(fcport, 3); } else { } if (do_login == 0) { return; } else { } if (fcport->login_retry == 0) { fcport->login_retry = (int )(vha->hw)->login_retry_count; set_bit(8L, (unsigned long volatile *)(& vha->dpc_flags)); ql_dbg(268435456U, vha, 8295, "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n", (uint8_t *)(& fcport->port_name), (int )fcport->loop_id, fcport->login_retry); } else { } return; } } void qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha , int defer ) { fc_port_t *fcport ; struct list_head const *__mptr ; int tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_62029; ldv_62028: ; if ((unsigned int )vha->vp_idx != 0U && (int )vha->vp_idx != (int )(fcport->vha)->vp_idx) { goto ldv_62027; } else { } tmp = atomic_read((atomic_t const *)(& fcport->state)); if (tmp == 2) { goto ldv_62027; } else { } tmp___0 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___0 == 4) { qla2x00_set_fcport_state(fcport, 3); if (defer != 0) { qla2x00_schedule_rport_del(vha, fcport, defer); } else if ((int )vha->vp_idx == (int )(fcport->vha)->vp_idx) { qla2x00_schedule_rport_del(vha, fcport, defer); } else { } } else { } ldv_62027: __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_62029: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_62028; } else { } return; } } static int qla2x00_mem_alloc(struct qla_hw_data *ha , uint16_t req_len , uint16_t rsp_len , struct req_que **req , struct rsp_que **rsp ) { char name[16U] ; void *tmp ; int tmp___0 ; int tmp___1 ; void *tmp___2 ; void *tmp___3 ; void *tmp___4 ; void *tmp___5 ; void *tmp___6 ; void *tmp___7 ; void *tmp___8 ; void *tmp___9 ; void *tmp___10 ; void *tmp___11 ; int tmp___12 ; { tmp = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )ha->init_cb_size, & ha->init_cb_dma, 208U, (struct dma_attrs *)0); ha->init_cb = (init_cb_t *)tmp; if ((unsigned long )ha->init_cb == (unsigned long )((init_cb_t *)0)) { goto fail; } else { } tmp___0 = qlt_mem_alloc(ha); if (tmp___0 < 0) { goto fail_free_init_cb; } else { } tmp___1 = qla2x00_gid_list_size(ha); tmp___2 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )tmp___1, & ha->gid_list_dma, 208U, (struct dma_attrs *)0); ha->gid_list = (struct gid_list_info *)tmp___2; if ((unsigned long )ha->gid_list == (unsigned long )((struct gid_list_info *)0)) { goto fail_free_tgt_mem; } else { } ha->srb_mempool = mempool_create_slab_pool(128, srb_cachep); if ((unsigned long 
)ha->srb_mempool == (unsigned long )((mempool_t *)0)) { goto fail_free_gid_list; } else { } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { if ((unsigned long )ctx_cachep == (unsigned long )((struct kmem_cache *)0)) { ctx_cachep = kmem_cache_create("qla2xxx_ctx", 48UL, 0UL, 8192UL, (void (*)(void * ))0); if ((unsigned long )ctx_cachep == (unsigned long )((struct kmem_cache *)0)) { goto fail_free_gid_list; } else { } } else { } ha->ctx_mempool = mempool_create_slab_pool(128, ctx_cachep); if ((unsigned long )ha->ctx_mempool == (unsigned long )((mempool_t *)0)) { goto fail_free_srb_mempool; } else { } ql_dbg_pci(1073741824U, ha->pdev, 33, "ctx_cachep=%p ctx_mempool=%p.\n", ctx_cachep, ha->ctx_mempool); } else { } ha->nvram = kzalloc(4096UL, 208U); if ((unsigned long )ha->nvram == (unsigned long )((void *)0)) { goto fail_free_ctx_mempool; } else { } snprintf((char *)(& name), 16UL, "%s_%d", (char *)"qla2xxx", (int )(ha->pdev)->device); ha->s_dma_pool = dma_pool_create((char const *)(& name), & (ha->pdev)->dev, 256UL, 8UL, 0UL); if ((unsigned long )ha->s_dma_pool == (unsigned long )((struct dma_pool *)0)) { goto fail_free_nvram; } else { } ql_dbg_pci(1073741824U, ha->pdev, 34, "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); if (((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) || ql2xenabledif != 0) { ha->dl_dma_pool = dma_pool_create((char const *)(& name), & (ha->pdev)->dev, 512UL, 8UL, 0UL); if ((unsigned long )ha->dl_dma_pool == (unsigned long )((struct dma_pool *)0)) { ql_log_pci(0U, ha->pdev, 35, "Failed to allocate memory for dl_dma_pool.\n"); goto fail_s_dma_pool; } else { } ha->fcp_cmnd_dma_pool = dma_pool_create((char const *)(& name), & (ha->pdev)->dev, 512UL, 8UL, 0UL); if ((unsigned long )ha->fcp_cmnd_dma_pool == (unsigned long )((struct dma_pool *)0)) { ql_log_pci(0U, ha->pdev, 36, "Failed to allocate memory for fcp_cmnd_dma_pool.\n"); goto fail_dl_dma_pool; } else { } ql_dbg_pci(1073741824U, ha->pdev, 37, "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n", ha->dl_dma_pool, ha->fcp_cmnd_dma_pool); } else { } if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp___3 = dma_alloc_attrs(& (ha->pdev)->dev, 2064UL, & ha->sns_cmd_dma, 208U, (struct dma_attrs *)0); ha->sns_cmd = (struct sns_cmd_pkt *)tmp___3; if ((unsigned long )ha->sns_cmd == (unsigned long )((struct sns_cmd_pkt *)0)) { goto fail_dma_pool; } else { } ql_dbg_pci(1073741824U, ha->pdev, 38, "sns_cmd: %p.\n", ha->sns_cmd); } else { tmp___4 = dma_pool_alloc(ha->s_dma_pool, 208U, & ha->ms_iocb_dma); ha->ms_iocb = (ms_iocb_entry_t *)tmp___4; if ((unsigned long )ha->ms_iocb == (unsigned long )((ms_iocb_entry_t *)0)) { goto fail_dma_pool; } else { } tmp___5 = dma_alloc_attrs(& (ha->pdev)->dev, 8208UL, & ha->ct_sns_dma, 208U, (struct dma_attrs *)0); ha->ct_sns = (struct ct_sns_pkt *)tmp___5; if ((unsigned long )ha->ct_sns == (unsigned long )((struct ct_sns_pkt *)0)) { goto fail_free_ms_iocb; } else { } ql_dbg_pci(1073741824U, ha->pdev, 39, "ms_iocb=%p ct_sns=%p.\n", ha->ms_iocb, ha->ct_sns); } tmp___6 = kzalloc(184UL, 208U); *req = (struct req_que *)tmp___6; if ((unsigned long )*req == (unsigned long )((struct req_que *)0)) { ql_log_pci(0U, ha->pdev, 40, "Failed to allocate memory for req.\n"); goto fail_req; } else { } (*req)->length = req_len; tmp___7 = dma_alloc_attrs(& (ha->pdev)->dev, (unsigned long )((int )(*req)->length + 1) * 64UL, & (*req)->dma, 208U, (struct dma_attrs *)0); (*req)->ring = 
(request_t *)tmp___7; if ((unsigned long )(*req)->ring == (unsigned long )((request_t *)0)) { ql_log_pci(0U, ha->pdev, 41, "Failed to allocate memory for req_ring.\n"); goto fail_req_ring; } else { } tmp___8 = kzalloc(256UL, 208U); *rsp = (struct rsp_que *)tmp___8; if ((unsigned long )*rsp == (unsigned long )((struct rsp_que *)0)) { ql_log_pci(0U, ha->pdev, 42, "Failed to allocate memory for rsp.\n"); goto fail_rsp; } else { } (*rsp)->hw = ha; (*rsp)->length = rsp_len; tmp___9 = dma_alloc_attrs(& (ha->pdev)->dev, (unsigned long )((int )(*rsp)->length + 1) * 64UL, & (*rsp)->dma, 208U, (struct dma_attrs *)0); (*rsp)->ring = (response_t *)tmp___9; if ((unsigned long )(*rsp)->ring == (unsigned long )((response_t *)0)) { ql_log_pci(0U, ha->pdev, 43, "Failed to allocate memory for rsp_ring.\n"); goto fail_rsp_ring; } else { } (*req)->rsp = *rsp; (*rsp)->req = *req; ql_dbg_pci(1073741824U, ha->pdev, 44, "req=%p req->length=%d req->ring=%p rsp=%p rsp->length=%d rsp->ring=%p.\n", *req, (int )(*req)->length, (*req)->ring, *rsp, (int )(*rsp)->length, (*rsp)->ring); if ((unsigned int )ha->nvram_npiv_size != 0U) { tmp___10 = kzalloc((unsigned long )ha->nvram_npiv_size * 24UL, 208U); ha->npiv_info = (struct qla_npiv_entry *)tmp___10; if ((unsigned long )ha->npiv_info == (unsigned long )((struct qla_npiv_entry *)0)) { ql_log_pci(0U, ha->pdev, 45, "Failed to allocate memory for npiv_info.\n"); goto fail_npiv_info; } else { } } else { ha->npiv_info = (struct qla_npiv_entry *)0; } if (((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) || (ha->device_type & 32768U) != 0U) { tmp___11 = dma_pool_alloc(ha->s_dma_pool, 208U, & ha->ex_init_cb_dma); ha->ex_init_cb = (struct ex_init_cb_81xx *)tmp___11; if ((unsigned long )ha->ex_init_cb == (unsigned long )((struct ex_init_cb_81xx *)0)) { goto fail_ex_init_cb; } else { } ql_dbg_pci(1073741824U, ha->pdev, 46, "ex_init_cb=%p.\n", ha->ex_init_cb); } else { } INIT_LIST_HEAD(& ha->gbl_dsd_list); if ((ha->device_type & 134217728U) == 0U) { ha->async_pd = dma_pool_alloc(ha->s_dma_pool, 208U, & ha->async_pd_dma); if ((unsigned long )ha->async_pd == (unsigned long )((void *)0)) { goto fail_async_pd; } else { } ql_dbg_pci(1073741824U, ha->pdev, 47, "async_pd=%p.\n", ha->async_pd); } else { } INIT_LIST_HEAD(& ha->vp_list); ha->loop_id_map = kzalloc((((unsigned long )ha->max_fibre_devices + 63UL) / 64UL) * 8UL, 208U); if ((unsigned long )ha->loop_id_map == (unsigned long )((void *)0)) { goto fail_async_pd; } else { qla2x00_set_reserved_loop_ids(ha); ql_dbg_pci(1073741824U, ha->pdev, 291, "loop_id_map=%p. 
\n", ha->loop_id_map); } return (1); fail_async_pd: dma_pool_free(ha->s_dma_pool, (void *)ha->ex_init_cb, ha->ex_init_cb_dma); fail_ex_init_cb: kfree((void const *)ha->npiv_info); fail_npiv_info: dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )(*rsp)->length + 1) * 64UL, (void *)(*rsp)->ring, (*rsp)->dma, (struct dma_attrs *)0); (*rsp)->ring = (response_t *)0; (*rsp)->dma = 0ULL; fail_rsp_ring: kfree((void const *)*rsp); fail_rsp: dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )(*req)->length + 1) * 64UL, (void *)(*req)->ring, (*req)->dma, (struct dma_attrs *)0); (*req)->ring = (request_t *)0; (*req)->dma = 0ULL; fail_req_ring: kfree((void const *)*req); fail_req: dma_free_attrs(& (ha->pdev)->dev, 8208UL, (void *)ha->ct_sns, ha->ct_sns_dma, (struct dma_attrs *)0); ha->ct_sns = (struct ct_sns_pkt *)0; ha->ct_sns_dma = 0ULL; fail_free_ms_iocb: dma_pool_free(ha->s_dma_pool, (void *)ha->ms_iocb, ha->ms_iocb_dma); ha->ms_iocb = (ms_iocb_entry_t *)0; ha->ms_iocb_dma = 0ULL; fail_dma_pool: ; if ((ha->device_type & 16384U) != 0U || ql2xenabledif != 0) { dma_pool_destroy(ha->fcp_cmnd_dma_pool); ha->fcp_cmnd_dma_pool = (struct dma_pool *)0; } else { } fail_dl_dma_pool: ; if ((ha->device_type & 16384U) != 0U || ql2xenabledif != 0) { dma_pool_destroy(ha->dl_dma_pool); ha->dl_dma_pool = (struct dma_pool *)0; } else { } fail_s_dma_pool: dma_pool_destroy(ha->s_dma_pool); ha->s_dma_pool = (struct dma_pool *)0; fail_free_nvram: kfree((void const *)ha->nvram); ha->nvram = (void *)0; fail_free_ctx_mempool: mempool_destroy(ha->ctx_mempool); ha->ctx_mempool = (mempool_t *)0; fail_free_srb_mempool: mempool_destroy(ha->srb_mempool); ha->srb_mempool = (mempool_t *)0; fail_free_gid_list: tmp___12 = qla2x00_gid_list_size(ha); dma_free_attrs(& (ha->pdev)->dev, (size_t )tmp___12, (void *)ha->gid_list, ha->gid_list_dma, (struct dma_attrs *)0); ha->gid_list = (struct gid_list_info *)0; ha->gid_list_dma = 0ULL; fail_free_tgt_mem: qlt_mem_free(ha); fail_free_init_cb: dma_free_attrs(& (ha->pdev)->dev, (size_t )ha->init_cb_size, (void *)ha->init_cb, ha->init_cb_dma, (struct dma_attrs *)0); ha->init_cb = (init_cb_t *)0; ha->init_cb_dma = 0ULL; fail: ql_log(0U, (scsi_qla_host_t *)0, 48, "Memory allocation failure.\n"); return (-12); } } static void qla2x00_free_fw_dump(struct qla_hw_data *ha ) { __u32 tmp ; { if ((unsigned long )ha->fce != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 65536UL, ha->fce, ha->fce_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->fw_dump != (unsigned long )((struct qla2xxx_fw_dump *)0)) { if ((unsigned long )ha->eft != (unsigned long )((void *)0)) { tmp = __fswab32((ha->fw_dump)->eft_size); dma_free_attrs(& (ha->pdev)->dev, (size_t )tmp, ha->eft, ha->eft_dma, (struct dma_attrs *)0); } else { } vfree((void const *)ha->fw_dump); } else { } ha->fce = (void *)0; ha->fce_dma = 0ULL; ha->eft = (void *)0; ha->eft_dma = 0ULL; ha->fw_dump = (struct qla2xxx_fw_dump *)0; ha->fw_dumped = 0; ha->fw_dump_reading = 0; return; } } static void qla2x00_mem_free(struct qla_hw_data *ha ) { int tmp ; struct dsd_dma *dsd_ptr ; struct dsd_dma *tdsd_ptr ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int tmp___0 ; { qla2x00_free_fw_dump(ha); if ((unsigned long )ha->mctp_dump != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 548964UL, ha->mctp_dump, ha->mctp_dump_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->srb_mempool != (unsigned long )((mempool_t *)0)) { 
mempool_destroy(ha->srb_mempool); } else { } if ((unsigned long )ha->dcbx_tlv != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 4096UL, ha->dcbx_tlv, ha->dcbx_tlv_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->xgmac_data != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 4096UL, ha->xgmac_data, ha->xgmac_data_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->sns_cmd != (unsigned long )((struct sns_cmd_pkt *)0)) { dma_free_attrs(& (ha->pdev)->dev, 2064UL, (void *)ha->sns_cmd, ha->sns_cmd_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->ct_sns != (unsigned long )((struct ct_sns_pkt *)0)) { dma_free_attrs(& (ha->pdev)->dev, 8208UL, (void *)ha->ct_sns, ha->ct_sns_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->sfp_data != (unsigned long )((void *)0)) { dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); } else { } if ((unsigned long )ha->ms_iocb != (unsigned long )((ms_iocb_entry_t *)0)) { dma_pool_free(ha->s_dma_pool, (void *)ha->ms_iocb, ha->ms_iocb_dma); } else { } if ((unsigned long )ha->ex_init_cb != (unsigned long )((struct ex_init_cb_81xx *)0)) { dma_pool_free(ha->s_dma_pool, (void *)ha->ex_init_cb, ha->ex_init_cb_dma); } else { } if ((unsigned long )ha->async_pd != (unsigned long )((void *)0)) { dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); } else { } if ((unsigned long )ha->s_dma_pool != (unsigned long )((struct dma_pool *)0)) { dma_pool_destroy(ha->s_dma_pool); } else { } if ((unsigned long )ha->gid_list != (unsigned long )((struct gid_list_info *)0)) { tmp = qla2x00_gid_list_size(ha); dma_free_attrs(& (ha->pdev)->dev, (size_t )tmp, (void *)ha->gid_list, ha->gid_list_dma, (struct dma_attrs *)0); } else { } if ((ha->device_type & 16384U) != 0U) { tmp___0 = list_empty((struct list_head const *)(& ha->gbl_dsd_list)); if (tmp___0 == 0) { __mptr = (struct list_head const *)ha->gbl_dsd_list.next; dsd_ptr = (struct dsd_dma *)__mptr; __mptr___0 = (struct list_head const *)dsd_ptr->list.next; tdsd_ptr = (struct dsd_dma *)__mptr___0; goto ldv_62072; ldv_62071: dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); list_del(& dsd_ptr->list); kfree((void const *)dsd_ptr); dsd_ptr = tdsd_ptr; __mptr___1 = (struct list_head const *)tdsd_ptr->list.next; tdsd_ptr = (struct dsd_dma *)__mptr___1; ldv_62072: ; if ((unsigned long )(& dsd_ptr->list) != (unsigned long )(& ha->gbl_dsd_list)) { goto ldv_62071; } else { } } else { } } else { } if ((unsigned long )ha->dl_dma_pool != (unsigned long )((struct dma_pool *)0)) { dma_pool_destroy(ha->dl_dma_pool); } else { } if ((unsigned long )ha->fcp_cmnd_dma_pool != (unsigned long )((struct dma_pool *)0)) { dma_pool_destroy(ha->fcp_cmnd_dma_pool); } else { } if ((unsigned long )ha->ctx_mempool != (unsigned long )((mempool_t *)0)) { mempool_destroy(ha->ctx_mempool); } else { } qlt_mem_free(ha); if ((unsigned long )ha->init_cb != (unsigned long )((init_cb_t *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )ha->init_cb_size, (void *)ha->init_cb, ha->init_cb_dma, (struct dma_attrs *)0); } else { } vfree((void const *)ha->optrom_buffer); kfree((void const *)ha->nvram); kfree((void const *)ha->npiv_info); kfree((void const *)ha->swl); kfree((void const *)ha->loop_id_map); ha->srb_mempool = (mempool_t *)0; ha->ctx_mempool = (mempool_t *)0; ha->sns_cmd = (struct sns_cmd_pkt *)0; ha->sns_cmd_dma = 0ULL; ha->ct_sns = (struct ct_sns_pkt *)0; ha->ct_sns_dma = 0ULL; ha->ms_iocb = (ms_iocb_entry_t *)0; ha->ms_iocb_dma = 0ULL; 
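/*
 * Once the remaining handles below are reset, qla2x00_create_host(), which
 * allocates the Scsi_Host with its embedded scsi_qla_host, and the DPC
 * work-event helpers (qla2x00_alloc_work()/qla2x00_post_work() plus the
 * qla2x00_post_*_work() wrappers that queue AEN, IDC-ack, uevent and async
 * login/logout/ADISC events) are defined next.
 */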
ha->init_cb = (init_cb_t *)0; ha->init_cb_dma = 0ULL; ha->ex_init_cb = (struct ex_init_cb_81xx *)0; ha->ex_init_cb_dma = 0ULL; ha->async_pd = (void *)0; ha->async_pd_dma = 0ULL; ha->s_dma_pool = (struct dma_pool *)0; ha->dl_dma_pool = (struct dma_pool *)0; ha->fcp_cmnd_dma_pool = (struct dma_pool *)0; ha->gid_list = (struct gid_list_info *)0; ha->gid_list_dma = 0ULL; ha->tgt.atio_ring = (struct atio *)0; ha->tgt.atio_dma = 0ULL; ha->tgt.tgt_vp_map = (struct qla_tgt_vp_map *)0; return; } } struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht , struct qla_hw_data *ha ) { struct Scsi_Host *host ; struct scsi_qla_host *vha ; void *tmp ; struct lock_class_key __key ; char const *tmp___0 ; { vha = (struct scsi_qla_host *)0; host = ldv_scsi_host_alloc_6(sht, 992); if ((unsigned long )host == (unsigned long )((struct Scsi_Host *)0)) { ql_log_pci(0U, ha->pdev, 263, "Failed to allocate host from the scsi layer, aborting.\n"); goto fail; } else { } tmp = shost_priv(host); vha = (struct scsi_qla_host *)tmp; memset((void *)vha, 0, 992UL); vha->host = host; vha->host_no = (unsigned long )host->host_no; vha->hw = ha; INIT_LIST_HEAD(& vha->vp_fcports); INIT_LIST_HEAD(& vha->work_list); INIT_LIST_HEAD(& vha->list); spinlock_check(& vha->work_lock); __raw_spin_lock_init(& vha->work_lock.ldv_6105.rlock, "&(&vha->work_lock)->rlock", & __key); sprintf((char *)(& vha->host_str), "%s_%ld", (char *)"qla2xxx", vha->host_no); tmp___0 = dev_name((struct device const *)(& (ha->pdev)->dev)); ql_dbg(1073741824U, vha, 65, "Allocated the host=%p hw=%p vha=%p dev_name=%s", vha->host, vha->hw, vha, tmp___0); return (vha); fail: ; return (vha); } } static struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *vha , enum qla_work_type type ) { struct qla_work_evt *e ; uint8_t bail ; void *tmp ; { atomic_inc(& vha->vref_count); __asm__ volatile ("mfence": : : "memory"); if (*((unsigned long *)vha + 19UL) != 0UL) { atomic_dec(& vha->vref_count); bail = 1U; } else { bail = 0U; } if ((unsigned int )bail != 0U) { return ((struct qla_work_evt *)0); } else { } tmp = kzalloc(64UL, 32U); e = (struct qla_work_evt *)tmp; if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { atomic_dec(& vha->vref_count); return ((struct qla_work_evt *)0); } else { } INIT_LIST_HEAD(& e->list); e->type = type; e->flags = 1U; return (e); } } static int qla2x00_post_work(struct scsi_qla_host *vha , struct qla_work_evt *e ) { unsigned long flags ; raw_spinlock_t *tmp ; { tmp = spinlock_check(& vha->work_lock); flags = _raw_spin_lock_irqsave(tmp); list_add_tail(& e->list, & vha->work_list); spin_unlock_irqrestore(& vha->work_lock, flags); qla2xxx_wake_dpc(vha); return (0); } } int qla2x00_post_aen_work(struct scsi_qla_host *vha , enum fc_host_event_code code , u32 data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 0); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.aen.code = code; e->u.aen.data = data; tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_idc_ack_work(struct scsi_qla_host *vha , uint16_t *mb ) { struct qla_work_evt *e ; size_t __len ; void *__ret ; int tmp ; { e = qla2x00_alloc_work(vha, 1); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } __len = 14UL; if (__len > 63UL) { __ret = __memcpy((void *)(& e->u.idc_ack.mb), (void const *)mb, __len); } else { __ret = __builtin_memcpy((void *)(& e->u.idc_ack.mb), (void const *)mb, __len); } tmp = qla2x00_post_work(vha, 
e); return (tmp); } } int qla2x00_post_async_login_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 2); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.logio.fcport = fcport; if ((unsigned long )data != (unsigned long )((uint16_t *)0U)) { e->u.logio.data[0] = *data; e->u.logio.data[1] = *(data + 1UL); } else { } tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_async_login_done_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 3); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.logio.fcport = fcport; if ((unsigned long )data != (unsigned long )((uint16_t *)0U)) { e->u.logio.data[0] = *data; e->u.logio.data[1] = *(data + 1UL); } else { } tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_async_logout_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 4); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.logio.fcport = fcport; if ((unsigned long )data != (unsigned long )((uint16_t *)0U)) { e->u.logio.data[0] = *data; e->u.logio.data[1] = *(data + 1UL); } else { } tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_async_logout_done_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 5); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.logio.fcport = fcport; if ((unsigned long )data != (unsigned long )((uint16_t *)0U)) { e->u.logio.data[0] = *data; e->u.logio.data[1] = *(data + 1UL); } else { } tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_async_adisc_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 6); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.logio.fcport = fcport; if ((unsigned long )data != (unsigned long )((uint16_t *)0U)) { e->u.logio.data[0] = *data; e->u.logio.data[1] = *(data + 1UL); } else { } tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 7); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.logio.fcport = fcport; if ((unsigned long )data != (unsigned long )((uint16_t *)0U)) { e->u.logio.data[0] = *data; e->u.logio.data[1] = *(data + 1UL); } else { } tmp = qla2x00_post_work(vha, e); return (tmp); } } int qla2x00_post_uevent_work(struct scsi_qla_host *vha , u32 code ) { struct qla_work_evt *e ; int tmp ; { e = qla2x00_alloc_work(vha, 8); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.uevent.code = code; tmp = qla2x00_post_work(vha, e); return (tmp); } } static void qla2x00_uevent_emit(struct scsi_qla_host *vha , u32 code ) { char event_string[40U] ; char *envp[2U] ; { envp[0] = (char *)(& event_string); envp[1] = (char *)0; switch (code) { case 0U: snprintf((char *)(& event_string), 40UL, "FW_DUMP=%ld", vha->host_no); goto ldv_62158; default: ; 
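/*
 * Only code 0 (the firmware-dump notification) formats an event string; any
 * other code falls through here to the common kobject_uevent_env() call.
 * qlafx00_post_aenfx_work() and qla2x00_do_work(), the dispatcher that
 * drains vha->work_list and runs each queued qla_work_evt by type, follow.
 */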
goto ldv_62158; } ldv_62158: kobject_uevent_env(& ((vha->hw)->pdev)->dev.kobj, 2, (char **)(& envp)); return; } } int qlafx00_post_aenfx_work(struct scsi_qla_host *vha , uint32_t evtcode , uint32_t *data , int cnt ) { struct qla_work_evt *e ; size_t __len ; void *__ret ; int tmp ; { e = qla2x00_alloc_work(vha, 9); if ((unsigned long )e == (unsigned long )((struct qla_work_evt *)0)) { return (258); } else { } e->u.aenfx.evtcode = evtcode; e->u.aenfx.count = (uint32_t )cnt; __len = (unsigned long )cnt * 4UL; __ret = __builtin_memcpy((void *)(& e->u.aenfx.mbx), (void const *)data, __len); tmp = qla2x00_post_work(vha, e); return (tmp); } } void qla2x00_do_work(struct scsi_qla_host *vha ) { struct qla_work_evt *e ; struct qla_work_evt *tmp ; unsigned long flags ; struct list_head work ; raw_spinlock_t *tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; u32 tmp___1 ; struct list_head const *__mptr___1 ; { work.next = & work; work.prev = & work; tmp___0 = spinlock_check(& vha->work_lock); flags = _raw_spin_lock_irqsave(tmp___0); list_splice_init(& vha->work_list, & work); spin_unlock_irqrestore(& vha->work_lock, flags); __mptr = (struct list_head const *)work.next; e = (struct qla_work_evt *)__mptr; __mptr___0 = (struct list_head const *)e->list.next; tmp = (struct qla_work_evt *)__mptr___0; goto ldv_62198; ldv_62197: list_del_init(& e->list); switch ((unsigned int )e->type) { case 0U: tmp___1 = fc_get_event_number(); fc_host_post_event(vha->host, tmp___1, e->u.aen.code, e->u.aen.data); goto ldv_62187; case 1U: qla81xx_idc_ack(vha, (uint16_t *)(& e->u.idc_ack.mb)); goto ldv_62187; case 2U: qla2x00_async_login(vha, e->u.logio.fcport, (uint16_t *)(& e->u.logio.data)); goto ldv_62187; case 3U: qla2x00_async_login_done(vha, e->u.logio.fcport, (uint16_t *)(& e->u.logio.data)); goto ldv_62187; case 4U: qla2x00_async_logout(vha, e->u.logio.fcport); goto ldv_62187; case 5U: qla2x00_async_logout_done(vha, e->u.logio.fcport, (uint16_t *)(& e->u.logio.data)); goto ldv_62187; case 6U: qla2x00_async_adisc(vha, e->u.logio.fcport, (uint16_t *)(& e->u.logio.data)); goto ldv_62187; case 7U: qla2x00_async_adisc_done(vha, e->u.logio.fcport, (uint16_t *)(& e->u.logio.data)); goto ldv_62187; case 8U: qla2x00_uevent_emit(vha, e->u.uevent.code); goto ldv_62187; case 9U: qlafx00_process_aen(vha, e); goto ldv_62187; } ldv_62187: ; if ((int )e->flags & 1) { kfree((void const *)e); } else { } atomic_dec(& vha->vref_count); e = tmp; __mptr___1 = (struct list_head const *)tmp->list.next; tmp = (struct qla_work_evt *)__mptr___1; ldv_62198: ; if ((unsigned long )(& e->list) != (unsigned long )(& work)) { goto ldv_62197; } else { } return; } } void qla2x00_relogin(struct scsi_qla_host *vha ) { fc_port_t *fcport ; int status ; uint16_t next_loopid ; struct qla_hw_data *ha ; uint16_t data[2U] ; struct list_head const *__mptr ; int status2 ; uint8_t opts ; int tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; { next_loopid = 0U; ha = vha->hw; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_62217; ldv_62216: tmp = atomic_read((atomic_t const *)(& fcport->state)); if ((tmp != 4 && fcport->login_retry != 0) && (fcport->flags & 8U) == 0U) { fcport->login_retry = fcport->login_retry - 1; if ((int )fcport->flags & 1) { if ((fcport->flags & 4U) != 0U) { (*((ha->isp_ops)->fabric_logout))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { } if ((unsigned int )fcport->loop_id == 4096U) { 
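/*
 * 0x1000 is the driver's FC_NO_LOOP_ID: the port has no loop ID assigned
 * yet, so a fresh one is picked via qla2x00_find_new_loop_id() before the
 * fabric login is retried below.
 */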
next_loopid = ha->min_external_loopid; fcport->loop_id = next_loopid; status = qla2x00_find_new_loop_id(vha, fcport); if (status != 0) { goto ldv_62212; } else { } } else { } if ((((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) || (ha->device_type & 134217728U) != 0U) { fcport->flags = fcport->flags | 8U; data[0] = 0U; data[1] = 1U; status = qla2x00_post_async_login_work(vha, fcport, (uint16_t *)(& data)); if (status == 0) { goto ldv_62213; } else { } status = 1; } else { status = qla2x00_fabric_login(vha, fcport, & next_loopid); if (status == 0) { opts = 0U; if ((fcport->flags & 4U) != 0U) { opts = (uint8_t )((unsigned int )opts | 2U); } else { } status2 = qla2x00_get_port_database(vha, fcport, (int )opts); if (status2 != 0) { status = 1; } else { } } else { } } } else { status = qla2x00_local_device_login(vha, fcport); } if (status == 0) { fcport->old_loop_id = fcport->loop_id; ql_dbg(268435456U, vha, 8195, "Port login OK: logged in ID 0x%x.\n", (int )fcport->loop_id); qla2x00_update_fcport(vha, fcport); } else if (status == 1) { set_bit(8L, (unsigned long volatile *)(& vha->dpc_flags)); ql_dbg(268435456U, vha, 8199, "Retrying %d login again loop_id 0x%x.\n", fcport->login_retry, (int )fcport->loop_id); } else { fcport->login_retry = 0; } if (fcport->login_retry == 0 && status != 0) { qla2x00_clear_loop_id(fcport); } else { } } else { } tmp___0 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { goto ldv_62212; } else { } ldv_62213: __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_62217: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_62216; } else { } ldv_62212: ; return; } } void qla83xx_schedule_work(scsi_qla_host_t *base_vha , int work_code ) { struct qla_hw_data *ha ; { ha = base_vha->hw; switch (work_code) { case 33280: ; if ((unsigned long )ha->dpc_lp_wq != (unsigned long )((struct workqueue_struct *)0)) { queue_work(ha->dpc_lp_wq, & ha->idc_aen); } else { } goto ldv_62224; case 1: ; if (*((unsigned long *)ha + 2UL) == 0UL) { if ((unsigned long )ha->dpc_hp_wq != (unsigned long )((struct workqueue_struct *)0)) { queue_work(ha->dpc_hp_wq, & ha->nic_core_reset); } else { } } else { ql_dbg(524288U, base_vha, 45150, "NIC Core reset is already active. 
Skip scheduling it again.\n"); } goto ldv_62224; case 2: ; if ((unsigned long )ha->dpc_hp_wq != (unsigned long )((struct workqueue_struct *)0)) { queue_work(ha->dpc_hp_wq, & ha->idc_state_handler); } else { } goto ldv_62224; case 3: ; if ((unsigned long )ha->dpc_hp_wq != (unsigned long )((struct workqueue_struct *)0)) { queue_work(ha->dpc_hp_wq, & ha->nic_core_unrecoverable); } else { } goto ldv_62224; default: ql_log(1U, base_vha, 45151, "Unknow work-code=0x%x.\n", work_code); } ldv_62224: ; return; } } void qla83xx_nic_core_unrecoverable_work(struct work_struct *work ) { struct qla_hw_data *ha ; struct work_struct const *__mptr ; scsi_qla_host_t *base_vha ; void *tmp ; uint32_t dev_state ; { __mptr = (struct work_struct const *)work; ha = (struct qla_hw_data *)__mptr + 0xfffffffffffff3d8UL; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; dev_state = 0U; qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, 571483012U, & dev_state); qla83xx_reset_ownership(base_vha); if (*((unsigned long *)ha + 2UL) != 0UL) { ha->flags.nic_core_reset_owner = 0U; qla83xx_wr_reg(base_vha, 571483012U, 6U); ql_log(2U, base_vha, 45152, "HW State: FAILED.\n"); qla83xx_schedule_work(base_vha, 2); } else { } qla83xx_idc_unlock(base_vha, 0); return; } } void qla83xx_idc_state_handler_work(struct work_struct *work ) { struct qla_hw_data *ha ; struct work_struct const *__mptr ; scsi_qla_host_t *base_vha ; void *tmp ; uint32_t dev_state ; { __mptr = (struct work_struct const *)work; ha = (struct qla_hw_data *)__mptr + 0xfffffffffffff428UL; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; dev_state = 0U; qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, 571483012U, & dev_state); if (dev_state == 6U || dev_state == 5U) { qla83xx_idc_state_handler(base_vha); } else { } qla83xx_idc_unlock(base_vha, 0); return; } } static int qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha ) { int rval ; unsigned long heart_beat_wait ; uint32_t heart_beat_counter1 ; uint32_t heart_beat_counter2 ; { rval = 0; heart_beat_wait = (unsigned long )jiffies + 250UL; ldv_62259: ; if ((long )(heart_beat_wait - (unsigned long )jiffies) < 0L) { ql_dbg(524288U, base_vha, 45180, "Nic Core f/w is not alive.\n"); rval = 258; goto ldv_62258; } else { } qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, 571482288U, & heart_beat_counter1); qla83xx_idc_unlock(base_vha, 0); msleep(100U); qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, 571482288U, & heart_beat_counter2); qla83xx_idc_unlock(base_vha, 0); if (heart_beat_counter1 == heart_beat_counter2) { goto ldv_62259; } else { } ldv_62258: ; return (rval); } } void qla83xx_nic_core_reset_work(struct work_struct *work ) { struct qla_hw_data *ha ; struct work_struct const *__mptr ; scsi_qla_host_t *base_vha ; void *tmp ; uint32_t dev_state ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { __mptr = (struct work_struct const *)work; ha = (struct qla_hw_data *)__mptr + 0xfffffffffffff478UL; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; dev_state = 0U; if ((ha->device_type & 32768U) != 0U) { tmp___0 = qla2xxx_mctp_dump(base_vha); if (tmp___0 != 0) { ql_log(1U, base_vha, 45185, "Failed to dump mctp\n"); } else { } return; } else { } if (*((unsigned long *)ha + 2UL) == 0UL) { tmp___1 = qla83xx_check_nic_core_fw_alive(base_vha); if (tmp___1 == 0) { qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, 571483012U, & dev_state); qla83xx_idc_unlock(base_vha, 0); if (dev_state != 4U) { ql_dbg(524288U, base_vha, 45178, "Nic Core f/w is 
alive.\n"); return; } else { } } else { } ha->flags.nic_core_reset_hdlr_active = 1U; tmp___2 = qla83xx_nic_core_reset(base_vha); if (tmp___2 != 0) { ql_dbg(524288U, base_vha, 45153, "NIC Core reset failed.\n"); } else { } ha->flags.nic_core_reset_hdlr_active = 0U; } else { } return; } } void qla83xx_service_idc_aen(struct work_struct *work ) { struct qla_hw_data *ha ; struct work_struct const *__mptr ; scsi_qla_host_t *base_vha ; void *tmp ; uint32_t dev_state ; uint32_t idc_control ; int tmp___0 ; { __mptr = (struct work_struct const *)work; ha = (struct qla_hw_data *)__mptr + 0xfffffffffffff4d0UL; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, 571483012U, & dev_state); qla83xx_rd_reg(base_vha, 571483024U, & idc_control); qla83xx_idc_unlock(base_vha, 0); if (dev_state == 4U) { if ((idc_control & 2U) != 0U) { ql_dbg(524288U, base_vha, 45154, "Application requested NIC Core Reset.\n"); qla83xx_schedule_work(base_vha, 1); } else { tmp___0 = qla83xx_check_nic_core_fw_alive(base_vha); if (tmp___0 == 0) { ql_dbg(524288U, base_vha, 45179, "Other protocol driver requested NIC Core Reset.\n"); qla83xx_schedule_work(base_vha, 1); } else { } } } else if (dev_state == 6U || dev_state == 5U) { qla83xx_schedule_work(base_vha, 2); } else { } return; } } static void qla83xx_wait_logic(void) { int i ; struct thread_info *tmp ; { tmp = current_thread_info(); if (((unsigned long )tmp->preempt_count & 134217472UL) == 0UL) { msleep(100U); schedule(); } else { i = 0; goto ldv_62282; ldv_62281: cpu_relax(); i = i + 1; ldv_62282: ; if (i <= 19) { goto ldv_62281; } else { } } return; } } static int qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha ) { int rval ; uint32_t data ; uint32_t idc_lck_rcvry_stage_mask ; uint32_t idc_lck_rcvry_owner_mask ; struct qla_hw_data *ha ; { idc_lck_rcvry_stage_mask = 3U; idc_lck_rcvry_owner_mask = 60U; ha = base_vha->hw; ql_dbg(524288U, base_vha, 45190, "Trying force recovery of the IDC lock.\n"); rval = qla83xx_rd_reg(base_vha, 571483036U, & data); if (rval != 0) { return (rval); } else { } if ((data & idc_lck_rcvry_stage_mask) != 0U) { return (0); } else { data = (uint32_t )(((int )ha->portnum << 2) | 1); rval = qla83xx_wr_reg(base_vha, 571483036U, data); if (rval != 0) { return (rval); } else { } msleep(200U); rval = qla83xx_rd_reg(base_vha, 571483036U, & data); if (rval != 0) { return (rval); } else { } if ((data & idc_lck_rcvry_owner_mask) >> 2 == (uint32_t )ha->portnum) { data = (~ idc_lck_rcvry_stage_mask | 2U) & data; rval = qla83xx_wr_reg(base_vha, 571483036U, data); if (rval != 0) { return (rval); } else { } rval = qla83xx_rd_reg(base_vha, 2165424172U, & data); if (rval != 0) { return (rval); } else { } rval = qla83xx_wr_reg(base_vha, 571482372U, 255U); if (rval != 0) { return (rval); } else { } rval = qla83xx_wr_reg(base_vha, 571483036U, 0U); if (rval != 0) { return (rval); } else { } } else { return (0); } } return (rval); } } static int qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha ) { int rval ; uint32_t o_drv_lockid ; uint32_t n_drv_lockid ; unsigned long lock_recovery_timeout ; int tmp ; { rval = 0; lock_recovery_timeout = (unsigned long )jiffies + 500UL; retry_lockid: rval = qla83xx_rd_reg(base_vha, 571482372U, & o_drv_lockid); if (rval != 0) { goto exit; } else { } if ((long )((unsigned long )jiffies - lock_recovery_timeout) >= 0L) { tmp = qla83xx_force_lock_recovery(base_vha); if (tmp == 0) { return (0); } else { return (258); } } else { } rval = qla83xx_rd_reg(base_vha, 
571482372U, & n_drv_lockid); if (rval != 0) { goto exit; } else { } if (o_drv_lockid == n_drv_lockid) { qla83xx_wait_logic(); goto retry_lockid; } else { return (0); } exit: ; return (rval); } } void qla83xx_idc_lock(scsi_qla_host_t *base_vha , uint16_t requester_id ) { uint16_t options ; uint32_t data ; uint32_t lock_owner ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { options = (uint16_t )((int )((short )((int )requester_id << 15)) | 64); ha = base_vha->hw; retry_lock: tmp___0 = qla83xx_rd_reg(base_vha, 2165424168U, & data); if (tmp___0 == 0) { if (data != 0U) { qla83xx_wr_reg(base_vha, 571482372U, (uint32_t )ha->portnum); } else { qla83xx_rd_reg(base_vha, 571482372U, & lock_owner); ql_dbg(524288U, base_vha, 45155, "Failed to acquire IDC lock, acquired by %d, retrying...\n", lock_owner); tmp = qla83xx_idc_lock_recovery(base_vha); if (tmp == 0) { qla83xx_wait_logic(); goto retry_lock; } else { ql_log(1U, base_vha, 45173, "IDC Lock recovery FAILED.\n"); } } } else { } return; retry_lock2: tmp___2 = qla83xx_access_control(base_vha, (int )options, 0U, 0U, (uint16_t *)0U); if (tmp___2 != 0) { ql_dbg(524288U, base_vha, 45170, "Failed to acquire IDC lock. retrying...\n"); tmp___1 = qla83xx_idc_lock_recovery(base_vha); if (tmp___1 == 0) { qla83xx_wait_logic(); goto retry_lock2; } else { ql_log(1U, base_vha, 45174, "IDC Lock recovery FAILED.\n"); } } else { } return; } } void qla83xx_idc_unlock(scsi_qla_host_t *base_vha , uint16_t requester_id ) { uint16_t options ; uint16_t retry ; uint32_t data ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { options = (uint16_t )((int )((short )((int )requester_id << 15)) | 128); ha = base_vha->hw; retry = 0U; retry_unlock: tmp = qla83xx_rd_reg(base_vha, 571482372U, & data); if (tmp == 0) { if ((uint32_t )ha->portnum == data) { qla83xx_rd_reg(base_vha, 2165424172U, & data); qla83xx_wr_reg(base_vha, 571482372U, 255U); } else if ((unsigned int )retry <= 9U) { qla83xx_wait_logic(); retry = (uint16_t )((int )retry + 1); ql_dbg(524288U, base_vha, 45156, "Failed to release IDC lock, retyring=%d\n", (int )retry); goto retry_unlock; } else { } } else if ((unsigned int )retry <= 9U) { qla83xx_wait_logic(); retry = (uint16_t )((int )retry + 1); ql_dbg(524288U, base_vha, 45157, "Failed to read drv-lockid, retyring=%d\n", (int )retry); goto retry_unlock; } else { } return; retry = 0U; retry_unlock2: tmp___0 = qla83xx_access_control(base_vha, (int )options, 0U, 0U, (uint16_t *)0U); if (tmp___0 != 0) { if ((unsigned int )retry <= 9U) { qla83xx_wait_logic(); retry = (uint16_t )((int )retry + 1); ql_dbg(524288U, base_vha, 45158, "Failed to release IDC lock, retyring=%d\n", (int )retry); goto retry_unlock2; } else { } } else { } return; } } int __qla83xx_set_drv_presence(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; uint32_t drv_presence ; { rval = 0; ha = vha->hw; rval = qla83xx_rd_reg(vha, 571483016U, & drv_presence); if (rval == 0) { drv_presence = (uint32_t )(1 << (int )ha->portnum) | drv_presence; rval = qla83xx_wr_reg(vha, 571483016U, drv_presence); } else { } return (rval); } } int qla83xx_set_drv_presence(scsi_qla_host_t *vha ) { int rval ; { rval = 0; qla83xx_idc_lock(vha, 0); rval = __qla83xx_set_drv_presence(vha); qla83xx_idc_unlock(vha, 0); return (rval); } } int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; uint32_t drv_presence ; { rval = 0; ha = vha->hw; rval = qla83xx_rd_reg(vha, 571483016U, & drv_presence); if (rval == 0) { drv_presence = (uint32_t )(~ (1 << (int 
)ha->portnum)) & drv_presence; rval = qla83xx_wr_reg(vha, 571483016U, drv_presence); } else { } return (rval); } } int qla83xx_clear_drv_presence(scsi_qla_host_t *vha ) { int rval ; { rval = 0; qla83xx_idc_lock(vha, 0); rval = __qla83xx_clear_drv_presence(vha); qla83xx_idc_unlock(vha, 0); return (rval); } } static void qla83xx_need_reset_handler(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t drv_ack ; uint32_t drv_presence ; unsigned long ack_timeout ; { ha = vha->hw; ack_timeout = (unsigned long )(ha->fcoe_reset_timeout * 250U) + (unsigned long )jiffies; ldv_62361: qla83xx_rd_reg(vha, 571483020U, & drv_ack); qla83xx_rd_reg(vha, 571483016U, & drv_presence); if ((drv_ack & drv_presence) == drv_presence) { goto ldv_62354; } else { } if ((long )((unsigned long )jiffies - ack_timeout) >= 0L) { ql_log(1U, vha, 45159, "RESET ACK TIMEOUT! drv_presence=0x%x drv_ack=0x%x\n", drv_presence, drv_ack); if (drv_ack != drv_presence) { qla83xx_wr_reg(vha, 571483016U, drv_ack); } else { } goto ldv_62354; } else { } qla83xx_idc_unlock(vha, 0); msleep(1000U); qla83xx_idc_lock(vha, 0); goto ldv_62361; ldv_62354: qla83xx_wr_reg(vha, 571483012U, 1U); ql_log(2U, vha, 45160, "HW State: COLD/RE-INIT.\n"); return; } } static int qla83xx_device_bootstrap(scsi_qla_host_t *vha ) { int rval ; uint32_t idc_control ; { rval = 0; qla83xx_wr_reg(vha, 571483012U, 2U); ql_log(2U, vha, 45161, "HW State: INITIALIZING.\n"); __qla83xx_get_idc_control(vha, & idc_control); idc_control = idc_control & 4294967293U; __qla83xx_set_idc_control(vha, 0U); qla83xx_idc_unlock(vha, 0); rval = qla83xx_restart_nic_firmware(vha); qla83xx_idc_lock(vha, 0); if (rval != 0) { ql_log(0U, vha, 45162, "Failed to restart NIC f/w.\n"); qla83xx_wr_reg(vha, 571483012U, 6U); ql_log(2U, vha, 45163, "HW State: FAILED.\n"); } else { ql_dbg(524288U, vha, 45164, "Success in restarting nic f/w.\n"); qla83xx_wr_reg(vha, 571483012U, 3U); ql_log(2U, vha, 45165, "HW State: READY.\n"); } return (rval); } } int qla83xx_idc_state_handler(scsi_qla_host_t *base_vha ) { struct qla_hw_data *ha ; int rval ; unsigned long dev_init_timeout ; uint32_t dev_state ; { ha = base_vha->hw; rval = 0; dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; ldv_62391: ; if ((long )((unsigned long )jiffies - dev_init_timeout) >= 0L) { ql_log(1U, base_vha, 45166, "Initialization TIMEOUT!\n"); qla83xx_wr_reg(base_vha, 571483012U, 6U); ql_log(2U, base_vha, 45167, "HW State: FAILED.\n"); } else { } qla83xx_rd_reg(base_vha, 571483012U, & dev_state); switch (dev_state) { case 3U: ; if (*((unsigned long *)ha + 2UL) != 0UL) { qla83xx_idc_audit(base_vha, 1); } else { } ha->flags.nic_core_reset_owner = 0U; ql_dbg(524288U, base_vha, 45168, "Reset_owner reset by 0x%x.\n", (int )ha->portnum); goto exit; case 1U: ; if (*((unsigned long *)ha + 2UL) != 0UL) { rval = qla83xx_device_bootstrap(base_vha); } else { qla83xx_idc_unlock(base_vha, 0); msleep(1000U); qla83xx_idc_lock(base_vha, 0); } goto ldv_62383; case 2U: qla83xx_idc_unlock(base_vha, 0); msleep(1000U); qla83xx_idc_lock(base_vha, 0); goto ldv_62383; case 4U: ; if (ql2xdontresethba == 0 && *((unsigned long *)ha + 2UL) != 0UL) { qla83xx_need_reset_handler(base_vha); } else { qla83xx_idc_unlock(base_vha, 0); msleep(1000U); qla83xx_idc_lock(base_vha, 0); } dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; goto ldv_62383; case 5U: qla83xx_idc_unlock(base_vha, 0); msleep(1000U); qla83xx_idc_lock(base_vha, 0); goto ldv_62383; case 7U: ; if (*((unsigned 
long *)ha + 2UL) != 0UL) { goto exit; } else { } qla83xx_idc_unlock(base_vha, 0); msleep(1000U); qla83xx_idc_lock(base_vha, 0); dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; goto ldv_62383; case 6U: ; if (*((unsigned long *)ha + 2UL) != 0UL) { qla83xx_idc_audit(base_vha, 1); } else { } ha->flags.nic_core_reset_owner = 0U; __qla83xx_clear_drv_presence(base_vha); qla83xx_idc_unlock(base_vha, 0); qla8xxx_dev_failed_handler(base_vha); rval = 258; qla83xx_idc_lock(base_vha, 0); goto exit; case 3134241488U: qla83xx_idc_unlock(base_vha, 0); msleep(1000U); qla83xx_idc_lock(base_vha, 0); goto ldv_62383; default: ql_log(1U, base_vha, 45169, "Unknow Device State: %x.\n", dev_state); qla83xx_idc_unlock(base_vha, 0); qla8xxx_dev_failed_handler(base_vha); rval = 258; qla83xx_idc_lock(base_vha, 0); goto exit; } ldv_62383: ; goto ldv_62391; exit: ; return (rval); } } static int qla2x00_do_dpc(void *data ) { int rval ; scsi_qla_host_t *base_vha ; struct qla_hw_data *ha ; void *tmp ; struct task_struct *tmp___0 ; long volatile __ret ; struct task_struct *tmp___1 ; struct task_struct *tmp___2 ; struct task_struct *tmp___3 ; struct task_struct *tmp___4 ; struct task_struct *tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; int tmp___19 ; int tmp___20 ; int ret ; int tmp___21 ; int tmp___22 ; int tmp___23 ; int tmp___24 ; int tmp___25 ; int tmp___26 ; int tmp___27 ; int tmp___28 ; int tmp___29 ; int tmp___30 ; int tmp___31 ; int tmp___32 ; int tmp___33 ; int tmp___34 ; long volatile __ret___0 ; struct task_struct *tmp___35 ; struct task_struct *tmp___36 ; struct task_struct *tmp___37 ; struct task_struct *tmp___38 ; bool tmp___39 ; int tmp___40 ; struct task_struct *tmp___41 ; { ha = (struct qla_hw_data *)data; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; tmp___0 = get_current(); set_user_nice(tmp___0, -20L); __ret = 1L; switch (8UL) { case 1UL: tmp___1 = get_current(); __asm__ volatile ("xchgb %b0, %1\n": "+q" (__ret), "+m" (tmp___1->state): : "memory", "cc"); goto ldv_62400; case 2UL: tmp___2 = get_current(); __asm__ volatile ("xchgw %w0, %1\n": "+r" (__ret), "+m" (tmp___2->state): : "memory", "cc"); goto ldv_62400; case 4UL: tmp___3 = get_current(); __asm__ volatile ("xchgl %0, %1\n": "+r" (__ret), "+m" (tmp___3->state): : "memory", "cc"); goto ldv_62400; case 8UL: tmp___4 = get_current(); __asm__ volatile ("xchgq %q0, %1\n": "+r" (__ret), "+m" (tmp___4->state): : "memory", "cc"); goto ldv_62400; default: __xchg_wrong_size(); } ldv_62400: ; goto ldv_62407; ldv_62419: ql_dbg(67108864U, base_vha, 16384, "DPC handler sleeping.\n"); schedule(); tmp___5 = get_current(); tmp___5->state = 0L; if (*((unsigned long *)base_vha + 19UL) == 0UL || *((unsigned long *)ha + 2UL) != 0UL) { goto end_loop; } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(67108864U, base_vha, 16387, "eeh_busy=%d.\n", (int )ha->flags.eeh_busy); goto end_loop; } else { } ha->dpc_active = 1U; ql_dbg(67141632U, base_vha, 16385, "DPC handler waking up, dpc_flags=0x%lx.\n", base_vha->dpc_flags); qla2x00_do_work(base_vha); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { if ((ha->device_type & 262144U) != 0U) { tmp___6 = test_and_clear_bit(17L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___6 != 0) { qla8044_idc_lock(ha); qla8044_wr_direct(base_vha, 4U, 6U); 
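/* DPC handler, ISP8044 branch: with the IDC lock held, direct register index 4 is
 * written with 6 -- the value the adjacent ql_log() reports as "HW State: FAILED" --
 * and the 8044 device-state handler is then run before the DPC loop goes back to
 * sleep. */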
qla8044_idc_unlock(ha); ql_log(2U, base_vha, 16388, "HW State: FAILED.\n"); qla8044_device_state_handler(base_vha); goto ldv_62407; } else { } } else { tmp___7 = test_and_clear_bit(17L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___7 != 0) { qla82xx_idc_lock(ha); qla82xx_wr_32(ha, 136323392UL, 6U); qla82xx_idc_unlock(ha); ql_log(2U, base_vha, 337, "HW State: FAILED.\n"); qla82xx_device_state_handler(base_vha); goto ldv_62407; } else { } } tmp___10 = test_and_clear_bit(18L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___10 != 0) { ql_dbg(67108864U, base_vha, 16389, "FCoE context reset scheduled.\n"); tmp___9 = test_and_set_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___9 == 0) { tmp___8 = qla82xx_fcoe_ctx_reset(base_vha); if (tmp___8 != 0) { set_bit(2L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } clear_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } ql_dbg(67108864U, base_vha, 16390, "FCoE context reset end.\n"); } else { } } else if ((ha->device_type & 131072U) != 0U) { tmp___13 = test_and_clear_bit(17L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___13 != 0) { ql_dbg(67108864U, base_vha, 16416, "Firmware Reset Recovery\n"); tmp___12 = qlafx00_reset_initialize(base_vha); if (tmp___12 != 0) { tmp___11 = constant_test_bit(15L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___11 == 0) { set_bit(17L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } ql_dbg(67108864U, base_vha, 16417, "Reset Recovery Failed\n"); } else { } } else { } tmp___16 = test_and_clear_bit(26L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___16 != 0) { ql_dbg(67108864U, base_vha, 16418, "ISPFx00 Target Scan scheduled\n"); tmp___15 = qlafx00_rescan_isp(base_vha); if (tmp___15 != 0) { tmp___14 = constant_test_bit(15L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___14 == 0) { set_bit(17L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } ql_dbg(67108864U, base_vha, 16414, "ISPFx00 Target Scan Failed\n"); } else { } ql_dbg(67108864U, base_vha, 16415, "ISPFx00 Target Scan End\n"); } else { } } else { } tmp___19 = test_and_clear_bit(2L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___19 != 0) { ql_dbg(67108864U, base_vha, 16391, "ISP abort scheduled.\n"); tmp___18 = test_and_set_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___18 == 0) { tmp___17 = (*((ha->isp_ops)->abort_isp))(base_vha); if (tmp___17 != 0) { set_bit(2L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } clear_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } ql_dbg(67108864U, base_vha, 16392, "ISP abort end.\n"); } else { } tmp___20 = test_and_clear_bit(13L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___20 != 0) { qla2x00_update_fcports(base_vha); } else { } tmp___21 = constant_test_bit(21L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___21 != 0) { ret = qla2x00_send_change_request(base_vha, 3, 0); if (ret != 0) { ql_log(1U, base_vha, 289, "Failed to enable receiving of RSCN requests: 0x%x.\n", ret); } else { } clear_bit(21L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } if ((ha->device_type & 131072U) != 0U) { goto loop_resync_check; } else { } tmp___22 = constant_test_bit(20L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___22 != 0) { ql_dbg(67108864U, base_vha, 16393, "Quiescence mode scheduled.\n"); if ((ha->device_type & 
16384U) != 0U || (ha->device_type & 262144U) != 0U) { if ((ha->device_type & 16384U) != 0U) { qla82xx_device_state_handler(base_vha); } else { } if ((ha->device_type & 262144U) != 0U) { qla8044_device_state_handler(base_vha); } else { } clear_bit(20L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (*((unsigned long *)ha + 2UL) == 0UL) { qla2x00_perform_loop_resync(base_vha); if ((ha->device_type & 16384U) != 0U) { qla82xx_idc_lock(ha); qla82xx_clear_qsnt_ready(base_vha); qla82xx_idc_unlock(ha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); qla8044_clear_qsnt_ready(base_vha); qla8044_idc_unlock(ha); } else { } } else { } } else { clear_bit(20L, (unsigned long volatile *)(& base_vha->dpc_flags)); qla2x00_quiesce_io(base_vha); } ql_dbg(67108864U, base_vha, 16394, "Quiescence mode end.\n"); } else { } tmp___23 = test_and_clear_bit(0L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___23 != 0) { tmp___24 = test_and_set_bit(1L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___24 == 0) { ql_dbg(67108864U, base_vha, 16395, "Reset marker scheduled.\n"); qla2x00_rst_aen(base_vha); clear_bit(1L, (unsigned long volatile *)(& base_vha->dpc_flags)); ql_dbg(67108864U, base_vha, 16396, "Reset marker end.\n"); } else { } } else { } tmp___25 = test_and_clear_bit(8L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___25 != 0) { tmp___26 = constant_test_bit(4L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___26 == 0) { tmp___27 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___27 != 2) { ql_dbg(67108864U, base_vha, 16397, "Relogin scheduled.\n"); qla2x00_relogin(base_vha); ql_dbg(67108864U, base_vha, 16398, "Relogin end.\n"); } else { } } else { } } else { } loop_resync_check: tmp___29 = test_and_clear_bit(4L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___29 != 0) { ql_dbg(67108864U, base_vha, 16399, "Loop resync scheduled.\n"); tmp___28 = test_and_set_bit(5L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___28 == 0) { rval = qla2x00_loop_resync(base_vha); clear_bit(5L, (unsigned long volatile *)(& base_vha->dpc_flags)); } else { } ql_dbg(67108864U, base_vha, 16400, "Loop resync end.\n"); } else { } if ((ha->device_type & 131072U) != 0U) { goto intr_on_check; } else { } tmp___30 = constant_test_bit(16L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___30 != 0) { tmp___31 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___31 == 5) { clear_bit(16L, (unsigned long volatile *)(& base_vha->dpc_flags)); qla2xxx_flash_npiv_conf(base_vha); } else { } } else { } tmp___32 = test_and_clear_bit(22L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___32 != 0) { clear_bit(23L, (unsigned long volatile *)(& base_vha->dpc_flags)); qla2x00_host_ramp_down_queuedepth(base_vha); } else { } tmp___33 = test_and_clear_bit(23L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___33 != 0) { qla2x00_host_ramp_up_queuedepth(base_vha); } else { } intr_on_check: ; if ((unsigned int )ha->interrupts_on == 0U) { (*((ha->isp_ops)->enable_intrs))(ha); } else { } tmp___34 = test_and_clear_bit(11L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (tmp___34 != 0) { (*((ha->isp_ops)->beacon_blink))(base_vha); } else { } if ((ha->device_type & 131072U) == 0U) { qla2x00_do_dpc_all_vps(base_vha); } else { } ha->dpc_active = 0U; end_loop: __ret___0 = 1L; switch (8UL) { case 1UL: tmp___35 = get_current(); __asm__ volatile ("xchgb %b0, %1\n": "+q" 
(__ret___0), "+m" (tmp___35->state): : "memory", "cc"); goto ldv_62413; case 2UL: tmp___36 = get_current(); __asm__ volatile ("xchgw %w0, %1\n": "+r" (__ret___0), "+m" (tmp___36->state): : "memory", "cc"); goto ldv_62413; case 4UL: tmp___37 = get_current(); __asm__ volatile ("xchgl %0, %1\n": "+r" (__ret___0), "+m" (tmp___37->state): : "memory", "cc"); goto ldv_62413; case 8UL: tmp___38 = get_current(); __asm__ volatile ("xchgq %q0, %1\n": "+r" (__ret___0), "+m" (tmp___38->state): : "memory", "cc"); goto ldv_62413; default: __xchg_wrong_size(); } ldv_62413: ; ldv_62407: tmp___39 = kthread_should_stop(); if (tmp___39) { tmp___40 = 0; } else { tmp___40 = 1; } if (tmp___40) { goto ldv_62419; } else { } tmp___41 = get_current(); tmp___41->state = 0L; ql_dbg(67108864U, base_vha, 16401, "DPC handler exiting.\n"); ha->dpc_active = 0U; qla2x00_abort_all_cmds(base_vha, 65536); return (0); } } void qla2xxx_wake_dpc(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct task_struct *t ; int tmp ; { ha = vha->hw; t = ha->dpc_thread; tmp = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp == 0 && (unsigned long )t != (unsigned long )((struct task_struct *)0)) { wake_up_process(t); } else { } return; } } static void qla2x00_rst_aen(scsi_qla_host_t *vha ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { if (*((unsigned long *)vha + 19UL) != 0UL && *((unsigned long *)vha + 19UL) == 0UL) { tmp___1 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___1 == 0) { tmp___2 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___2 == 0) { ldv_62429: clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); vha->marker_needed = 1U; tmp = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp == 0) { tmp___0 = constant_test_bit(0L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { goto ldv_62429; } else { goto ldv_62430; } } else { } ldv_62430: ; } else { } } else { } } else { } return; } } void qla2x00_timer(scsi_qla_host_t *vha ) { unsigned long cpu_flags ; int start_dpc ; int index ; srb_t *sp ; uint16_t w ; struct qla_hw_data *ha ; struct req_que *req ; int tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; fc_port_t *sfcp ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; int tmp___19 ; int tmp___20 ; int tmp___21 ; int tmp___22 ; int tmp___23 ; int tmp___24 ; int tmp___25 ; int tmp___26 ; int tmp___27 ; int tmp___28 ; int tmp___29 ; int tmp___30 ; { cpu_flags = 0UL; start_dpc = 0; ha = vha->hw; if (*((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(16777216U, vha, 24576, "EEH = %d, restarting timer.\n", (int )ha->flags.eeh_busy); qla2x00_restart_timer(vha, 1UL); return; } else { } tmp = pci_channel_offline(ha->pdev); if (tmp == 0) { pci_read_config_word((struct pci_dev const *)ha->pdev, 0, & w); } else { } if ((unsigned int )vha->vp_idx == 0U && ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U)) { tmp___0 = constant_test_bit(20L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { start_dpc = start_dpc + 1; } else { } if ((ha->device_type & 16384U) != 0U) { qla82xx_watchdog(vha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_watchdog(vha); } else { } } else { } if ((unsigned int )vha->vp_idx == 0U && (ha->device_type & 131072U) != 0U) { 
qlafx00_timer_routine(vha); } else { } tmp___5 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___5 > 0) { tmp___6 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___6 == 0) { tmp___7 = constant_test_bit(18L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___7 == 0) { if (*((unsigned long *)vha + 19UL) != 0UL) { tmp___2 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___2 == (int )vha->loop_down_abort_time) { ql_log(2U, vha, 24584, "Loop down - aborting the queues before time expires.\n"); if ((ha->device_type & 1U) == 0U && (unsigned int )vha->link_down_timeout != 0U) { atomic_set(& vha->loop_state, 6); } else { } if ((unsigned int )vha->vp_idx == 0U) { tmp___1 = spinlock_check(& ha->hardware_lock); cpu_flags = _raw_spin_lock_irqsave(tmp___1); req = *(ha->req_q_map); index = 1; goto ldv_62448; ldv_62447: sp = *(req->outstanding_cmds + (unsigned long )index); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto ldv_62445; } else { } if ((unsigned int )sp->type != 8U) { goto ldv_62445; } else { } sfcp = sp->fcport; if ((sfcp->flags & 4U) == 0U) { goto ldv_62445; } else { } if ((ha->device_type & 16384U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } goto ldv_62446; ldv_62445: index = index + 1; ldv_62448: ; if ((int )req->num_outstanding_cmds > index) { goto ldv_62447; } else { } ldv_62446: spin_unlock_irqrestore(& ha->hardware_lock, cpu_flags); } else { } start_dpc = start_dpc + 1; } else { } tmp___3 = atomic_dec_and_test(& vha->loop_down_timer); if (tmp___3 != 0) { if ((vha->device_flags & 2U) == 0U) { ql_log(1U, vha, 24585, "Loop down - aborting ISP.\n"); if ((ha->device_type & 16384U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } } else { } } else { } tmp___4 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); ql_dbg(16777216U, vha, 24586, "Loop down - seconds remaining %d.\n", tmp___4); } else { } } else { } } else { } } else { } if ((unsigned int )vha->vp_idx == 0U && (unsigned int )ha->beacon_blink_led == 1U) { if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { set_bit(11L, (unsigned long volatile *)(& vha->dpc_flags)); start_dpc = start_dpc + 1; } else { } } else { } tmp___8 = list_empty((struct list_head const *)(& vha->work_list)); if (tmp___8 == 0) { start_dpc = start_dpc + 1; } else { } tmp___20 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___20 != 0) { goto _L; } else { tmp___21 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___21 != 0) { goto _L; } else { tmp___22 = constant_test_bit(13L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___22 != 0) { goto _L; } else if (start_dpc != 0) { goto _L; } else { tmp___23 = constant_test_bit(0L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___23 != 0) { goto _L; } else { tmp___24 = constant_test_bit(11L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___24 != 0) { goto _L; } else { tmp___25 = constant_test_bit(17L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___25 != 0) { goto _L; } else { tmp___26 = constant_test_bit(18L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___26 != 0) { goto _L; } else { tmp___27 = constant_test_bit(14L, (unsigned long const volatile *)(& vha->dpc_flags)); 
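/* The cascade of constant_test_bit() checks around this point simply ORs the pending
 * dpc_flags bits (ISP abort, loop resync, fcport update, reset marker, relogin,
 * beacon blink, queue-depth ramp up/down, ...) together with start_dpc; any hit jumps
 * to the _L label, which logs a snapshot of the flags and wakes the DPC thread via
 * qla2xxx_wake_dpc() before the timer is re-armed. */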
if (tmp___27 != 0) { goto _L; } else { tmp___28 = constant_test_bit(8L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___28 != 0) { goto _L; } else { tmp___29 = constant_test_bit(22L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___29 != 0) { goto _L; } else { tmp___30 = constant_test_bit(23L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___30 != 0) { _L: /* CIL Label */ tmp___9 = constant_test_bit(0L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___10 = constant_test_bit(13L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___11 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___12 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); ql_dbg(16777216U, vha, 24587, "isp_abort_needed=%d loop_resync_needed=%d fcport_update_needed=%d start_dpc=%d reset_marker_needed=%d", tmp___12, tmp___11, tmp___10, start_dpc, tmp___9); tmp___13 = constant_test_bit(22L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___14 = constant_test_bit(23L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___15 = constant_test_bit(8L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___16 = constant_test_bit(14L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___17 = constant_test_bit(18L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___18 = constant_test_bit(17L, (unsigned long const volatile *)(& vha->dpc_flags)); tmp___19 = constant_test_bit(11L, (unsigned long const volatile *)(& vha->dpc_flags)); ql_dbg(16777216U, vha, 24588, "beacon_blink_needed=%d isp_unrecoverable=%d fcoe_ctx_reset_needed=%d vp_dpc_needed=%d relogin_needed=%d, host_ramp_down_needed=%d host_ramp_up_needed=%d.\n", tmp___19, tmp___18, tmp___17, tmp___16, tmp___15, tmp___14, tmp___13); qla2xxx_wake_dpc(vha); } else { } } } } } } } } } } } qla2x00_restart_timer(vha, 1UL); return; } } static struct mutex qla_fw_lock = {{1}, {{{{{0U}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "qla_fw_lock.wait_lock", 0, 0UL}}}}, {& qla_fw_lock.wait_list, & qla_fw_lock.wait_list}, 0, 0, (void *)(& qla_fw_lock), {0, {0, 0}, "qla_fw_lock", 0, 0UL}}; static struct fw_blob qla_fw_blobs[10U] = { {(char *)"ql2100_fw.bin", {4096U, 0U}, 0}, {(char *)"ql2200_fw.bin", {4096U, 0U}, 0}, {(char *)"ql2300_fw.bin", {2048U, 0U}, 0}, {(char *)"ql2322_fw.bin", {2048U, 114688U, 122880U, 0U}, 0}, {(char *)"ql2400_fw.bin", {0U, 0U, 0U, 0U}, 0}, {(char *)"ql2500_fw.bin", {0U, 0U, 0U, 0U}, 0}, {(char *)"ql8100_fw.bin", {0U, 0U, 0U, 0U}, 0}, {(char *)"ql8200_fw.bin", {0U, 0U, 0U, 0U}, 0}, {(char *)"ql2600_fw.bin", {0U, 0U, 0U, 0U}, 0}, {(char *)"ql8300_fw.bin", {0U, 0U, 0U, 0U}, 0}}; struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct fw_blob *blob ; int tmp ; { ha = vha->hw; if ((int )ha->device_type & 1) { blob = (struct fw_blob *)(& qla_fw_blobs); } else if ((ha->device_type & 2U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 1UL; } else if (((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 32U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 2UL; } else if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 3UL; } else if ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 4UL; } else if ((ha->device_type 
& 2048U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 5UL; } else if ((ha->device_type & 8192U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 6UL; } else if ((ha->device_type & 16384U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 7UL; } else if ((ha->device_type & 32768U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 8UL; } else if ((ha->device_type & 65536U) != 0U) { blob = (struct fw_blob *)(& qla_fw_blobs) + 9UL; } else { return ((struct fw_blob *)0); } mutex_lock_nested(& qla_fw_lock, 0U); if ((unsigned long )blob->fw != (unsigned long )((struct firmware const *)0)) { goto out; } else { } tmp = request_firmware(& blob->fw, (char const *)blob->name, & (ha->pdev)->dev); if (tmp != 0) { ql_log(1U, vha, 99, "Failed to load firmware image (%s).\n", blob->name); blob->fw = (struct firmware const *)0; blob = (struct fw_blob *)0; goto out; } else { } out: mutex_unlock(& qla_fw_lock); return (blob); } } static void qla2x00_release_firmware(void) { int idx ; { mutex_lock_nested(& qla_fw_lock, 0U); idx = 0; goto ldv_62463; ldv_62462: release_firmware(qla_fw_blobs[idx].fw); idx = idx + 1; ldv_62463: ; if (idx <= 9) { goto ldv_62462; } else { } mutex_unlock(& qla_fw_lock); return; } } static pci_ers_result_t qla2xxx_pci_error_detected(struct pci_dev *pdev , pci_channel_state_t state ) { scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; { tmp = pci_get_drvdata(pdev); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; ql_dbg(2097152U, vha, 36864, "PCI error detected, state %x.\n", state); switch (state) { case 1U: ha->flags.eeh_busy = 0U; return (2U); case 2U: ha->flags.eeh_busy = 1U; if ((ha->device_type & 16384U) != 0U) { ha->flags.isp82xx_fw_hung = 1U; ql_dbg(2097152U, vha, 36865, "Pci channel io frozen\n"); qla82xx_clear_pending_mbx(vha); } else { } qla2x00_free_irqs(vha); pci_disable_device(pdev); qla2x00_abort_all_cmds(vha, 524288); return (3U); case 3U: ha->flags.pci_channel_io_perm_failure = 1U; qla2x00_abort_all_cmds(vha, 65536); return (4U); } return (3U); } } static pci_ers_result_t qla2xxx_pci_mmio_enabled(struct pci_dev *pdev ) { int risc_paused ; uint32_t stat ; unsigned long flags ; scsi_qla_host_t *base_vha ; void *tmp ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; struct device_reg_24xx *reg24 ; raw_spinlock_t *tmp___0 ; { risc_paused = 0; tmp = pci_get_drvdata(pdev); base_vha = (scsi_qla_host_t *)tmp; ha = base_vha->hw; reg = & (ha->iobase)->isp; reg24 = & (ha->iobase)->isp24; if ((ha->device_type & 16384U) != 0U) { return (5U); } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { stat = readl((void const volatile *)(& reg->hccr)); if ((stat & 32U) != 0U) { risc_paused = 1; } else { } } else if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { stat = readl((void const volatile *)(& reg->u.isp2300.host_status)); if ((stat & 256U) != 0U) { risc_paused = 1; } else { } } else if ((ha->device_type & 134217728U) != 0U) { stat = readl((void const volatile *)(& reg24->host_status)); if ((stat & 256U) != 0U) { risc_paused = 1; } else { } } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); if (risc_paused != 0) { ql_log(2U, base_vha, 36867, "RISC paused -- mmio_enabled, Dumping firmware.\n"); (*((ha->isp_ops)->fw_dump))(base_vha, 0); return (3U); } else { return (5U); } } } static uint32_t 
qla82xx_error_recovery(scsi_qla_host_t *base_vha ) { uint32_t rval ; uint32_t drv_active ; struct qla_hw_data *ha ; int fn ; struct pci_dev *other_pdev ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; { rval = 258U; drv_active = 0U; ha = base_vha->hw; other_pdev = (struct pci_dev *)0; ql_dbg(2097152U, base_vha, 36870, "Entered %s.\n", "qla82xx_error_recovery"); set_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); if (*((unsigned long *)base_vha + 19UL) != 0UL) { qla2x00_abort_isp_cleanup(base_vha); } else { } fn = (int )(ha->pdev)->devfn & 7; goto ldv_62496; ldv_62498: fn = fn - 1; ql_dbg(2097152U, base_vha, 36871, "Finding pci device at function = 0x%x.\n", fn); tmp = pci_domain_nr((ha->pdev)->bus); other_pdev = pci_get_domain_bus_and_slot(tmp, (unsigned int )((ha->pdev)->bus)->number, ((ha->pdev)->devfn & 248U) | ((unsigned int )fn & 7U)); if ((unsigned long )other_pdev == (unsigned long )((struct pci_dev *)0)) { goto ldv_62496; } else { } tmp___0 = atomic_read((atomic_t const *)(& other_pdev->enable_cnt)); if (tmp___0 != 0) { ql_dbg(2097152U, base_vha, 36872, "Found PCI func available and enable at 0x%x.\n", fn); pci_dev_put(other_pdev); goto ldv_62497; } else { } pci_dev_put(other_pdev); ldv_62496: ; if (fn > 0) { goto ldv_62498; } else { } ldv_62497: ; if (fn == 0) { ql_dbg(2097152U, base_vha, 36873, "This devfn is reset owner = 0x%x.\n", (ha->pdev)->devfn); qla82xx_idc_lock(ha); qla82xx_wr_32(ha, 136323392UL, 2U); qla82xx_wr_32(ha, 136323444UL, 1U); tmp___1 = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp___1; ql_dbg(2097152U, base_vha, 36874, "drv_active = 0x%x.\n", drv_active); qla82xx_idc_unlock(ha); if (drv_active != 0U) { tmp___2 = qla82xx_start_firmware(base_vha); rval = (uint32_t )tmp___2; } else { rval = 0U; } qla82xx_idc_lock(ha); if (rval != 0U) { ql_log(2U, base_vha, 36875, "HW State: FAILED.\n"); qla82xx_clear_drv_active(ha); qla82xx_wr_32(ha, 136323392UL, 6U); } else { ql_log(2U, base_vha, 36876, "HW State: READY.\n"); qla82xx_wr_32(ha, 136323392UL, 3U); qla82xx_idc_unlock(ha); ha->flags.isp82xx_fw_hung = 0U; tmp___3 = qla82xx_restart_isp(base_vha); rval = (uint32_t )tmp___3; qla82xx_idc_lock(ha); qla82xx_wr_32(ha, 136323396UL, 0U); qla82xx_set_drv_active(base_vha); } qla82xx_idc_unlock(ha); } else { ql_dbg(2097152U, base_vha, 36877, "This devfn is not reset owner = 0x%x.\n", (ha->pdev)->devfn); tmp___5 = qla82xx_rd_32(ha, 136323392UL); if (tmp___5 == 3) { ha->flags.isp82xx_fw_hung = 0U; tmp___4 = qla82xx_restart_isp(base_vha); rval = (uint32_t )tmp___4; qla82xx_idc_lock(ha); qla82xx_set_drv_active(base_vha); qla82xx_idc_unlock(ha); } else { } } clear_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); return (rval); } } static pci_ers_result_t qla2xxx_pci_slot_reset(struct pci_dev *pdev ) { pci_ers_result_t ret ; scsi_qla_host_t *base_vha ; void *tmp ; struct qla_hw_data *ha ; struct rsp_que *rsp ; int rc ; int retries ; int tmp___0 ; int tmp___1 ; uint32_t tmp___2 ; int tmp___3 ; int tmp___4 ; { ret = 4U; tmp = pci_get_drvdata(pdev); base_vha = (scsi_qla_host_t *)tmp; ha = base_vha->hw; retries = 10; ql_dbg(2097152U, base_vha, 36868, "Slot Reset.\n"); pdev->error_state = 1U; pci_restore_state(pdev); pci_save_state(pdev); if (ha->mem_only != 0) { rc = pci_enable_device_mem(pdev); } else { rc = pci_enable_device(pdev); } if (rc != 0) { ql_log(1U, base_vha, 36869, "Can\'t re-enable PCI device after reset.\n"); goto exit_slot_reset; } else { } rsp = *(ha->rsp_q_map); tmp___0 = 
qla2x00_request_irqs(ha, rsp); if (tmp___0 != 0) { goto exit_slot_reset; } else { } tmp___1 = (*((ha->isp_ops)->pci_config))(base_vha); if (tmp___1 != 0) { goto exit_slot_reset; } else { } if ((ha->device_type & 16384U) != 0U) { tmp___2 = qla82xx_error_recovery(base_vha); if (tmp___2 == 0U) { ret = 5U; goto exit_slot_reset; } else { goto exit_slot_reset; } } else { } goto ldv_62510; ldv_62509: msleep(1000U); ldv_62510: ; if (*((unsigned long *)ha + 2UL) != 0UL) { tmp___3 = retries; retries = retries - 1; if (tmp___3 != 0) { goto ldv_62509; } else { goto ldv_62511; } } else { } ldv_62511: set_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); tmp___4 = (*((ha->isp_ops)->abort_isp))(base_vha); if (tmp___4 == 0) { ret = 5U; } else { } clear_bit(3L, (unsigned long volatile *)(& base_vha->dpc_flags)); exit_slot_reset: ql_dbg(2097152U, base_vha, 36878, "slot_reset return %x.\n", ret); return (ret); } } static void qla2xxx_pci_resume(struct pci_dev *pdev ) { scsi_qla_host_t *base_vha ; void *tmp ; struct qla_hw_data *ha ; int ret ; { tmp = pci_get_drvdata(pdev); base_vha = (scsi_qla_host_t *)tmp; ha = base_vha->hw; ql_dbg(2097152U, base_vha, 36879, "pci_resume.\n"); ret = qla2x00_wait_for_hba_online(base_vha); if (ret != 0) { ql_log(0U, base_vha, 36866, "The device failed to resume I/O from slot/link_reset.\n"); } else { } pci_cleanup_aer_uncorrect_error_status(pdev); ha->flags.eeh_busy = 0U; return; } } static struct pci_error_handlers const qla2xxx_err_handler = {(pci_ers_result_t (*)(struct pci_dev * , enum pci_channel_state ))(& qla2xxx_pci_error_detected), & qla2xxx_pci_mmio_enabled, 0, & qla2xxx_pci_slot_reset, & qla2xxx_pci_resume}; static struct pci_device_id qla2xxx_pci_tbl[20U] = { {4215U, 8448U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 8704U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 8960U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 8978U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 8994U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 25362U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 25378U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 9250U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 9266U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 33842U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 21538U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 21554U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 9522U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 8241U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 32769U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 32801U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 32817U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 61441U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 32836U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {0U, 0U, 0U, 0U, 0U, 0U, 0UL}}; struct pci_device_id const __mod_pci_device_table ; static struct pci_driver qla2xxx_pci_driver = {{0, 0}, "qla2xxx", (struct pci_device_id const *)(& qla2xxx_pci_tbl), & qla2x00_probe_one, & qla2x00_remove_one, 0, 0, 0, 0, & qla2x00_shutdown, 0, & qla2xxx_err_handler, {0, 0, & __this_module, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{{{{{0U}}, 0U, 0U, 0, {0, {0, 0}, 0, 0, 0UL}}}}, {0, 0}}}; static struct file_operations const apidev_fops = {& __this_module, & noop_llseek, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static int qla2x00_module_init(void) { int ret ; { ret = 0; srb_cachep = kmem_cache_create("qla2xxx_srbs", 376UL, 0UL, 8192UL, (void (*)(void * ))0); if ((unsigned long )srb_cachep == 
(unsigned long )((struct kmem_cache *)0)) { ql_log(0U, (scsi_qla_host_t *)0, 1, "Unable to allocate SRB cache...Failing load!.\n"); return (-12); } else { } ret = qlt_init(); if (ret < 0) { kmem_cache_destroy(srb_cachep); return (ret); } else if (ret > 0) { qla2xxx_transport_functions.disable_target_scan = 1U; qla2xxx_transport_vport_functions.disable_target_scan = 1U; } else { } strcpy((char *)(& qla2x00_version_str), "8.06.00.08-k"); if (ql2xextended_error_logging != 0) { strcat((char *)(& qla2x00_version_str), "-debug"); } else { } qla2xxx_transport_template = fc_attach_transport(& qla2xxx_transport_functions); if ((unsigned long )qla2xxx_transport_template == (unsigned long )((struct scsi_transport_template *)0)) { kmem_cache_destroy(srb_cachep); ql_log(0U, (scsi_qla_host_t *)0, 2, "fc_attach_transport failed...Failing load!.\n"); qlt_exit(); return (-19); } else { } apidev_major = ldv_register_chrdev_7(0U, "ql2xapidev", & apidev_fops); if (apidev_major < 0) { ql_log(0U, (scsi_qla_host_t *)0, 3, "Unable to register char device %s.\n", (char *)"ql2xapidev"); } else { } qla2xxx_transport_vport_template = fc_attach_transport(& qla2xxx_transport_vport_functions); if ((unsigned long )qla2xxx_transport_vport_template == (unsigned long )((struct scsi_transport_template *)0)) { kmem_cache_destroy(srb_cachep); qlt_exit(); fc_release_transport(qla2xxx_transport_template); ql_log(0U, (scsi_qla_host_t *)0, 4, "fc_attach_transport vport failed...Failing load!.\n"); return (-19); } else { } ql_log(2U, (scsi_qla_host_t *)0, 5, "QLogic Fibre Channel HBA Driver: %s.\n", (char *)(& qla2x00_version_str)); ret = __pci_register_driver(& qla2xxx_pci_driver, & __this_module, "qla2xxx"); if (ret != 0) { kmem_cache_destroy(srb_cachep); qlt_exit(); fc_release_transport(qla2xxx_transport_template); fc_release_transport(qla2xxx_transport_vport_template); ql_log(0U, (scsi_qla_host_t *)0, 6, "pci_register_driver failed...ret=%d Failing load!.\n", ret); } else { } return (ret); } } static void qla2x00_module_exit(void) { { ldv_unregister_chrdev_8((unsigned int )apidev_major, "ql2xapidev"); pci_unregister_driver(& qla2xxx_pci_driver); qla2x00_release_firmware(); kmem_cache_destroy(srb_cachep); qlt_exit(); if ((unsigned long )ctx_cachep != (unsigned long )((struct kmem_cache *)0)) { kmem_cache_destroy(ctx_cachep); } else { } fc_release_transport(qla2xxx_transport_template); fc_release_transport(qla2xxx_transport_vport_template); return; } } extern int ldv_probe_71(void) ; extern int ldv_release_62(void) ; int ldv_retval_2 ; extern int ldv_probe_68(void) ; extern int ldv_probe_66(void) ; extern int ldv_probe_62(void) ; extern int ldv_open_60(void) ; int ldv_retval_1 ; extern void ldv_initialize(void) ; extern int ldv_probe_69(void) ; extern int ldv_release_69(void) ; extern int ldv_release_64(void) ; extern int ldv_release_68(void) ; extern int ldv_release_65(void) ; extern int ldv_probe_67(void) ; extern int ldv_release_67(void) ; extern int ldv_release_66(void) ; extern int ldv_release_70(void) ; extern int ldv_probe_65(void) ; extern int ldv_probe_63(void) ; extern int ldv_release_71(void) ; extern int ldv_suspend_62(void) ; void ldv_check_final_state(void) ; extern int ldv_release_63(void) ; int ldv_retval_3 ; extern int ldv_probe_70(void) ; extern int ldv_probe_64(void) ; extern int ldv_release_60(void) ; void activate_pending_timer_2(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_2 == (unsigned long )timer) { if (ldv_timer_state_2 == 2 || pending_flag != 
0) { ldv_timer_list_2 = timer; ldv_timer_list_2->data = data; ldv_timer_state_2 = 1; } else { } return; } else { } reg_timer_2(timer); ldv_timer_list_2->data = data; return; } } void choose_timer_1(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_1 = 2; return; } } void choose_timer_2(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_2 = 2; return; } } int reg_timer_2(struct timer_list *timer ) { { ldv_timer_list_2 = timer; ldv_timer_state_2 = 1; return (0); } } void ldv_initialize_isp_operations_65(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(992UL); qla8044_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_zalloc(128UL); qla8044_isp_ops_group1 = (struct fc_port *)tmp___0; tmp___1 = ldv_zalloc(12480UL); qla8044_isp_ops_group2 = (struct qla_hw_data *)tmp___1; return; } } void ldv_initialize_isp_operations_68(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(992UL); qla25xx_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_zalloc(128UL); qla25xx_isp_ops_group1 = (struct fc_port *)tmp___0; tmp___1 = ldv_zalloc(12480UL); qla25xx_isp_ops_group2 = (struct qla_hw_data *)tmp___1; return; } } void ldv_initialize_isp_operations_70(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(992UL); qla2300_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_zalloc(128UL); qla2300_isp_ops_group1 = (struct fc_port *)tmp___0; tmp___1 = ldv_zalloc(12480UL); qla2300_isp_ops_group2 = (struct qla_hw_data *)tmp___1; return; } } void ldv_initialize_pci_error_handlers_62(void) { void *tmp ; { tmp = ldv_zalloc(2928UL); qla2xxx_err_handler_group0 = (struct pci_dev *)tmp; return; } } void ldv_initialize_scsi_host_template_72(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(232UL); qla2xxx_driver_template_group0 = (struct scsi_cmnd *)tmp; tmp___0 = ldv_zalloc(3496UL); qla2xxx_driver_template_group1 = (struct Scsi_Host *)tmp___0; tmp___1 = ldv_zalloc(3376UL); qla2xxx_driver_template_group2 = (struct scsi_device *)tmp___1; return; } } void ldv_initialize_isp_operations_66(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(992UL); qla82xx_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_zalloc(128UL); qla82xx_isp_ops_group1 = (struct fc_port *)tmp___0; tmp___1 = ldv_zalloc(12480UL); qla82xx_isp_ops_group2 = (struct qla_hw_data *)tmp___1; return; } } void ldv_initialize_isp_operations_63(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(992UL); qlafx00_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_zalloc(128UL); qlafx00_isp_ops_group1 = (struct fc_port *)tmp___0; tmp___1 = ldv_zalloc(12480UL); qlafx00_isp_ops_group2 = (struct qla_hw_data *)tmp___1; return; } } void ldv_initialize_isp_operations_64(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(992UL); qla83xx_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_zalloc(128UL); qla83xx_isp_ops_group1 = (struct fc_port *)tmp___0; tmp___1 = ldv_zalloc(12480UL); qla83xx_isp_ops_group2 = (struct qla_hw_data *)tmp___1; return; } } void ldv_initialize_isp_operations_71(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(992UL); qla2100_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_zalloc(128UL); qla2100_isp_ops_group1 = (struct fc_port *)tmp___0; tmp___1 = ldv_zalloc(12480UL); 
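/* LDV environment model: each ldv_initialize_isp_operations_NN() hands the matching
 * isp_ops callback group zero-filled dummy objects (a scsi_qla_host, an fc_port and a
 * qla_hw_data) so the verifier can invoke the operations with well-defined arguments. */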
qla2100_isp_ops_group2 = (struct qla_hw_data *)tmp___1; return; } } void ldv_file_operations_60(void) { void *tmp ; { apidev_fops_group1 = ldv_zalloc(1032UL); tmp = ldv_zalloc(360UL); apidev_fops_group2 = (struct file *)tmp; return; } } int reg_timer_1(struct timer_list *timer ) { { ldv_timer_list_1 = timer; ldv_timer_state_1 = 1; return (0); } } void disable_suitable_timer_2(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_2) { ldv_timer_state_2 = 0; return; } else { } return; } } void ldv_initialize_isp_operations_67(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(992UL); qla81xx_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_zalloc(128UL); qla81xx_isp_ops_group1 = (struct fc_port *)tmp___0; tmp___1 = ldv_zalloc(12480UL); qla81xx_isp_ops_group2 = (struct qla_hw_data *)tmp___1; return; } } void activate_pending_timer_1(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_1 == (unsigned long )timer) { if (ldv_timer_state_1 == 2 || pending_flag != 0) { ldv_timer_list_1 = timer; ldv_timer_list_1->data = data; ldv_timer_state_1 = 1; } else { } return; } else { } reg_timer_1(timer); ldv_timer_list_1->data = data; return; } } void disable_suitable_timer_1(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_1) { ldv_timer_state_1 = 0; return; } else { } return; } } void ldv_initialize_pci_driver_61(void) { void *tmp ; { tmp = ldv_zalloc(2928UL); qla2xxx_pci_driver_group0 = (struct pci_dev *)tmp; return; } } void ldv_initialize_isp_operations_69(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(992UL); qla24xx_isp_ops_group0 = (struct scsi_qla_host *)tmp; tmp___0 = ldv_zalloc(128UL); qla24xx_isp_ops_group1 = (struct fc_port *)tmp___0; tmp___1 = ldv_zalloc(12480UL); qla24xx_isp_ops_group2 = (struct qla_hw_data *)tmp___1; return; } } void ldv_main_exported_18(void) ; void ldv_main_exported_33(void) ; void ldv_main_exported_32(void) ; void ldv_main_exported_21(void) ; void ldv_main_exported_26(void) ; void ldv_main_exported_30(void) ; void ldv_main_exported_44(void) ; void ldv_main_exported_55(void) ; void ldv_main_exported_25(void) ; void ldv_main_exported_27(void) ; void ldv_main_exported_28(void) ; void ldv_main_exported_57(void) ; void ldv_main_exported_40(void) ; void ldv_main_exported_20(void) ; void ldv_main_exported_59(void) ; void ldv_main_exported_49(void) ; void ldv_main_exported_24(void) ; void ldv_main_exported_31(void) ; void ldv_main_exported_35(void) ; void ldv_main_exported_53(void) ; void ldv_main_exported_48(void) ; void ldv_main_exported_22(void) ; void ldv_main_exported_42(void) ; void ldv_main_exported_46(void) ; void ldv_main_exported_23(void) ; void ldv_main_exported_29(void) ; void ldv_main_exported_50(void) ; void ldv_main_exported_39(void) ; void ldv_main_exported_36(void) ; void ldv_main_exported_51(void) ; void ldv_main_exported_58(void) ; void ldv_main_exported_41(void) ; void ldv_main_exported_47(void) ; void ldv_main_exported_38(void) ; void ldv_main_exported_52(void) ; void ldv_main_exported_34(void) ; void ldv_main_exported_56(void) ; void ldv_main_exported_37(void) ; void ldv_main_exported_45(void) ; void ldv_main_exported_19(void) ; void ldv_main_exported_43(void) ; void ldv_main_exported_54(void) ; int main(void) { char *ldvarg18 ; void *tmp ; uint32_t ldvarg11 ; uint8_t *ldvarg32 ; void *tmp___0 ; uint32_t ldvarg31 ; char *ldvarg20 ; void *tmp___1 ; uint32_t ldvarg7 ; 
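/* The ldvargNNN locals declared through the rest of main() are the arguments the LDV
 * harness feeds to the driver callbacks: pointers come from ldv_zalloc() and scalars
 * from __VERIFIER_nondet_*(), so every call is exercised with arbitrary but
 * type-correct values. */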
uint32_t ldvarg23 ; uint32_t ldvarg12 ; uint32_t ldvarg8 ; uint8_t *ldvarg13 ; void *tmp___2 ; void *ldvarg29 ; void *tmp___3 ; uint8_t *ldvarg10 ; void *tmp___4 ; uint32_t ldvarg9 ; uint32_t ldvarg24 ; unsigned int ldvarg27 ; unsigned int tmp___5 ; int ldvarg26 ; int tmp___6 ; uint32_t ldvarg30 ; uint32_t ldvarg6 ; uint32_t ldvarg15 ; uint32_t ldvarg16 ; int ldvarg21 ; int tmp___7 ; srb_t *ldvarg25 ; void *tmp___8 ; srb_t *ldvarg14 ; void *tmp___9 ; uint8_t *ldvarg17 ; void *tmp___10 ; unsigned int ldvarg22 ; unsigned int tmp___11 ; void *ldvarg19 ; void *tmp___12 ; int ldvarg28 ; int tmp___13 ; uint32_t ldvarg77 ; int ldvarg51 ; int tmp___14 ; uint8_t ldvarg67 ; uint8_t *ldvarg43 ; void *tmp___15 ; uint32_t ldvarg42 ; srb_t *ldvarg71 ; void *tmp___16 ; int ldvarg72 ; int tmp___17 ; uint32_t ldvarg56 ; uint32_t *ldvarg50 ; void *tmp___18 ; uint8_t ldvarg46 ; uint8_t *ldvarg58 ; void *tmp___19 ; uint32_t ldvarg37 ; srb_t *ldvarg53 ; void *tmp___20 ; unsigned int ldvarg64 ; unsigned int tmp___21 ; uint16_t *ldvarg44 ; void *tmp___22 ; uint32_t ldvarg38 ; uint16_t ldvarg69 ; uint16_t ldvarg48 ; uint8_t ldvarg47 ; uint32_t ldvarg39 ; uint32_t ldvarg41 ; uint16_t ldvarg60 ; char *ldvarg59 ; void *tmp___23 ; uint8_t ldvarg49 ; char *ldvarg62 ; void *tmp___24 ; uint32_t ldvarg57 ; uint32_t ldvarg65 ; uint32_t ldvarg36 ; cmd_entry_t *ldvarg55 ; void *tmp___25 ; uint32_t ldvarg66 ; uint8_t *ldvarg40 ; void *tmp___26 ; uint8_t ldvarg45 ; int ldvarg63 ; int tmp___27 ; unsigned int ldvarg73 ; unsigned int tmp___28 ; uint8_t ldvarg70 ; uint8_t *ldvarg78 ; void *tmp___29 ; uint8_t ldvarg68 ; uint16_t ldvarg54 ; void *ldvarg61 ; void *tmp___30 ; uint32_t ldvarg76 ; int ldvarg74 ; int tmp___31 ; srb_t *ldvarg52 ; void *tmp___32 ; void *ldvarg75 ; void *tmp___33 ; int ldvarg88 ; int tmp___34 ; int ldvarg89 ; int tmp___35 ; unsigned long ldvarg90 ; unsigned long tmp___36 ; int ldvarg87 ; int tmp___37 ; struct pci_device_id *ldvarg109 ; void *tmp___38 ; int ldvarg131 ; int tmp___39 ; uint32_t ldvarg134 ; uint8_t ldvarg146 ; uint32_t ldvarg141 ; char *ldvarg136 ; void *tmp___40 ; uint8_t ldvarg125 ; uint16_t ldvarg145 ; unsigned int ldvarg140 ; unsigned int tmp___41 ; uint32_t ldvarg152 ; srb_t *ldvarg132 ; void *tmp___42 ; char *ldvarg138 ; void *tmp___43 ; uint8_t ldvarg129 ; unsigned int ldvarg149 ; unsigned int tmp___44 ; uint32_t ldvarg142 ; void *ldvarg151 ; void *tmp___45 ; uint8_t *ldvarg154 ; void *tmp___46 ; void *ldvarg137 ; void *tmp___47 ; uint32_t ldvarg122 ; uint8_t ldvarg144 ; uint16_t ldvarg128 ; uint8_t ldvarg126 ; uint32_t ldvarg123 ; uint32_t ldvarg153 ; uint8_t *ldvarg135 ; void *tmp___48 ; srb_t *ldvarg147 ; void *tmp___49 ; uint8_t ldvarg127 ; uint16_t *ldvarg124 ; void *tmp___50 ; uint32_t ldvarg133 ; int ldvarg150 ; int tmp___51 ; uint8_t ldvarg143 ; uint32_t *ldvarg130 ; void *tmp___52 ; int ldvarg139 ; int tmp___53 ; int ldvarg148 ; int tmp___54 ; char *ldvarg178 ; void *tmp___55 ; int ldvarg181 ; int tmp___56 ; srb_t *ldvarg174 ; void *tmp___57 ; uint32_t ldvarg165 ; uint8_t ldvarg168 ; uint8_t ldvarg188 ; uint8_t ldvarg185 ; unsigned int ldvarg191 ; unsigned int tmp___58 ; uint8_t ldvarg167 ; uint8_t *ldvarg196 ; void *tmp___59 ; uint16_t ldvarg170 ; srb_t *ldvarg189 ; void *tmp___60 ; unsigned int ldvarg182 ; unsigned int tmp___61 ; uint8_t ldvarg169 ; uint32_t ldvarg175 ; uint32_t ldvarg195 ; uint16_t ldvarg187 ; uint32_t *ldvarg172 ; void *tmp___62 ; uint32_t ldvarg184 ; uint8_t ldvarg186 ; uint8_t *ldvarg177 ; void *tmp___63 ; int ldvarg190 ; int tmp___64 ; void *ldvarg193 ; 
void *tmp___65 ; uint16_t *ldvarg166 ; void *tmp___66 ; int ldvarg192 ; int tmp___67 ; uint32_t ldvarg164 ; uint32_t ldvarg183 ; uint32_t ldvarg176 ; int ldvarg173 ; int tmp___68 ; uint32_t ldvarg194 ; void *ldvarg179 ; void *tmp___69 ; char *ldvarg180 ; void *tmp___70 ; uint8_t ldvarg171 ; loff_t ldvarg213 ; loff_t tmp___71 ; int ldvarg212 ; int tmp___72 ; char *ldvarg242 ; void *tmp___73 ; uint32_t ldvarg237 ; uint32_t ldvarg225 ; uint8_t ldvarg247 ; uint8_t ldvarg233 ; srb_t *ldvarg236 ; void *tmp___74 ; int ldvarg243 ; int tmp___75 ; uint32_t *ldvarg234 ; void *tmp___76 ; unsigned int ldvarg244 ; unsigned int tmp___77 ; uint16_t *ldvarg228 ; void *tmp___78 ; uint32_t ldvarg257 ; uint32_t ldvarg226 ; uint32_t ldvarg221 ; uint8_t *ldvarg258 ; void *tmp___79 ; uint32_t ldvarg222 ; void *ldvarg255 ; void *tmp___80 ; uint16_t ldvarg232 ; uint8_t ldvarg230 ; char *ldvarg240 ; void *tmp___81 ; void *ldvarg241 ; void *tmp___82 ; uint32_t ldvarg246 ; uint8_t ldvarg231 ; uint32_t ldvarg238 ; int ldvarg254 ; int tmp___83 ; uint8_t *ldvarg224 ; void *tmp___84 ; uint8_t ldvarg248 ; uint32_t ldvarg223 ; int ldvarg235 ; int tmp___85 ; uint32_t ldvarg245 ; uint32_t ldvarg256 ; int ldvarg252 ; int tmp___86 ; uint8_t ldvarg229 ; uint8_t *ldvarg227 ; void *tmp___87 ; srb_t *ldvarg251 ; void *tmp___88 ; uint32_t ldvarg220 ; uint8_t ldvarg250 ; uint16_t ldvarg249 ; uint8_t *ldvarg239 ; void *tmp___89 ; unsigned int ldvarg253 ; unsigned int tmp___90 ; enum pci_channel_state ldvarg263 ; uint32_t ldvarg290 ; void *ldvarg299 ; void *tmp___91 ; uint32_t *ldvarg278 ; void *tmp___92 ; void *ldvarg285 ; void *tmp___93 ; uint32_t ldvarg300 ; srb_t *ldvarg295 ; void *tmp___94 ; int ldvarg296 ; int tmp___95 ; uint32_t ldvarg281 ; uint32_t ldvarg271 ; uint8_t *ldvarg302 ; void *tmp___96 ; char *ldvarg284 ; void *tmp___97 ; srb_t *ldvarg280 ; void *tmp___98 ; uint8_t ldvarg274 ; uint16_t *ldvarg272 ; void *tmp___99 ; uint16_t ldvarg293 ; uint8_t ldvarg275 ; uint8_t ldvarg277 ; unsigned int ldvarg288 ; unsigned int tmp___100 ; uint8_t ldvarg291 ; uint32_t ldvarg289 ; uint32_t ldvarg282 ; uint32_t ldvarg301 ; uint32_t ldvarg270 ; char *ldvarg286 ; void *tmp___101 ; int ldvarg287 ; int tmp___102 ; uint16_t ldvarg276 ; unsigned int ldvarg297 ; unsigned int tmp___103 ; uint8_t *ldvarg283 ; void *tmp___104 ; int ldvarg279 ; int tmp___105 ; uint8_t ldvarg273 ; uint8_t ldvarg292 ; int ldvarg298 ; int tmp___106 ; uint8_t ldvarg294 ; uint16_t ldvarg315 ; uint32_t ldvarg308 ; uint32_t ldvarg305 ; int ldvarg341 ; int tmp___107 ; uint8_t ldvarg313 ; uint8_t *ldvarg307 ; void *tmp___108 ; uint16_t ldvarg321 ; uint16_t ldvarg336 ; uint16_t *ldvarg311 ; void *tmp___109 ; uint32_t ldvarg309 ; cmd_entry_t *ldvarg322 ; void *tmp___110 ; uint8_t *ldvarg310 ; void *tmp___111 ; unsigned int ldvarg340 ; unsigned int tmp___112 ; uint32_t ldvarg333 ; srb_t *ldvarg338 ; void *tmp___113 ; uint8_t *ldvarg325 ; void *tmp___114 ; uint32_t ldvarg303 ; uint32_t *ldvarg317 ; void *tmp___115 ; void *ldvarg342 ; void *tmp___116 ; uint8_t ldvarg335 ; uint32_t ldvarg304 ; uint32_t ldvarg343 ; uint8_t ldvarg337 ; uint32_t ldvarg344 ; int ldvarg339 ; int tmp___117 ; uint8_t ldvarg312 ; uint8_t *ldvarg345 ; void *tmp___118 ; int ldvarg318 ; int tmp___119 ; char *ldvarg326 ; void *tmp___120 ; unsigned int ldvarg331 ; unsigned int tmp___121 ; uint8_t ldvarg334 ; uint32_t ldvarg324 ; uint32_t ldvarg306 ; uint8_t ldvarg314 ; srb_t *ldvarg319 ; void *tmp___122 ; uint32_t ldvarg323 ; char *ldvarg329 ; void *tmp___123 ; uint16_t ldvarg327 ; uint32_t ldvarg332 ; 
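/* Local argument pool for this harness function: each ldvarg<N> declared here is
   later filled either with a zero-initialized buffer from ldv_zalloc(), with a
   nondeterministic scalar from __VERIFIER_nondet_*(), or zeroed via memset(), so
   the verification harness can exercise the driver callbacks with arbitrary inputs. */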
srb_t *ldvarg320 ; void *tmp___124 ; void *ldvarg328 ; void *tmp___125 ; int ldvarg330 ; int tmp___126 ; uint8_t ldvarg316 ; uint32_t ldvarg372 ; uint8_t ldvarg357 ; void *ldvarg381 ; void *tmp___127 ; uint32_t ldvarg349 ; uint32_t ldvarg352 ; int ldvarg380 ; int tmp___128 ; uint32_t ldvarg363 ; int ldvarg369 ; int tmp___129 ; uint32_t ldvarg383 ; srb_t *ldvarg362 ; void *tmp___130 ; unsigned int ldvarg379 ; unsigned int tmp___131 ; uint32_t ldvarg348 ; uint8_t *ldvarg365 ; void *tmp___132 ; uint8_t *ldvarg350 ; void *tmp___133 ; uint32_t ldvarg346 ; uint32_t *ldvarg360 ; void *tmp___134 ; uint32_t ldvarg364 ; uint8_t ldvarg376 ; srb_t *ldvarg377 ; void *tmp___135 ; uint8_t ldvarg374 ; unsigned int ldvarg370 ; unsigned int tmp___136 ; int ldvarg361 ; int tmp___137 ; uint16_t *ldvarg354 ; void *tmp___138 ; uint16_t ldvarg375 ; uint8_t *ldvarg384 ; void *tmp___139 ; uint16_t ldvarg358 ; uint32_t ldvarg351 ; void *ldvarg367 ; void *tmp___140 ; uint32_t ldvarg347 ; int ldvarg378 ; int tmp___141 ; uint32_t ldvarg382 ; uint8_t ldvarg359 ; char *ldvarg368 ; void *tmp___142 ; uint8_t *ldvarg353 ; void *tmp___143 ; uint8_t ldvarg373 ; uint32_t ldvarg371 ; char *ldvarg366 ; void *tmp___144 ; uint8_t ldvarg355 ; uint8_t ldvarg356 ; uint8_t *ldvarg404 ; void *tmp___145 ; uint8_t *ldvarg435 ; void *tmp___146 ; srb_t *ldvarg428 ; void *tmp___147 ; int ldvarg412 ; int tmp___148 ; uint32_t ldvarg397 ; uint8_t ldvarg408 ; uint32_t ldvarg403 ; uint8_t ldvarg424 ; uint8_t *ldvarg416 ; void *tmp___149 ; uint32_t ldvarg398 ; uint32_t ldvarg423 ; uint32_t ldvarg415 ; void *ldvarg418 ; void *tmp___150 ; uint32_t ldvarg422 ; uint8_t ldvarg407 ; uint16_t *ldvarg405 ; void *tmp___151 ; uint16_t ldvarg409 ; int ldvarg431 ; int tmp___152 ; uint8_t ldvarg406 ; uint32_t ldvarg399 ; void *ldvarg432 ; void *tmp___153 ; char *ldvarg419 ; void *tmp___154 ; int ldvarg420 ; int tmp___155 ; uint8_t *ldvarg401 ; void *tmp___156 ; unsigned int ldvarg421 ; unsigned int tmp___157 ; char *ldvarg417 ; void *tmp___158 ; uint32_t ldvarg433 ; uint8_t ldvarg425 ; uint32_t ldvarg434 ; uint8_t ldvarg410 ; uint32_t ldvarg402 ; uint32_t ldvarg400 ; int ldvarg429 ; int tmp___159 ; unsigned int ldvarg430 ; unsigned int tmp___160 ; uint16_t ldvarg426 ; srb_t *ldvarg413 ; void *tmp___161 ; uint8_t ldvarg427 ; uint32_t *ldvarg411 ; void *tmp___162 ; uint32_t ldvarg414 ; int tmp___163 ; int tmp___164 ; int tmp___165 ; int tmp___166 ; int tmp___167 ; int tmp___168 ; int tmp___169 ; int tmp___170 ; int tmp___171 ; int tmp___172 ; int tmp___173 ; int tmp___174 ; int tmp___175 ; int tmp___176 ; int tmp___177 ; { tmp = ldv_zalloc(1UL); ldvarg18 = (char *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg32 = (uint8_t *)tmp___0; tmp___1 = ldv_zalloc(1UL); ldvarg20 = (char *)tmp___1; tmp___2 = ldv_zalloc(1UL); ldvarg13 = (uint8_t *)tmp___2; tmp___3 = ldv_zalloc(1UL); ldvarg29 = tmp___3; tmp___4 = ldv_zalloc(1UL); ldvarg10 = (uint8_t *)tmp___4; tmp___5 = __VERIFIER_nondet_uint(); ldvarg27 = tmp___5; tmp___6 = __VERIFIER_nondet_int(); ldvarg26 = tmp___6; tmp___7 = __VERIFIER_nondet_int(); ldvarg21 = tmp___7; tmp___8 = ldv_zalloc(376UL); ldvarg25 = (srb_t *)tmp___8; tmp___9 = ldv_zalloc(376UL); ldvarg14 = (srb_t *)tmp___9; tmp___10 = ldv_zalloc(1UL); ldvarg17 = (uint8_t *)tmp___10; tmp___11 = __VERIFIER_nondet_uint(); ldvarg22 = tmp___11; tmp___12 = ldv_zalloc(1UL); ldvarg19 = tmp___12; tmp___13 = __VERIFIER_nondet_int(); ldvarg28 = tmp___13; tmp___14 = __VERIFIER_nondet_int(); ldvarg51 = tmp___14; tmp___15 = ldv_zalloc(1UL); ldvarg43 = (uint8_t *)tmp___15; tmp___16 
= ldv_zalloc(376UL); ldvarg71 = (srb_t *)tmp___16; tmp___17 = __VERIFIER_nondet_int(); ldvarg72 = tmp___17; tmp___18 = ldv_zalloc(4UL); ldvarg50 = (uint32_t *)tmp___18; tmp___19 = ldv_zalloc(1UL); ldvarg58 = (uint8_t *)tmp___19; tmp___20 = ldv_zalloc(376UL); ldvarg53 = (srb_t *)tmp___20; tmp___21 = __VERIFIER_nondet_uint(); ldvarg64 = tmp___21; tmp___22 = ldv_zalloc(2UL); ldvarg44 = (uint16_t *)tmp___22; tmp___23 = ldv_zalloc(1UL); ldvarg59 = (char *)tmp___23; tmp___24 = ldv_zalloc(1UL); ldvarg62 = (char *)tmp___24; tmp___25 = ldv_zalloc(64UL); ldvarg55 = (cmd_entry_t *)tmp___25; tmp___26 = ldv_zalloc(1UL); ldvarg40 = (uint8_t *)tmp___26; tmp___27 = __VERIFIER_nondet_int(); ldvarg63 = tmp___27; tmp___28 = __VERIFIER_nondet_uint(); ldvarg73 = tmp___28; tmp___29 = ldv_zalloc(1UL); ldvarg78 = (uint8_t *)tmp___29; tmp___30 = ldv_zalloc(1UL); ldvarg61 = tmp___30; tmp___31 = __VERIFIER_nondet_int(); ldvarg74 = tmp___31; tmp___32 = ldv_zalloc(376UL); ldvarg52 = (srb_t *)tmp___32; tmp___33 = ldv_zalloc(1UL); ldvarg75 = tmp___33; tmp___34 = __VERIFIER_nondet_int(); ldvarg88 = tmp___34; tmp___35 = __VERIFIER_nondet_int(); ldvarg89 = tmp___35; tmp___36 = __VERIFIER_nondet_ulong(); ldvarg90 = tmp___36; tmp___37 = __VERIFIER_nondet_int(); ldvarg87 = tmp___37; tmp___38 = ldv_zalloc(32UL); ldvarg109 = (struct pci_device_id *)tmp___38; tmp___39 = __VERIFIER_nondet_int(); ldvarg131 = tmp___39; tmp___40 = ldv_zalloc(1UL); ldvarg136 = (char *)tmp___40; tmp___41 = __VERIFIER_nondet_uint(); ldvarg140 = tmp___41; tmp___42 = ldv_zalloc(376UL); ldvarg132 = (srb_t *)tmp___42; tmp___43 = ldv_zalloc(1UL); ldvarg138 = (char *)tmp___43; tmp___44 = __VERIFIER_nondet_uint(); ldvarg149 = tmp___44; tmp___45 = ldv_zalloc(1UL); ldvarg151 = tmp___45; tmp___46 = ldv_zalloc(1UL); ldvarg154 = (uint8_t *)tmp___46; tmp___47 = ldv_zalloc(1UL); ldvarg137 = tmp___47; tmp___48 = ldv_zalloc(1UL); ldvarg135 = (uint8_t *)tmp___48; tmp___49 = ldv_zalloc(376UL); ldvarg147 = (srb_t *)tmp___49; tmp___50 = ldv_zalloc(2UL); ldvarg124 = (uint16_t *)tmp___50; tmp___51 = __VERIFIER_nondet_int(); ldvarg150 = tmp___51; tmp___52 = ldv_zalloc(4UL); ldvarg130 = (uint32_t *)tmp___52; tmp___53 = __VERIFIER_nondet_int(); ldvarg139 = tmp___53; tmp___54 = __VERIFIER_nondet_int(); ldvarg148 = tmp___54; tmp___55 = ldv_zalloc(1UL); ldvarg178 = (char *)tmp___55; tmp___56 = __VERIFIER_nondet_int(); ldvarg181 = tmp___56; tmp___57 = ldv_zalloc(376UL); ldvarg174 = (srb_t *)tmp___57; tmp___58 = __VERIFIER_nondet_uint(); ldvarg191 = tmp___58; tmp___59 = ldv_zalloc(1UL); ldvarg196 = (uint8_t *)tmp___59; tmp___60 = ldv_zalloc(376UL); ldvarg189 = (srb_t *)tmp___60; tmp___61 = __VERIFIER_nondet_uint(); ldvarg182 = tmp___61; tmp___62 = ldv_zalloc(4UL); ldvarg172 = (uint32_t *)tmp___62; tmp___63 = ldv_zalloc(1UL); ldvarg177 = (uint8_t *)tmp___63; tmp___64 = __VERIFIER_nondet_int(); ldvarg190 = tmp___64; tmp___65 = ldv_zalloc(1UL); ldvarg193 = tmp___65; tmp___66 = ldv_zalloc(2UL); ldvarg166 = (uint16_t *)tmp___66; tmp___67 = __VERIFIER_nondet_int(); ldvarg192 = tmp___67; tmp___68 = __VERIFIER_nondet_int(); ldvarg173 = tmp___68; tmp___69 = ldv_zalloc(1UL); ldvarg179 = tmp___69; tmp___70 = ldv_zalloc(1UL); ldvarg180 = (char *)tmp___70; tmp___71 = __VERIFIER_nondet_loff_t(); ldvarg213 = tmp___71; tmp___72 = __VERIFIER_nondet_int(); ldvarg212 = tmp___72; tmp___73 = ldv_zalloc(1UL); ldvarg242 = (char *)tmp___73; tmp___74 = ldv_zalloc(376UL); ldvarg236 = (srb_t *)tmp___74; tmp___75 = __VERIFIER_nondet_int(); ldvarg243 = tmp___75; tmp___76 = ldv_zalloc(4UL); ldvarg234 = 
(uint32_t *)tmp___76; tmp___77 = __VERIFIER_nondet_uint(); ldvarg244 = tmp___77; tmp___78 = ldv_zalloc(2UL); ldvarg228 = (uint16_t *)tmp___78; tmp___79 = ldv_zalloc(1UL); ldvarg258 = (uint8_t *)tmp___79; tmp___80 = ldv_zalloc(1UL); ldvarg255 = tmp___80; tmp___81 = ldv_zalloc(1UL); ldvarg240 = (char *)tmp___81; tmp___82 = ldv_zalloc(1UL); ldvarg241 = tmp___82; tmp___83 = __VERIFIER_nondet_int(); ldvarg254 = tmp___83; tmp___84 = ldv_zalloc(1UL); ldvarg224 = (uint8_t *)tmp___84; tmp___85 = __VERIFIER_nondet_int(); ldvarg235 = tmp___85; tmp___86 = __VERIFIER_nondet_int(); ldvarg252 = tmp___86; tmp___87 = ldv_zalloc(1UL); ldvarg227 = (uint8_t *)tmp___87; tmp___88 = ldv_zalloc(376UL); ldvarg251 = (srb_t *)tmp___88; tmp___89 = ldv_zalloc(1UL); ldvarg239 = (uint8_t *)tmp___89; tmp___90 = __VERIFIER_nondet_uint(); ldvarg253 = tmp___90; tmp___91 = ldv_zalloc(1UL); ldvarg299 = tmp___91; tmp___92 = ldv_zalloc(4UL); ldvarg278 = (uint32_t *)tmp___92; tmp___93 = ldv_zalloc(1UL); ldvarg285 = tmp___93; tmp___94 = ldv_zalloc(376UL); ldvarg295 = (srb_t *)tmp___94; tmp___95 = __VERIFIER_nondet_int(); ldvarg296 = tmp___95; tmp___96 = ldv_zalloc(1UL); ldvarg302 = (uint8_t *)tmp___96; tmp___97 = ldv_zalloc(1UL); ldvarg284 = (char *)tmp___97; tmp___98 = ldv_zalloc(376UL); ldvarg280 = (srb_t *)tmp___98; tmp___99 = ldv_zalloc(2UL); ldvarg272 = (uint16_t *)tmp___99; tmp___100 = __VERIFIER_nondet_uint(); ldvarg288 = tmp___100; tmp___101 = ldv_zalloc(1UL); ldvarg286 = (char *)tmp___101; tmp___102 = __VERIFIER_nondet_int(); ldvarg287 = tmp___102; tmp___103 = __VERIFIER_nondet_uint(); ldvarg297 = tmp___103; tmp___104 = ldv_zalloc(1UL); ldvarg283 = (uint8_t *)tmp___104; tmp___105 = __VERIFIER_nondet_int(); ldvarg279 = tmp___105; tmp___106 = __VERIFIER_nondet_int(); ldvarg298 = tmp___106; tmp___107 = __VERIFIER_nondet_int(); ldvarg341 = tmp___107; tmp___108 = ldv_zalloc(1UL); ldvarg307 = (uint8_t *)tmp___108; tmp___109 = ldv_zalloc(2UL); ldvarg311 = (uint16_t *)tmp___109; tmp___110 = ldv_zalloc(64UL); ldvarg322 = (cmd_entry_t *)tmp___110; tmp___111 = ldv_zalloc(1UL); ldvarg310 = (uint8_t *)tmp___111; tmp___112 = __VERIFIER_nondet_uint(); ldvarg340 = tmp___112; tmp___113 = ldv_zalloc(376UL); ldvarg338 = (srb_t *)tmp___113; tmp___114 = ldv_zalloc(1UL); ldvarg325 = (uint8_t *)tmp___114; tmp___115 = ldv_zalloc(4UL); ldvarg317 = (uint32_t *)tmp___115; tmp___116 = ldv_zalloc(1UL); ldvarg342 = tmp___116; tmp___117 = __VERIFIER_nondet_int(); ldvarg339 = tmp___117; tmp___118 = ldv_zalloc(1UL); ldvarg345 = (uint8_t *)tmp___118; tmp___119 = __VERIFIER_nondet_int(); ldvarg318 = tmp___119; tmp___120 = ldv_zalloc(1UL); ldvarg326 = (char *)tmp___120; tmp___121 = __VERIFIER_nondet_uint(); ldvarg331 = tmp___121; tmp___122 = ldv_zalloc(376UL); ldvarg319 = (srb_t *)tmp___122; tmp___123 = ldv_zalloc(1UL); ldvarg329 = (char *)tmp___123; tmp___124 = ldv_zalloc(376UL); ldvarg320 = (srb_t *)tmp___124; tmp___125 = ldv_zalloc(1UL); ldvarg328 = tmp___125; tmp___126 = __VERIFIER_nondet_int(); ldvarg330 = tmp___126; tmp___127 = ldv_zalloc(1UL); ldvarg381 = tmp___127; tmp___128 = __VERIFIER_nondet_int(); ldvarg380 = tmp___128; tmp___129 = __VERIFIER_nondet_int(); ldvarg369 = tmp___129; tmp___130 = ldv_zalloc(376UL); ldvarg362 = (srb_t *)tmp___130; tmp___131 = __VERIFIER_nondet_uint(); ldvarg379 = tmp___131; tmp___132 = ldv_zalloc(1UL); ldvarg365 = (uint8_t *)tmp___132; tmp___133 = ldv_zalloc(1UL); ldvarg350 = (uint8_t *)tmp___133; tmp___134 = ldv_zalloc(4UL); ldvarg360 = (uint32_t *)tmp___134; tmp___135 = ldv_zalloc(376UL); ldvarg377 = (srb_t 
*)tmp___135; tmp___136 = __VERIFIER_nondet_uint(); ldvarg370 = tmp___136; tmp___137 = __VERIFIER_nondet_int(); ldvarg361 = tmp___137; tmp___138 = ldv_zalloc(2UL); ldvarg354 = (uint16_t *)tmp___138; tmp___139 = ldv_zalloc(1UL); ldvarg384 = (uint8_t *)tmp___139; tmp___140 = ldv_zalloc(1UL); ldvarg367 = tmp___140; tmp___141 = __VERIFIER_nondet_int(); ldvarg378 = tmp___141; tmp___142 = ldv_zalloc(1UL); ldvarg368 = (char *)tmp___142; tmp___143 = ldv_zalloc(1UL); ldvarg353 = (uint8_t *)tmp___143; tmp___144 = ldv_zalloc(1UL); ldvarg366 = (char *)tmp___144; tmp___145 = ldv_zalloc(1UL); ldvarg404 = (uint8_t *)tmp___145; tmp___146 = ldv_zalloc(1UL); ldvarg435 = (uint8_t *)tmp___146; tmp___147 = ldv_zalloc(376UL); ldvarg428 = (srb_t *)tmp___147; tmp___148 = __VERIFIER_nondet_int(); ldvarg412 = tmp___148; tmp___149 = ldv_zalloc(1UL); ldvarg416 = (uint8_t *)tmp___149; tmp___150 = ldv_zalloc(1UL); ldvarg418 = tmp___150; tmp___151 = ldv_zalloc(2UL); ldvarg405 = (uint16_t *)tmp___151; tmp___152 = __VERIFIER_nondet_int(); ldvarg431 = tmp___152; tmp___153 = ldv_zalloc(1UL); ldvarg432 = tmp___153; tmp___154 = ldv_zalloc(1UL); ldvarg419 = (char *)tmp___154; tmp___155 = __VERIFIER_nondet_int(); ldvarg420 = tmp___155; tmp___156 = ldv_zalloc(1UL); ldvarg401 = (uint8_t *)tmp___156; tmp___157 = __VERIFIER_nondet_uint(); ldvarg421 = tmp___157; tmp___158 = ldv_zalloc(1UL); ldvarg417 = (char *)tmp___158; tmp___159 = __VERIFIER_nondet_int(); ldvarg429 = tmp___159; tmp___160 = __VERIFIER_nondet_uint(); ldvarg430 = tmp___160; tmp___161 = ldv_zalloc(376UL); ldvarg413 = (srb_t *)tmp___161; tmp___162 = ldv_zalloc(4UL); ldvarg411 = (uint32_t *)tmp___162; ldv_initialize(); memset((void *)(& ldvarg11), 0, 4UL); memset((void *)(& ldvarg31), 0, 4UL); memset((void *)(& ldvarg7), 0, 4UL); memset((void *)(& ldvarg23), 0, 4UL); memset((void *)(& ldvarg12), 0, 4UL); memset((void *)(& ldvarg8), 0, 4UL); memset((void *)(& ldvarg9), 0, 4UL); memset((void *)(& ldvarg24), 0, 4UL); memset((void *)(& ldvarg30), 0, 4UL); memset((void *)(& ldvarg6), 0, 4UL); memset((void *)(& ldvarg15), 0, 4UL); memset((void *)(& ldvarg16), 0, 4UL); memset((void *)(& ldvarg77), 0, 4UL); memset((void *)(& ldvarg67), 0, 1UL); memset((void *)(& ldvarg42), 0, 4UL); memset((void *)(& ldvarg56), 0, 4UL); memset((void *)(& ldvarg46), 0, 1UL); memset((void *)(& ldvarg37), 0, 4UL); memset((void *)(& ldvarg38), 0, 4UL); memset((void *)(& ldvarg69), 0, 2UL); memset((void *)(& ldvarg48), 0, 2UL); memset((void *)(& ldvarg47), 0, 1UL); memset((void *)(& ldvarg39), 0, 4UL); memset((void *)(& ldvarg41), 0, 4UL); memset((void *)(& ldvarg60), 0, 2UL); memset((void *)(& ldvarg49), 0, 1UL); memset((void *)(& ldvarg57), 0, 4UL); memset((void *)(& ldvarg65), 0, 4UL); memset((void *)(& ldvarg36), 0, 4UL); memset((void *)(& ldvarg66), 0, 4UL); memset((void *)(& ldvarg45), 0, 1UL); memset((void *)(& ldvarg70), 0, 1UL); memset((void *)(& ldvarg68), 0, 1UL); memset((void *)(& ldvarg54), 0, 2UL); memset((void *)(& ldvarg76), 0, 4UL); memset((void *)(& ldvarg134), 0, 4UL); memset((void *)(& ldvarg146), 0, 1UL); memset((void *)(& ldvarg141), 0, 4UL); memset((void *)(& ldvarg125), 0, 1UL); memset((void *)(& ldvarg145), 0, 2UL); memset((void *)(& ldvarg152), 0, 4UL); memset((void *)(& ldvarg129), 0, 1UL); memset((void *)(& ldvarg142), 0, 4UL); memset((void *)(& ldvarg122), 0, 4UL); memset((void *)(& ldvarg144), 0, 1UL); memset((void *)(& ldvarg128), 0, 2UL); memset((void *)(& ldvarg126), 0, 1UL); memset((void *)(& ldvarg123), 0, 4UL); memset((void *)(& ldvarg153), 0, 4UL); memset((void 
*)(& ldvarg127), 0, 1UL); memset((void *)(& ldvarg133), 0, 4UL); memset((void *)(& ldvarg143), 0, 1UL); memset((void *)(& ldvarg165), 0, 4UL); memset((void *)(& ldvarg168), 0, 1UL); memset((void *)(& ldvarg188), 0, 1UL); memset((void *)(& ldvarg185), 0, 1UL); memset((void *)(& ldvarg167), 0, 1UL); memset((void *)(& ldvarg170), 0, 2UL); memset((void *)(& ldvarg169), 0, 1UL); memset((void *)(& ldvarg175), 0, 4UL); memset((void *)(& ldvarg195), 0, 4UL); memset((void *)(& ldvarg187), 0, 2UL); memset((void *)(& ldvarg184), 0, 4UL); memset((void *)(& ldvarg186), 0, 1UL); memset((void *)(& ldvarg164), 0, 4UL); memset((void *)(& ldvarg183), 0, 4UL); memset((void *)(& ldvarg176), 0, 4UL); memset((void *)(& ldvarg194), 0, 4UL); memset((void *)(& ldvarg171), 0, 1UL); memset((void *)(& ldvarg237), 0, 4UL); memset((void *)(& ldvarg225), 0, 4UL); memset((void *)(& ldvarg247), 0, 1UL); memset((void *)(& ldvarg233), 0, 1UL); memset((void *)(& ldvarg257), 0, 4UL); memset((void *)(& ldvarg226), 0, 4UL); memset((void *)(& ldvarg221), 0, 4UL); memset((void *)(& ldvarg222), 0, 4UL); memset((void *)(& ldvarg232), 0, 2UL); memset((void *)(& ldvarg230), 0, 1UL); memset((void *)(& ldvarg246), 0, 4UL); memset((void *)(& ldvarg231), 0, 1UL); memset((void *)(& ldvarg238), 0, 4UL); memset((void *)(& ldvarg248), 0, 1UL); memset((void *)(& ldvarg223), 0, 4UL); memset((void *)(& ldvarg245), 0, 4UL); memset((void *)(& ldvarg256), 0, 4UL); memset((void *)(& ldvarg229), 0, 1UL); memset((void *)(& ldvarg220), 0, 4UL); memset((void *)(& ldvarg250), 0, 1UL); memset((void *)(& ldvarg249), 0, 2UL); memset((void *)(& ldvarg263), 0, 4UL); memset((void *)(& ldvarg290), 0, 4UL); memset((void *)(& ldvarg300), 0, 4UL); memset((void *)(& ldvarg281), 0, 4UL); memset((void *)(& ldvarg271), 0, 4UL); memset((void *)(& ldvarg274), 0, 1UL); memset((void *)(& ldvarg293), 0, 2UL); memset((void *)(& ldvarg275), 0, 1UL); memset((void *)(& ldvarg277), 0, 1UL); memset((void *)(& ldvarg291), 0, 1UL); memset((void *)(& ldvarg289), 0, 4UL); memset((void *)(& ldvarg282), 0, 4UL); memset((void *)(& ldvarg301), 0, 4UL); memset((void *)(& ldvarg270), 0, 4UL); memset((void *)(& ldvarg276), 0, 2UL); memset((void *)(& ldvarg273), 0, 1UL); memset((void *)(& ldvarg292), 0, 1UL); memset((void *)(& ldvarg294), 0, 1UL); memset((void *)(& ldvarg315), 0, 2UL); memset((void *)(& ldvarg308), 0, 4UL); memset((void *)(& ldvarg305), 0, 4UL); memset((void *)(& ldvarg313), 0, 1UL); memset((void *)(& ldvarg321), 0, 2UL); memset((void *)(& ldvarg336), 0, 2UL); memset((void *)(& ldvarg309), 0, 4UL); memset((void *)(& ldvarg333), 0, 4UL); memset((void *)(& ldvarg303), 0, 4UL); memset((void *)(& ldvarg335), 0, 1UL); memset((void *)(& ldvarg304), 0, 4UL); memset((void *)(& ldvarg343), 0, 4UL); memset((void *)(& ldvarg337), 0, 1UL); memset((void *)(& ldvarg344), 0, 4UL); memset((void *)(& ldvarg312), 0, 1UL); memset((void *)(& ldvarg334), 0, 1UL); memset((void *)(& ldvarg324), 0, 4UL); memset((void *)(& ldvarg306), 0, 4UL); memset((void *)(& ldvarg314), 0, 1UL); memset((void *)(& ldvarg323), 0, 4UL); memset((void *)(& ldvarg327), 0, 2UL); memset((void *)(& ldvarg332), 0, 4UL); memset((void *)(& ldvarg316), 0, 1UL); memset((void *)(& ldvarg372), 0, 4UL); memset((void *)(& ldvarg357), 0, 1UL); memset((void *)(& ldvarg349), 0, 4UL); memset((void *)(& ldvarg352), 0, 4UL); memset((void *)(& ldvarg363), 0, 4UL); memset((void *)(& ldvarg383), 0, 4UL); memset((void *)(& ldvarg348), 0, 4UL); memset((void *)(& ldvarg346), 0, 4UL); memset((void *)(& ldvarg364), 0, 4UL); memset((void *)(& 
ldvarg376), 0, 1UL); memset((void *)(& ldvarg374), 0, 1UL); memset((void *)(& ldvarg375), 0, 2UL); memset((void *)(& ldvarg358), 0, 2UL); memset((void *)(& ldvarg351), 0, 4UL); memset((void *)(& ldvarg347), 0, 4UL); memset((void *)(& ldvarg382), 0, 4UL); memset((void *)(& ldvarg359), 0, 1UL); memset((void *)(& ldvarg373), 0, 1UL); memset((void *)(& ldvarg371), 0, 4UL); memset((void *)(& ldvarg355), 0, 1UL); memset((void *)(& ldvarg356), 0, 1UL); memset((void *)(& ldvarg397), 0, 4UL); memset((void *)(& ldvarg408), 0, 1UL); memset((void *)(& ldvarg403), 0, 4UL); memset((void *)(& ldvarg424), 0, 1UL); memset((void *)(& ldvarg398), 0, 4UL); memset((void *)(& ldvarg423), 0, 4UL); memset((void *)(& ldvarg415), 0, 4UL); memset((void *)(& ldvarg422), 0, 4UL); memset((void *)(& ldvarg407), 0, 1UL); memset((void *)(& ldvarg409), 0, 2UL); memset((void *)(& ldvarg406), 0, 1UL); memset((void *)(& ldvarg399), 0, 4UL); memset((void *)(& ldvarg433), 0, 4UL); memset((void *)(& ldvarg425), 0, 1UL); memset((void *)(& ldvarg434), 0, 4UL); memset((void *)(& ldvarg410), 0, 1UL); memset((void *)(& ldvarg402), 0, 4UL); memset((void *)(& ldvarg400), 0, 4UL); memset((void *)(& ldvarg426), 0, 2UL); memset((void *)(& ldvarg427), 0, 1UL); memset((void *)(& ldvarg414), 0, 4UL); ldv_state_variable_33 = 0; ldv_state_variable_32 = 0; ldv_state_variable_63 = 0; ldv_state_variable_21 = 0; ldv_state_variable_71 = 0; ldv_state_variable_7 = 1; ldv_state_variable_26 = 0; ldv_state_variable_18 = 0; ldv_state_variable_72 = 0; ldv_state_variable_16 = 1; ldv_state_variable_44 = 0; ldv_state_variable_55 = 0; ldv_state_variable_27 = 0; ldv_state_variable_57 = 0; ldv_state_variable_61 = 0; ldv_state_variable_20 = 0; ldv_state_variable_10 = 1; ldv_state_variable_31 = 0; ldv_state_variable_35 = 0; ldv_state_variable_11 = 1; ldv_state_variable_48 = 0; ldv_state_variable_65 = 0; ldv_state_variable_29 = 0; ldv_state_variable_50 = 0; ldv_state_variable_39 = 0; ldv_state_variable_64 = 0; ldv_state_variable_58 = 0; ldv_state_variable_41 = 0; ldv_state_variable_12 = 1; ldv_state_variable_15 = 1; ldv_state_variable_52 = 0; ldv_state_variable_60 = 0; ldv_state_variable_56 = 0; ldv_state_variable_66 = 0; ldv_state_variable_45 = 0; ldv_state_variable_19 = 0; ldv_state_variable_62 = 0; ldv_state_variable_54 = 0; ldv_state_variable_67 = 0; ldv_state_variable_70 = 0; ldv_state_variable_68 = 0; ldv_state_variable_2 = 1; ldv_state_variable_17 = 1; ldv_state_variable_1 = 1; ldv_state_variable_30 = 0; ldv_state_variable_25 = 0; ldv_state_variable_28 = 0; ldv_state_variable_40 = 0; ldv_state_variable_14 = 1; ldv_state_variable_69 = 0; ldv_state_variable_59 = 0; ldv_state_variable_49 = 0; ldv_state_variable_24 = 0; ldv_state_variable_53 = 0; ldv_state_variable_22 = 0; ldv_state_variable_42 = 0; ref_cnt = 0; ldv_state_variable_0 = 1; ldv_state_variable_46 = 0; ldv_state_variable_23 = 0; ldv_state_variable_13 = 1; ldv_state_variable_6 = 1; ldv_state_variable_3 = 1; ldv_state_variable_36 = 0; ldv_state_variable_9 = 1; ldv_state_variable_51 = 0; ldv_state_variable_47 = 0; ldv_state_variable_8 = 1; ldv_state_variable_38 = 0; ldv_state_variable_4 = 1; ldv_state_variable_34 = 0; ldv_state_variable_37 = 0; ldv_state_variable_43 = 0; ldv_state_variable_5 = 1; ldv_63557: tmp___163 = __VERIFIER_nondet_int(); switch (tmp___163) { case 0: ; if (ldv_state_variable_33 != 0) { ldv_main_exported_33(); } else { } goto ldv_63125; case 1: ; if (ldv_state_variable_32 != 0) { ldv_main_exported_32(); } else { } goto ldv_63125; case 2: ; if (ldv_state_variable_63 != 0) { 
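/* ldv_state_variable_63 models the qlafx00 isp_ops interface: the nondeterministic
   choice below selects one callback (soft reset, interrupt handler, start_scsi, ...),
   and most callbacks may be invoked both in state 1 (before ldv_probe_63()) and in
   state 2 (after ldv_probe_63() has taken a reference). */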
tmp___164 = __VERIFIER_nondet_int(); switch (tmp___164) { case 0: ; if (ldv_state_variable_63 == 1) { qlafx00_soft_reset(qlafx00_isp_ops_group0); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_soft_reset(qlafx00_isp_ops_group0); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 1: ; if (ldv_state_variable_63 == 2) { qla24xx_write_optrom_data(qlafx00_isp_ops_group0, ldvarg32, ldvarg31, ldvarg30); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 2: ; if (ldv_state_variable_63 == 1) { qlafx00_enable_intrs(qlafx00_isp_ops_group2); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_enable_intrs(qlafx00_isp_ops_group2); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 3: ; if (ldv_state_variable_63 == 1) { qlafx00_intr_handler(ldvarg28, ldvarg29); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_intr_handler(ldvarg28, ldvarg29); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 4: ; if (ldv_state_variable_63 == 1) { qlafx00_abort_target(qlafx00_isp_ops_group1, ldvarg27, ldvarg26); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_abort_target(qlafx00_isp_ops_group1, ldvarg27, ldvarg26); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 5: ; if (ldv_state_variable_63 == 1) { qlafx00_abort_isp(qlafx00_isp_ops_group0); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_abort_isp(qlafx00_isp_ops_group0); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 6: ; if (ldv_state_variable_63 == 1) { qlafx00_start_scsi(ldvarg25); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_start_scsi(ldvarg25); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 7: ; if (ldv_state_variable_63 == 1) { qlafx00_chip_diag(qlafx00_isp_ops_group0); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_chip_diag(qlafx00_isp_ops_group0); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 8: ; if (ldv_state_variable_63 == 1) { qlafx00_soft_reset(qlafx00_isp_ops_group0); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_soft_reset(qlafx00_isp_ops_group0); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 9: ; if (ldv_state_variable_63 == 1) { qla24xx_prep_ms_iocb(qlafx00_isp_ops_group0, ldvarg24, ldvarg23); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qla24xx_prep_ms_iocb(qlafx00_isp_ops_group0, ldvarg24, ldvarg23); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 10: ; if (ldv_state_variable_63 == 1) { qlafx00_lun_reset(qlafx00_isp_ops_group1, ldvarg22, ldvarg21); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_lun_reset(qlafx00_isp_ops_group1, ldvarg22, ldvarg21); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 11: ; if (ldv_state_variable_63 == 1) { qlafx00_fw_version_str(qlafx00_isp_ops_group0, ldvarg20); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_fw_version_str(qlafx00_isp_ops_group0, ldvarg20); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 12: ; if (ldv_state_variable_63 == 1) { qlafx00_iospace_config(qlafx00_isp_ops_group2); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_iospace_config(qlafx00_isp_ops_group2); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 13: ; if (ldv_state_variable_63 == 1) { 
qlafx00_initialize_adapter(qlafx00_isp_ops_group0); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_initialize_adapter(qlafx00_isp_ops_group0); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 14: ; if (ldv_state_variable_63 == 1) { qla24xx_get_flash_version(qlafx00_isp_ops_group0, ldvarg19); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qla24xx_get_flash_version(qlafx00_isp_ops_group0, ldvarg19); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 15: ; if (ldv_state_variable_63 == 1) { qlafx00_disable_intrs(qlafx00_isp_ops_group2); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_disable_intrs(qlafx00_isp_ops_group2); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 16: ; if (ldv_state_variable_63 == 1) { qlafx00_pci_info_str(qlafx00_isp_ops_group0, ldvarg18); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_pci_info_str(qlafx00_isp_ops_group0, ldvarg18); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 17: ; if (ldv_state_variable_63 == 2) { qla24xx_read_optrom_data(qlafx00_isp_ops_group0, ldvarg17, ldvarg16, ldvarg15); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 18: ; if (ldv_state_variable_63 == 1) { qla24xx_beacon_off(qlafx00_isp_ops_group0); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qla24xx_beacon_off(qlafx00_isp_ops_group0); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 19: ; if (ldv_state_variable_63 == 1) { qlafx00_abort_command(ldvarg14); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_abort_command(ldvarg14); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 20: ; if (ldv_state_variable_63 == 1) { qlafx00_config_rings(qlafx00_isp_ops_group0); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_config_rings(qlafx00_isp_ops_group0); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 21: ; if (ldv_state_variable_63 == 1) { qlafx00_pci_config(qlafx00_isp_ops_group0); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qlafx00_pci_config(qlafx00_isp_ops_group0); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 22: ; if (ldv_state_variable_63 == 1) { qla24xx_write_nvram_data(qlafx00_isp_ops_group0, ldvarg13, ldvarg12, ldvarg11); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qla24xx_write_nvram_data(qlafx00_isp_ops_group0, ldvarg13, ldvarg12, ldvarg11); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 23: ; if (ldv_state_variable_63 == 1) { qla24xx_beacon_on(qlafx00_isp_ops_group0); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qla24xx_beacon_on(qlafx00_isp_ops_group0); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 24: ; if (ldv_state_variable_63 == 1) { qla24xx_read_nvram_data(qlafx00_isp_ops_group0, ldvarg10, ldvarg9, ldvarg8); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qla24xx_read_nvram_data(qlafx00_isp_ops_group0, ldvarg10, ldvarg9, ldvarg8); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 25: ; if (ldv_state_variable_63 == 1) { qla24xx_prep_ms_fdmi_iocb(qlafx00_isp_ops_group0, ldvarg7, ldvarg6); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 2) { qla24xx_prep_ms_fdmi_iocb(qlafx00_isp_ops_group0, ldvarg7, ldvarg6); ldv_state_variable_63 = 2; } else { } goto ldv_63129; case 26: ; if (ldv_state_variable_63 == 2) { ldv_release_63(); 
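/* release path for interface 63: return to state 1 and decrement ref_cnt */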
ldv_state_variable_63 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63129; case 27: ; if (ldv_state_variable_63 == 1) { ldv_probe_63(); ldv_state_variable_63 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63129; default: ldv_stop(); } ldv_63129: ; } else { } goto ldv_63125; case 3: ; if (ldv_state_variable_21 != 0) { ldv_main_exported_21(); } else { } goto ldv_63125; case 4: ; if (ldv_state_variable_71 != 0) { tmp___165 = __VERIFIER_nondet_int(); switch (tmp___165) { case 0: ; if (ldv_state_variable_71 == 1) { qla2x00_reset_adapter(qla2100_isp_ops_group0); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_reset_adapter(qla2100_isp_ops_group0); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 1: ; if (ldv_state_variable_71 == 1) { qla2x00_enable_intrs(qla2100_isp_ops_group2); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_enable_intrs(qla2100_isp_ops_group2); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 2: ; if (ldv_state_variable_71 == 2) { qla2x00_write_optrom_data(qla2100_isp_ops_group0, ldvarg78, ldvarg77, ldvarg76); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 3: ; if (ldv_state_variable_71 == 1) { qla2100_intr_handler(ldvarg74, ldvarg75); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2100_intr_handler(ldvarg74, ldvarg75); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 4: ; if (ldv_state_variable_71 == 1) { qla2x00_abort_target(qla2100_isp_ops_group1, ldvarg73, ldvarg72); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_abort_target(qla2100_isp_ops_group1, ldvarg73, ldvarg72); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 5: ; if (ldv_state_variable_71 == 1) { qla2x00_update_fw_options(qla2100_isp_ops_group0); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_update_fw_options(qla2100_isp_ops_group0); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 6: ; if (ldv_state_variable_71 == 1) { qla2x00_start_scsi(ldvarg71); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_start_scsi(ldvarg71); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 7: ; if (ldv_state_variable_71 == 1) { qla2x00_abort_isp(qla2100_isp_ops_group0); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_abort_isp(qla2100_isp_ops_group0); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 8: ; if (ldv_state_variable_71 == 1) { qla2x00_chip_diag(qla2100_isp_ops_group0); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_chip_diag(qla2100_isp_ops_group0); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 9: ; if (ldv_state_variable_71 == 1) { qla2x00_fabric_logout(qla2100_isp_ops_group0, (int )ldvarg69, (int )ldvarg68, (int )ldvarg67, (int )ldvarg70); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_fabric_logout(qla2100_isp_ops_group0, (int )ldvarg69, (int )ldvarg68, (int )ldvarg67, (int )ldvarg70); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 10: ; if (ldv_state_variable_71 == 1) { qla2x00_reset_chip(qla2100_isp_ops_group0); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_reset_chip(qla2100_isp_ops_group0); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 11: ; if (ldv_state_variable_71 == 1) { qla2x00_prep_ms_iocb(qla2100_isp_ops_group0, ldvarg66, ldvarg65); ldv_state_variable_71 = 1; } else { } if 
(ldv_state_variable_71 == 2) { qla2x00_prep_ms_iocb(qla2100_isp_ops_group0, ldvarg66, ldvarg65); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 12: ; if (ldv_state_variable_71 == 1) { qla2x00_lun_reset(qla2100_isp_ops_group1, ldvarg64, ldvarg63); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_lun_reset(qla2100_isp_ops_group1, ldvarg64, ldvarg63); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 13: ; if (ldv_state_variable_71 == 1) { qla2x00_fw_version_str(qla2100_isp_ops_group0, ldvarg62); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_fw_version_str(qla2100_isp_ops_group0, ldvarg62); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 14: ; if (ldv_state_variable_71 == 1) { qla2x00_iospace_config(qla2100_isp_ops_group2); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_iospace_config(qla2100_isp_ops_group2); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 15: ; if (ldv_state_variable_71 == 1) { qla2x00_initialize_adapter(qla2100_isp_ops_group0); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_initialize_adapter(qla2100_isp_ops_group0); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 16: ; if (ldv_state_variable_71 == 1) { qla2x00_disable_intrs(qla2100_isp_ops_group2); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_disable_intrs(qla2100_isp_ops_group2); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 17: ; if (ldv_state_variable_71 == 1) { qla2x00_get_flash_version(qla2100_isp_ops_group0, ldvarg61); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_get_flash_version(qla2100_isp_ops_group0, ldvarg61); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 18: ; if (ldv_state_variable_71 == 1) { qla2x00_calc_iocbs_32((int )ldvarg60); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_calc_iocbs_32((int )ldvarg60); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 19: ; if (ldv_state_variable_71 == 1) { qla2x00_nvram_config(qla2100_isp_ops_group0); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_nvram_config(qla2100_isp_ops_group0); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 20: ; if (ldv_state_variable_71 == 1) { qla2x00_pci_info_str(qla2100_isp_ops_group0, ldvarg59); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_pci_info_str(qla2100_isp_ops_group0, ldvarg59); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 21: ; if (ldv_state_variable_71 == 2) { qla2x00_read_optrom_data(qla2100_isp_ops_group0, ldvarg58, ldvarg57, ldvarg56); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 22: ; if (ldv_state_variable_71 == 1) { qla2x00_build_scsi_iocbs_32(ldvarg53, ldvarg55, (int )ldvarg54); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_build_scsi_iocbs_32(ldvarg53, ldvarg55, (int )ldvarg54); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 23: ; if (ldv_state_variable_71 == 1) { qla2x00_abort_command(ldvarg52); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_abort_command(ldvarg52); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 24: ; if (ldv_state_variable_71 == 1) { qla2100_fw_dump(qla2100_isp_ops_group0, ldvarg51); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2100_fw_dump(qla2100_isp_ops_group0, 
ldvarg51); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 25: ; if (ldv_state_variable_71 == 1) { qla2x00_config_rings(qla2100_isp_ops_group0); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_config_rings(qla2100_isp_ops_group0); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 26: ; if (ldv_state_variable_71 == 1) { qla2x00_load_risc(qla2100_isp_ops_group0, ldvarg50); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_load_risc(qla2100_isp_ops_group0, ldvarg50); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 27: ; if (ldv_state_variable_71 == 1) { qla2100_pci_config(qla2100_isp_ops_group0); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2100_pci_config(qla2100_isp_ops_group0); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 28: ; if (ldv_state_variable_71 == 1) { qla2x00_login_fabric(qla2100_isp_ops_group0, (int )ldvarg48, (int )ldvarg46, (int )ldvarg45, (int )ldvarg49, ldvarg44, (int )ldvarg47); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_login_fabric(qla2100_isp_ops_group0, (int )ldvarg48, (int )ldvarg46, (int )ldvarg45, (int )ldvarg49, ldvarg44, (int )ldvarg47); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 29: ; if (ldv_state_variable_71 == 1) { qla2x00_write_nvram_data(qla2100_isp_ops_group0, ldvarg43, ldvarg42, ldvarg41); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_write_nvram_data(qla2100_isp_ops_group0, ldvarg43, ldvarg42, ldvarg41); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 30: ; if (ldv_state_variable_71 == 1) { qla2x00_read_nvram_data(qla2100_isp_ops_group0, ldvarg40, ldvarg39, ldvarg38); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_read_nvram_data(qla2100_isp_ops_group0, ldvarg40, ldvarg39, ldvarg38); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 31: ; if (ldv_state_variable_71 == 1) { qla2x00_prep_ms_fdmi_iocb(qla2100_isp_ops_group0, ldvarg37, ldvarg36); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { qla2x00_prep_ms_fdmi_iocb(qla2100_isp_ops_group0, ldvarg37, ldvarg36); ldv_state_variable_71 = 2; } else { } goto ldv_63161; case 32: ; if (ldv_state_variable_71 == 2) { ldv_release_71(); ldv_state_variable_71 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63161; case 33: ; if (ldv_state_variable_71 == 1) { ldv_probe_71(); ldv_state_variable_71 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63161; default: ldv_stop(); } ldv_63161: ; } else { } goto ldv_63125; case 5: ; goto ldv_63125; case 6: ; if (ldv_state_variable_26 != 0) { ldv_main_exported_26(); } else { } goto ldv_63125; case 7: ; if (ldv_state_variable_18 != 0) { ldv_main_exported_18(); } else { } goto ldv_63125; case 8: ; if (ldv_state_variable_72 != 0) { tmp___166 = __VERIFIER_nondet_int(); switch (tmp___166) { case 0: ; if (ldv_state_variable_72 == 1) { qla2xxx_scan_finished(qla2xxx_driver_template_group1, ldvarg90); ldv_state_variable_72 = 1; } else { } goto ldv_63201; case 1: ; if (ldv_state_variable_72 == 1) { qla2xxx_slave_configure(qla2xxx_driver_template_group2); ldv_state_variable_72 = 1; } else { } goto ldv_63201; case 2: ; if (ldv_state_variable_72 == 1) { qla2x00_change_queue_depth(qla2xxx_driver_template_group2, ldvarg89, ldvarg88); ldv_state_variable_72 = 1; } else { } goto ldv_63201; case 3: ; if (ldv_state_variable_72 == 1) { qla2xxx_queuecommand(qla2xxx_driver_template_group1, 
qla2xxx_driver_template_group0); ldv_state_variable_72 = 1; } else { } goto ldv_63201; case 4: ; if (ldv_state_variable_72 == 1) { qla2xxx_eh_target_reset(qla2xxx_driver_template_group0); ldv_state_variable_72 = 1; } else { } goto ldv_63201; case 5: ; if (ldv_state_variable_72 == 1) { qla2xxx_eh_device_reset(qla2xxx_driver_template_group0); ldv_state_variable_72 = 1; } else { } goto ldv_63201; case 6: ; if (ldv_state_variable_72 == 1) { qla2xxx_scan_start(qla2xxx_driver_template_group1); ldv_state_variable_72 = 1; } else { } goto ldv_63201; case 7: ; if (ldv_state_variable_72 == 1) { qla2xxx_eh_abort(qla2xxx_driver_template_group0); ldv_state_variable_72 = 1; } else { } goto ldv_63201; case 8: ; if (ldv_state_variable_72 == 1) { qla2xxx_slave_alloc(qla2xxx_driver_template_group2); ldv_state_variable_72 = 1; } else { } goto ldv_63201; case 9: ; if (ldv_state_variable_72 == 1) { qla2xxx_slave_destroy(qla2xxx_driver_template_group2); ldv_state_variable_72 = 1; } else { } goto ldv_63201; case 10: ; if (ldv_state_variable_72 == 1) { qla2x00_change_queue_type(qla2xxx_driver_template_group2, ldvarg87); ldv_state_variable_72 = 1; } else { } goto ldv_63201; case 11: ; if (ldv_state_variable_72 == 1) { qla2xxx_eh_bus_reset(qla2xxx_driver_template_group0); ldv_state_variable_72 = 1; } else { } goto ldv_63201; case 12: ; if (ldv_state_variable_72 == 1) { qla2xxx_eh_host_reset(qla2xxx_driver_template_group0); ldv_state_variable_72 = 1; } else { } goto ldv_63201; default: ldv_stop(); } ldv_63201: ; } else { } goto ldv_63125; case 9: ; goto ldv_63125; case 10: ; if (ldv_state_variable_44 != 0) { ldv_main_exported_44(); } else { } goto ldv_63125; case 11: ; if (ldv_state_variable_55 != 0) { ldv_main_exported_55(); } else { } goto ldv_63125; case 12: ; if (ldv_state_variable_27 != 0) { ldv_main_exported_27(); } else { } goto ldv_63125; case 13: ; if (ldv_state_variable_57 != 0) { ldv_main_exported_57(); } else { } goto ldv_63125; case 14: ; if (ldv_state_variable_61 != 0) { tmp___167 = __VERIFIER_nondet_int(); switch (tmp___167) { case 0: ; if (ldv_state_variable_61 == 1) { ldv_retval_1 = qla2x00_probe_one(qla2xxx_pci_driver_group0, (struct pci_device_id const *)ldvarg109); if (ldv_retval_1 == 0) { ldv_state_variable_61 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_63222; case 1: ; if (ldv_state_variable_61 == 2) { qla2x00_shutdown(qla2xxx_pci_driver_group0); ldv_state_variable_61 = 3; } else { } goto ldv_63222; case 2: ; if (ldv_state_variable_61 == 3) { qla2x00_remove_one(qla2xxx_pci_driver_group0); ldv_state_variable_61 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_61 == 2) { qla2x00_remove_one(qla2xxx_pci_driver_group0); ldv_state_variable_61 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63222; default: ldv_stop(); } ldv_63222: ; } else { } goto ldv_63125; case 15: ; if (ldv_state_variable_20 != 0) { ldv_main_exported_20(); } else { } goto ldv_63125; case 16: ; goto ldv_63125; case 17: ; if (ldv_state_variable_31 != 0) { ldv_main_exported_31(); } else { } goto ldv_63125; case 18: ; if (ldv_state_variable_35 != 0) { ldv_main_exported_35(); } else { } goto ldv_63125; case 19: ; goto ldv_63125; case 20: ; if (ldv_state_variable_48 != 0) { ldv_main_exported_48(); } else { } goto ldv_63125; case 21: ; if (ldv_state_variable_65 != 0) { tmp___168 = __VERIFIER_nondet_int(); switch (tmp___168) { case 0: ; if (ldv_state_variable_65 == 1) { qla24xx_reset_adapter(qla8044_isp_ops_group0); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { 
qla24xx_reset_adapter(qla8044_isp_ops_group0); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 1: ; if (ldv_state_variable_65 == 2) { qla8044_write_optrom_data(qla8044_isp_ops_group0, ldvarg154, ldvarg153, ldvarg152); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 2: ; if (ldv_state_variable_65 == 1) { qla82xx_enable_intrs(qla8044_isp_ops_group2); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla82xx_enable_intrs(qla8044_isp_ops_group2); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 3: ; if (ldv_state_variable_65 == 1) { qla8044_intr_handler(ldvarg150, ldvarg151); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla8044_intr_handler(ldvarg150, ldvarg151); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 4: ; if (ldv_state_variable_65 == 1) { qla24xx_abort_target(qla8044_isp_ops_group1, ldvarg149, ldvarg148); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla24xx_abort_target(qla8044_isp_ops_group1, ldvarg149, ldvarg148); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 5: ; if (ldv_state_variable_65 == 1) { qla24xx_update_fw_options(qla8044_isp_ops_group0); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla24xx_update_fw_options(qla8044_isp_ops_group0); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 6: ; if (ldv_state_variable_65 == 1) { qla8044_abort_isp(qla8044_isp_ops_group0); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla8044_abort_isp(qla8044_isp_ops_group0); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 7: ; if (ldv_state_variable_65 == 1) { qla82xx_start_scsi(ldvarg147); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla82xx_start_scsi(ldvarg147); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 8: ; if (ldv_state_variable_65 == 1) { qla24xx_chip_diag(qla8044_isp_ops_group0); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla24xx_chip_diag(qla8044_isp_ops_group0); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 9: ; if (ldv_state_variable_65 == 1) { qla24xx_fabric_logout(qla8044_isp_ops_group0, (int )ldvarg145, (int )ldvarg144, (int )ldvarg143, (int )ldvarg146); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla24xx_fabric_logout(qla8044_isp_ops_group0, (int )ldvarg145, (int )ldvarg144, (int )ldvarg143, (int )ldvarg146); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 10: ; if (ldv_state_variable_65 == 1) { qla82xx_reset_chip(qla8044_isp_ops_group0); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla82xx_reset_chip(qla8044_isp_ops_group0); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 11: ; if (ldv_state_variable_65 == 1) { qla24xx_prep_ms_iocb(qla8044_isp_ops_group0, ldvarg142, ldvarg141); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla24xx_prep_ms_iocb(qla8044_isp_ops_group0, ldvarg142, ldvarg141); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 12: ; if (ldv_state_variable_65 == 1) { qla24xx_lun_reset(qla8044_isp_ops_group1, ldvarg140, ldvarg139); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla24xx_lun_reset(qla8044_isp_ops_group1, ldvarg140, ldvarg139); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 13: ; if (ldv_state_variable_65 == 1) { qla24xx_fw_version_str(qla8044_isp_ops_group0, ldvarg138); ldv_state_variable_65 = 1; } else { } if 
(ldv_state_variable_65 == 2) { qla24xx_fw_version_str(qla8044_isp_ops_group0, ldvarg138); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 14: ; if (ldv_state_variable_65 == 1) { qla82xx_iospace_config(qla8044_isp_ops_group2); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla82xx_iospace_config(qla8044_isp_ops_group2); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 15: ; if (ldv_state_variable_65 == 1) { qla2x00_initialize_adapter(qla8044_isp_ops_group0); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla2x00_initialize_adapter(qla8044_isp_ops_group0); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 16: ; if (ldv_state_variable_65 == 1) { qla82xx_get_flash_version(qla8044_isp_ops_group0, ldvarg137); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla82xx_get_flash_version(qla8044_isp_ops_group0, ldvarg137); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 17: ; if (ldv_state_variable_65 == 1) { qla82xx_disable_intrs(qla8044_isp_ops_group2); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla82xx_disable_intrs(qla8044_isp_ops_group2); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 18: ; if (ldv_state_variable_65 == 1) { qla81xx_nvram_config(qla8044_isp_ops_group0); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla81xx_nvram_config(qla8044_isp_ops_group0); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 19: ; if (ldv_state_variable_65 == 1) { qla24xx_pci_info_str(qla8044_isp_ops_group0, ldvarg136); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla24xx_pci_info_str(qla8044_isp_ops_group0, ldvarg136); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 20: ; if (ldv_state_variable_65 == 2) { qla82xx_read_optrom_data(qla8044_isp_ops_group0, ldvarg135, ldvarg134, ldvarg133); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 21: ; if (ldv_state_variable_65 == 1) { qla82xx_beacon_off(qla8044_isp_ops_group0); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla82xx_beacon_off(qla8044_isp_ops_group0); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 22: ; if (ldv_state_variable_65 == 1) { qla24xx_abort_command(ldvarg132); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla24xx_abort_command(ldvarg132); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 23: ; if (ldv_state_variable_65 == 1) { qla24xx_fw_dump(qla8044_isp_ops_group0, ldvarg131); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla24xx_fw_dump(qla8044_isp_ops_group0, ldvarg131); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 24: ; if (ldv_state_variable_65 == 1) { qla82xx_config_rings(qla8044_isp_ops_group0); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla82xx_config_rings(qla8044_isp_ops_group0); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 25: ; if (ldv_state_variable_65 == 1) { qla82xx_load_risc(qla8044_isp_ops_group0, ldvarg130); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla82xx_load_risc(qla8044_isp_ops_group0, ldvarg130); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 26: ; if (ldv_state_variable_65 == 1) { qla82xx_pci_config(qla8044_isp_ops_group0); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla82xx_pci_config(qla8044_isp_ops_group0); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 
27: ; if (ldv_state_variable_65 == 1) { qla24xx_login_fabric(qla8044_isp_ops_group0, (int )ldvarg128, (int )ldvarg126, (int )ldvarg125, (int )ldvarg129, ldvarg124, (int )ldvarg127); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla24xx_login_fabric(qla8044_isp_ops_group0, (int )ldvarg128, (int )ldvarg126, (int )ldvarg125, (int )ldvarg129, ldvarg124, (int )ldvarg127); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 28: ; if (ldv_state_variable_65 == 1) { qla82xx_beacon_on(qla8044_isp_ops_group0); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla82xx_beacon_on(qla8044_isp_ops_group0); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 29: ; if (ldv_state_variable_65 == 1) { qla24xx_prep_ms_fdmi_iocb(qla8044_isp_ops_group0, ldvarg123, ldvarg122); ldv_state_variable_65 = 1; } else { } if (ldv_state_variable_65 == 2) { qla24xx_prep_ms_fdmi_iocb(qla8044_isp_ops_group0, ldvarg123, ldvarg122); ldv_state_variable_65 = 2; } else { } goto ldv_63234; case 30: ; if (ldv_state_variable_65 == 2) { ldv_release_65(); ldv_state_variable_65 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63234; case 31: ; if (ldv_state_variable_65 == 1) { ldv_probe_65(); ldv_state_variable_65 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63234; default: ldv_stop(); } ldv_63234: ; } else { } goto ldv_63125; case 22: ; if (ldv_state_variable_29 != 0) { ldv_main_exported_29(); } else { } goto ldv_63125; case 23: ; if (ldv_state_variable_50 != 0) { ldv_main_exported_50(); } else { } goto ldv_63125; case 24: ; if (ldv_state_variable_39 != 0) { ldv_main_exported_39(); } else { } goto ldv_63125; case 25: ; if (ldv_state_variable_64 != 0) { tmp___169 = __VERIFIER_nondet_int(); switch (tmp___169) { case 0: ; if (ldv_state_variable_64 == 1) { qla24xx_reset_adapter(qla83xx_isp_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_reset_adapter(qla83xx_isp_ops_group0); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 1: ; if (ldv_state_variable_64 == 2) { qla24xx_write_optrom_data(qla83xx_isp_ops_group0, ldvarg196, ldvarg195, ldvarg194); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 2: ; if (ldv_state_variable_64 == 1) { qla24xx_enable_intrs(qla83xx_isp_ops_group2); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_enable_intrs(qla83xx_isp_ops_group2); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 3: ; if (ldv_state_variable_64 == 1) { qla24xx_intr_handler(ldvarg192, ldvarg193); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_intr_handler(ldvarg192, ldvarg193); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 4: ; if (ldv_state_variable_64 == 1) { qla24xx_abort_target(qla83xx_isp_ops_group1, ldvarg191, ldvarg190); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_abort_target(qla83xx_isp_ops_group1, ldvarg191, ldvarg190); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 5: ; if (ldv_state_variable_64 == 1) { qla81xx_update_fw_options(qla83xx_isp_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla81xx_update_fw_options(qla83xx_isp_ops_group0); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 6: ; if (ldv_state_variable_64 == 1) { qla2x00_abort_isp(qla83xx_isp_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla2x00_abort_isp(qla83xx_isp_ops_group0); ldv_state_variable_64 = 2; } else { } goto 
ldv_63272; case 7: ; if (ldv_state_variable_64 == 1) { qla24xx_dif_start_scsi(ldvarg189); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_dif_start_scsi(ldvarg189); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 8: ; if (ldv_state_variable_64 == 1) { qla24xx_chip_diag(qla83xx_isp_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_chip_diag(qla83xx_isp_ops_group0); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 9: ; if (ldv_state_variable_64 == 1) { qla24xx_fabric_logout(qla83xx_isp_ops_group0, (int )ldvarg187, (int )ldvarg186, (int )ldvarg185, (int )ldvarg188); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_fabric_logout(qla83xx_isp_ops_group0, (int )ldvarg187, (int )ldvarg186, (int )ldvarg185, (int )ldvarg188); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 10: ; if (ldv_state_variable_64 == 1) { qla24xx_reset_chip(qla83xx_isp_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_reset_chip(qla83xx_isp_ops_group0); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 11: ; if (ldv_state_variable_64 == 1) { qla24xx_prep_ms_iocb(qla83xx_isp_ops_group0, ldvarg184, ldvarg183); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_prep_ms_iocb(qla83xx_isp_ops_group0, ldvarg184, ldvarg183); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 12: ; if (ldv_state_variable_64 == 1) { qla24xx_lun_reset(qla83xx_isp_ops_group1, ldvarg182, ldvarg181); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_lun_reset(qla83xx_isp_ops_group1, ldvarg182, ldvarg181); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 13: ; if (ldv_state_variable_64 == 1) { qla24xx_fw_version_str(qla83xx_isp_ops_group0, ldvarg180); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_fw_version_str(qla83xx_isp_ops_group0, ldvarg180); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 14: ; if (ldv_state_variable_64 == 1) { qla83xx_iospace_config(qla83xx_isp_ops_group2); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla83xx_iospace_config(qla83xx_isp_ops_group2); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 15: ; if (ldv_state_variable_64 == 1) { qla2x00_initialize_adapter(qla83xx_isp_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla2x00_initialize_adapter(qla83xx_isp_ops_group0); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 16: ; if (ldv_state_variable_64 == 1) { qla24xx_get_flash_version(qla83xx_isp_ops_group0, ldvarg179); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_get_flash_version(qla83xx_isp_ops_group0, ldvarg179); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 17: ; if (ldv_state_variable_64 == 1) { qla24xx_disable_intrs(qla83xx_isp_ops_group2); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_disable_intrs(qla83xx_isp_ops_group2); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 18: ; if (ldv_state_variable_64 == 1) { qla81xx_nvram_config(qla83xx_isp_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla81xx_nvram_config(qla83xx_isp_ops_group0); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 19: ; if (ldv_state_variable_64 == 1) { qla24xx_pci_info_str(qla83xx_isp_ops_group0, ldvarg178); ldv_state_variable_64 = 
1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_pci_info_str(qla83xx_isp_ops_group0, ldvarg178); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 20: ; if (ldv_state_variable_64 == 2) { qla25xx_read_optrom_data(qla83xx_isp_ops_group0, ldvarg177, ldvarg176, ldvarg175); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 21: ; if (ldv_state_variable_64 == 1) { qla83xx_beacon_blink(qla83xx_isp_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla83xx_beacon_blink(qla83xx_isp_ops_group0); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 22: ; if (ldv_state_variable_64 == 1) { qla24xx_beacon_off(qla83xx_isp_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_beacon_off(qla83xx_isp_ops_group0); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 23: ; if (ldv_state_variable_64 == 1) { qla24xx_abort_command(ldvarg174); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_abort_command(ldvarg174); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 24: ; if (ldv_state_variable_64 == 1) { qla83xx_fw_dump(qla83xx_isp_ops_group0, ldvarg173); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla83xx_fw_dump(qla83xx_isp_ops_group0, ldvarg173); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 25: ; if (ldv_state_variable_64 == 1) { qla24xx_config_rings(qla83xx_isp_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_config_rings(qla83xx_isp_ops_group0); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 26: ; if (ldv_state_variable_64 == 1) { qla81xx_load_risc(qla83xx_isp_ops_group0, ldvarg172); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla81xx_load_risc(qla83xx_isp_ops_group0, ldvarg172); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 27: ; if (ldv_state_variable_64 == 1) { qla25xx_pci_config(qla83xx_isp_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla25xx_pci_config(qla83xx_isp_ops_group0); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 28: ; if (ldv_state_variable_64 == 1) { qla24xx_login_fabric(qla83xx_isp_ops_group0, (int )ldvarg170, (int )ldvarg168, (int )ldvarg167, (int )ldvarg171, ldvarg166, (int )ldvarg169); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_login_fabric(qla83xx_isp_ops_group0, (int )ldvarg170, (int )ldvarg168, (int )ldvarg167, (int )ldvarg171, ldvarg166, (int )ldvarg169); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 29: ; if (ldv_state_variable_64 == 1) { qla24xx_beacon_on(qla83xx_isp_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_beacon_on(qla83xx_isp_ops_group0); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 30: ; if (ldv_state_variable_64 == 1) { qla24xx_prep_ms_fdmi_iocb(qla83xx_isp_ops_group0, ldvarg165, ldvarg164); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 2) { qla24xx_prep_ms_fdmi_iocb(qla83xx_isp_ops_group0, ldvarg165, ldvarg164); ldv_state_variable_64 = 2; } else { } goto ldv_63272; case 31: ; if (ldv_state_variable_64 == 2) { ldv_release_64(); ldv_state_variable_64 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63272; case 32: ; if (ldv_state_variable_64 == 1) { ldv_probe_64(); ldv_state_variable_64 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63272; default: ldv_stop(); } ldv_63272: ; } else { } goto ldv_63125; case 
26: ; if (ldv_state_variable_58 != 0) { ldv_main_exported_58(); } else { } goto ldv_63125; case 27: ; if (ldv_state_variable_41 != 0) { ldv_main_exported_41(); } else { } goto ldv_63125; case 28: ; goto ldv_63125; case 29: ; goto ldv_63125; case 30: ; if (ldv_state_variable_52 != 0) { ldv_main_exported_52(); } else { } goto ldv_63125; case 31: ; if (ldv_state_variable_60 != 0) { tmp___170 = __VERIFIER_nondet_int(); switch (tmp___170) { case 0: ; if (ldv_state_variable_60 == 2) { noop_llseek(apidev_fops_group2, ldvarg213, ldvarg212); ldv_state_variable_60 = 2; } else { } goto ldv_63313; case 1: ; if (ldv_state_variable_60 == 1) { ldv_retval_2 = ldv_open_60(); if (ldv_retval_2 == 0) { ldv_state_variable_60 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_63313; case 2: ; if (ldv_state_variable_60 == 2) { ldv_release_60(); ldv_state_variable_60 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63313; default: ldv_stop(); } ldv_63313: ; } else { } goto ldv_63125; case 32: ; if (ldv_state_variable_56 != 0) { ldv_main_exported_56(); } else { } goto ldv_63125; case 33: ; if (ldv_state_variable_66 != 0) { tmp___171 = __VERIFIER_nondet_int(); switch (tmp___171) { case 0: ; if (ldv_state_variable_66 == 1) { qla24xx_reset_adapter(qla82xx_isp_ops_group0); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_reset_adapter(qla82xx_isp_ops_group0); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 1: ; if (ldv_state_variable_66 == 2) { qla82xx_write_optrom_data(qla82xx_isp_ops_group0, ldvarg258, ldvarg257, ldvarg256); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 2: ; if (ldv_state_variable_66 == 1) { qla82xx_enable_intrs(qla82xx_isp_ops_group2); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla82xx_enable_intrs(qla82xx_isp_ops_group2); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 3: ; if (ldv_state_variable_66 == 1) { qla82xx_intr_handler(ldvarg254, ldvarg255); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla82xx_intr_handler(ldvarg254, ldvarg255); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 4: ; if (ldv_state_variable_66 == 1) { qla24xx_abort_target(qla82xx_isp_ops_group1, ldvarg253, ldvarg252); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_abort_target(qla82xx_isp_ops_group1, ldvarg253, ldvarg252); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 5: ; if (ldv_state_variable_66 == 1) { qla24xx_update_fw_options(qla82xx_isp_ops_group0); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_update_fw_options(qla82xx_isp_ops_group0); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 6: ; if (ldv_state_variable_66 == 1) { qla82xx_abort_isp(qla82xx_isp_ops_group0); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla82xx_abort_isp(qla82xx_isp_ops_group0); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 7: ; if (ldv_state_variable_66 == 1) { qla82xx_start_scsi(ldvarg251); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla82xx_start_scsi(ldvarg251); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 8: ; if (ldv_state_variable_66 == 1) { qla24xx_chip_diag(qla82xx_isp_ops_group0); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_chip_diag(qla82xx_isp_ops_group0); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 9: ; if (ldv_state_variable_66 == 1) { 
qla24xx_fabric_logout(qla82xx_isp_ops_group0, (int )ldvarg249, (int )ldvarg248, (int )ldvarg247, (int )ldvarg250); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_fabric_logout(qla82xx_isp_ops_group0, (int )ldvarg249, (int )ldvarg248, (int )ldvarg247, (int )ldvarg250); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 10: ; if (ldv_state_variable_66 == 1) { qla82xx_reset_chip(qla82xx_isp_ops_group0); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla82xx_reset_chip(qla82xx_isp_ops_group0); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 11: ; if (ldv_state_variable_66 == 1) { qla24xx_prep_ms_iocb(qla82xx_isp_ops_group0, ldvarg246, ldvarg245); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_prep_ms_iocb(qla82xx_isp_ops_group0, ldvarg246, ldvarg245); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 12: ; if (ldv_state_variable_66 == 1) { qla24xx_lun_reset(qla82xx_isp_ops_group1, ldvarg244, ldvarg243); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_lun_reset(qla82xx_isp_ops_group1, ldvarg244, ldvarg243); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 13: ; if (ldv_state_variable_66 == 1) { qla24xx_fw_version_str(qla82xx_isp_ops_group0, ldvarg242); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_fw_version_str(qla82xx_isp_ops_group0, ldvarg242); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 14: ; if (ldv_state_variable_66 == 1) { qla82xx_iospace_config(qla82xx_isp_ops_group2); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla82xx_iospace_config(qla82xx_isp_ops_group2); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 15: ; if (ldv_state_variable_66 == 1) { qla2x00_initialize_adapter(qla82xx_isp_ops_group0); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla2x00_initialize_adapter(qla82xx_isp_ops_group0); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 16: ; if (ldv_state_variable_66 == 1) { qla82xx_get_flash_version(qla82xx_isp_ops_group0, ldvarg241); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla82xx_get_flash_version(qla82xx_isp_ops_group0, ldvarg241); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 17: ; if (ldv_state_variable_66 == 1) { qla82xx_disable_intrs(qla82xx_isp_ops_group2); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla82xx_disable_intrs(qla82xx_isp_ops_group2); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 18: ; if (ldv_state_variable_66 == 1) { qla81xx_nvram_config(qla82xx_isp_ops_group0); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla81xx_nvram_config(qla82xx_isp_ops_group0); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 19: ; if (ldv_state_variable_66 == 1) { qla24xx_pci_info_str(qla82xx_isp_ops_group0, ldvarg240); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_pci_info_str(qla82xx_isp_ops_group0, ldvarg240); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 20: ; if (ldv_state_variable_66 == 2) { qla82xx_read_optrom_data(qla82xx_isp_ops_group0, ldvarg239, ldvarg238, ldvarg237); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 21: ; if (ldv_state_variable_66 == 1) { qla82xx_beacon_off(qla82xx_isp_ops_group0); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla82xx_beacon_off(qla82xx_isp_ops_group0); 
ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 22: ; if (ldv_state_variable_66 == 1) { qla24xx_abort_command(ldvarg236); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_abort_command(ldvarg236); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 23: ; if (ldv_state_variable_66 == 1) { qla24xx_fw_dump(qla82xx_isp_ops_group0, ldvarg235); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_fw_dump(qla82xx_isp_ops_group0, ldvarg235); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 24: ; if (ldv_state_variable_66 == 1) { qla82xx_config_rings(qla82xx_isp_ops_group0); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla82xx_config_rings(qla82xx_isp_ops_group0); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 25: ; if (ldv_state_variable_66 == 1) { qla82xx_load_risc(qla82xx_isp_ops_group0, ldvarg234); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla82xx_load_risc(qla82xx_isp_ops_group0, ldvarg234); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 26: ; if (ldv_state_variable_66 == 1) { qla82xx_pci_config(qla82xx_isp_ops_group0); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla82xx_pci_config(qla82xx_isp_ops_group0); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 27: ; if (ldv_state_variable_66 == 1) { qla24xx_login_fabric(qla82xx_isp_ops_group0, (int )ldvarg232, (int )ldvarg230, (int )ldvarg229, (int )ldvarg233, ldvarg228, (int )ldvarg231); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_login_fabric(qla82xx_isp_ops_group0, (int )ldvarg232, (int )ldvarg230, (int )ldvarg229, (int )ldvarg233, ldvarg228, (int )ldvarg231); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 28: ; if (ldv_state_variable_66 == 1) { qla24xx_write_nvram_data(qla82xx_isp_ops_group0, ldvarg227, ldvarg226, ldvarg225); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_write_nvram_data(qla82xx_isp_ops_group0, ldvarg227, ldvarg226, ldvarg225); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 29: ; if (ldv_state_variable_66 == 1) { qla82xx_beacon_on(qla82xx_isp_ops_group0); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla82xx_beacon_on(qla82xx_isp_ops_group0); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 30: ; if (ldv_state_variable_66 == 1) { qla24xx_read_nvram_data(qla82xx_isp_ops_group0, ldvarg224, ldvarg223, ldvarg222); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_read_nvram_data(qla82xx_isp_ops_group0, ldvarg224, ldvarg223, ldvarg222); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 31: ; if (ldv_state_variable_66 == 1) { qla24xx_prep_ms_fdmi_iocb(qla82xx_isp_ops_group0, ldvarg221, ldvarg220); ldv_state_variable_66 = 1; } else { } if (ldv_state_variable_66 == 2) { qla24xx_prep_ms_fdmi_iocb(qla82xx_isp_ops_group0, ldvarg221, ldvarg220); ldv_state_variable_66 = 2; } else { } goto ldv_63320; case 32: ; if (ldv_state_variable_66 == 2) { ldv_release_66(); ldv_state_variable_66 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63320; case 33: ; if (ldv_state_variable_66 == 1) { ldv_probe_66(); ldv_state_variable_66 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63320; default: ldv_stop(); } ldv_63320: ; } else { } goto ldv_63125; case 34: ; if (ldv_state_variable_45 != 0) { ldv_main_exported_45(); } else { } goto ldv_63125; case 35: ; if (ldv_state_variable_19 != 0) { 
ldv_main_exported_19(); } else { } goto ldv_63125; case 36: ; if (ldv_state_variable_62 != 0) { tmp___172 = __VERIFIER_nondet_int(); switch (tmp___172) { case 0: ; if (ldv_state_variable_62 == 3) { qla2xxx_pci_resume(qla2xxx_err_handler_group0); ldv_state_variable_62 = 2; } else { } goto ldv_63359; case 1: ; if (ldv_state_variable_62 == 1) { qla2xxx_pci_slot_reset(qla2xxx_err_handler_group0); ldv_state_variable_62 = 1; } else { } if (ldv_state_variable_62 == 3) { qla2xxx_pci_slot_reset(qla2xxx_err_handler_group0); ldv_state_variable_62 = 3; } else { } if (ldv_state_variable_62 == 2) { qla2xxx_pci_slot_reset(qla2xxx_err_handler_group0); ldv_state_variable_62 = 2; } else { } goto ldv_63359; case 2: ; if (ldv_state_variable_62 == 1) { qla2xxx_pci_error_detected(qla2xxx_err_handler_group0, (pci_channel_state_t )ldvarg263); ldv_state_variable_62 = 1; } else { } if (ldv_state_variable_62 == 3) { qla2xxx_pci_error_detected(qla2xxx_err_handler_group0, (pci_channel_state_t )ldvarg263); ldv_state_variable_62 = 3; } else { } if (ldv_state_variable_62 == 2) { qla2xxx_pci_error_detected(qla2xxx_err_handler_group0, (pci_channel_state_t )ldvarg263); ldv_state_variable_62 = 2; } else { } goto ldv_63359; case 3: ; if (ldv_state_variable_62 == 1) { qla2xxx_pci_mmio_enabled(qla2xxx_err_handler_group0); ldv_state_variable_62 = 1; } else { } if (ldv_state_variable_62 == 3) { qla2xxx_pci_mmio_enabled(qla2xxx_err_handler_group0); ldv_state_variable_62 = 3; } else { } if (ldv_state_variable_62 == 2) { qla2xxx_pci_mmio_enabled(qla2xxx_err_handler_group0); ldv_state_variable_62 = 2; } else { } goto ldv_63359; case 4: ; if (ldv_state_variable_62 == 2) { ldv_suspend_62(); ldv_state_variable_62 = 3; } else { } goto ldv_63359; case 5: ; if (ldv_state_variable_62 == 3) { ldv_release_62(); ldv_state_variable_62 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_62 == 2) { ldv_release_62(); ldv_state_variable_62 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63359; case 6: ; if (ldv_state_variable_62 == 1) { ldv_probe_62(); ldv_state_variable_62 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63359; default: ldv_stop(); } ldv_63359: ; } else { } goto ldv_63125; case 37: ; if (ldv_state_variable_54 != 0) { ldv_main_exported_54(); } else { } goto ldv_63125; case 38: ; if (ldv_state_variable_67 != 0) { tmp___173 = __VERIFIER_nondet_int(); switch (tmp___173) { case 0: ; if (ldv_state_variable_67 == 1) { qla24xx_reset_adapter(qla81xx_isp_ops_group0); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_reset_adapter(qla81xx_isp_ops_group0); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 1: ; if (ldv_state_variable_67 == 2) { qla24xx_write_optrom_data(qla81xx_isp_ops_group0, ldvarg302, ldvarg301, ldvarg300); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 2: ; if (ldv_state_variable_67 == 1) { qla24xx_enable_intrs(qla81xx_isp_ops_group2); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_enable_intrs(qla81xx_isp_ops_group2); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 3: ; if (ldv_state_variable_67 == 1) { qla24xx_intr_handler(ldvarg298, ldvarg299); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_intr_handler(ldvarg298, ldvarg299); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 4: ; if (ldv_state_variable_67 == 1) { qla24xx_abort_target(qla81xx_isp_ops_group1, ldvarg297, ldvarg296); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { 
qla24xx_abort_target(qla81xx_isp_ops_group1, ldvarg297, ldvarg296); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 5: ; if (ldv_state_variable_67 == 1) { qla81xx_update_fw_options(qla81xx_isp_ops_group0); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla81xx_update_fw_options(qla81xx_isp_ops_group0); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 6: ; if (ldv_state_variable_67 == 1) { qla2x00_abort_isp(qla81xx_isp_ops_group0); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla2x00_abort_isp(qla81xx_isp_ops_group0); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 7: ; if (ldv_state_variable_67 == 1) { qla24xx_dif_start_scsi(ldvarg295); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_dif_start_scsi(ldvarg295); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 8: ; if (ldv_state_variable_67 == 1) { qla24xx_chip_diag(qla81xx_isp_ops_group0); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_chip_diag(qla81xx_isp_ops_group0); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 9: ; if (ldv_state_variable_67 == 1) { qla24xx_fabric_logout(qla81xx_isp_ops_group0, (int )ldvarg293, (int )ldvarg292, (int )ldvarg291, (int )ldvarg294); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_fabric_logout(qla81xx_isp_ops_group0, (int )ldvarg293, (int )ldvarg292, (int )ldvarg291, (int )ldvarg294); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 10: ; if (ldv_state_variable_67 == 1) { qla24xx_reset_chip(qla81xx_isp_ops_group0); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_reset_chip(qla81xx_isp_ops_group0); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 11: ; if (ldv_state_variable_67 == 1) { qla24xx_prep_ms_iocb(qla81xx_isp_ops_group0, ldvarg290, ldvarg289); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_prep_ms_iocb(qla81xx_isp_ops_group0, ldvarg290, ldvarg289); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 12: ; if (ldv_state_variable_67 == 1) { qla24xx_lun_reset(qla81xx_isp_ops_group1, ldvarg288, ldvarg287); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_lun_reset(qla81xx_isp_ops_group1, ldvarg288, ldvarg287); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 13: ; if (ldv_state_variable_67 == 1) { qla24xx_fw_version_str(qla81xx_isp_ops_group0, ldvarg286); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_fw_version_str(qla81xx_isp_ops_group0, ldvarg286); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 14: ; if (ldv_state_variable_67 == 1) { qla2x00_iospace_config(qla81xx_isp_ops_group2); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla2x00_iospace_config(qla81xx_isp_ops_group2); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 15: ; if (ldv_state_variable_67 == 1) { qla2x00_initialize_adapter(qla81xx_isp_ops_group0); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla2x00_initialize_adapter(qla81xx_isp_ops_group0); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 16: ; if (ldv_state_variable_67 == 1) { qla24xx_get_flash_version(qla81xx_isp_ops_group0, ldvarg285); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_get_flash_version(qla81xx_isp_ops_group0, ldvarg285); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 17: ; if 
(ldv_state_variable_67 == 1) { qla24xx_disable_intrs(qla81xx_isp_ops_group2); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_disable_intrs(qla81xx_isp_ops_group2); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 18: ; if (ldv_state_variable_67 == 1) { qla81xx_nvram_config(qla81xx_isp_ops_group0); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla81xx_nvram_config(qla81xx_isp_ops_group0); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 19: ; if (ldv_state_variable_67 == 1) { qla24xx_pci_info_str(qla81xx_isp_ops_group0, ldvarg284); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_pci_info_str(qla81xx_isp_ops_group0, ldvarg284); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 20: ; if (ldv_state_variable_67 == 2) { qla25xx_read_optrom_data(qla81xx_isp_ops_group0, ldvarg283, ldvarg282, ldvarg281); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 21: ; if (ldv_state_variable_67 == 1) { qla83xx_beacon_blink(qla81xx_isp_ops_group0); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla83xx_beacon_blink(qla81xx_isp_ops_group0); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 22: ; if (ldv_state_variable_67 == 1) { qla24xx_beacon_off(qla81xx_isp_ops_group0); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_beacon_off(qla81xx_isp_ops_group0); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 23: ; if (ldv_state_variable_67 == 1) { qla24xx_abort_command(ldvarg280); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_abort_command(ldvarg280); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 24: ; if (ldv_state_variable_67 == 1) { qla81xx_fw_dump(qla81xx_isp_ops_group0, ldvarg279); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla81xx_fw_dump(qla81xx_isp_ops_group0, ldvarg279); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 25: ; if (ldv_state_variable_67 == 1) { qla24xx_config_rings(qla81xx_isp_ops_group0); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_config_rings(qla81xx_isp_ops_group0); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 26: ; if (ldv_state_variable_67 == 1) { qla81xx_load_risc(qla81xx_isp_ops_group0, ldvarg278); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla81xx_load_risc(qla81xx_isp_ops_group0, ldvarg278); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 27: ; if (ldv_state_variable_67 == 1) { qla25xx_pci_config(qla81xx_isp_ops_group0); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla25xx_pci_config(qla81xx_isp_ops_group0); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 28: ; if (ldv_state_variable_67 == 1) { qla24xx_login_fabric(qla81xx_isp_ops_group0, (int )ldvarg276, (int )ldvarg274, (int )ldvarg273, (int )ldvarg277, ldvarg272, (int )ldvarg275); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_login_fabric(qla81xx_isp_ops_group0, (int )ldvarg276, (int )ldvarg274, (int )ldvarg273, (int )ldvarg277, ldvarg272, (int )ldvarg275); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 29: ; if (ldv_state_variable_67 == 1) { qla24xx_beacon_on(qla81xx_isp_ops_group0); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_beacon_on(qla81xx_isp_ops_group0); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 30: ; if 
(ldv_state_variable_67 == 1) { qla24xx_prep_ms_fdmi_iocb(qla81xx_isp_ops_group0, ldvarg271, ldvarg270); ldv_state_variable_67 = 1; } else { } if (ldv_state_variable_67 == 2) { qla24xx_prep_ms_fdmi_iocb(qla81xx_isp_ops_group0, ldvarg271, ldvarg270); ldv_state_variable_67 = 2; } else { } goto ldv_63370; case 31: ; if (ldv_state_variable_67 == 2) { ldv_release_67(); ldv_state_variable_67 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63370; case 32: ; if (ldv_state_variable_67 == 1) { ldv_probe_67(); ldv_state_variable_67 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63370; default: ldv_stop(); } ldv_63370: ; } else { } goto ldv_63125; case 39: ; if (ldv_state_variable_70 != 0) { tmp___174 = __VERIFIER_nondet_int(); switch (tmp___174) { case 0: ; if (ldv_state_variable_70 == 1) { qla2x00_reset_adapter(qla2300_isp_ops_group0); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_reset_adapter(qla2300_isp_ops_group0); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 1: ; if (ldv_state_variable_70 == 1) { qla2x00_enable_intrs(qla2300_isp_ops_group2); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_enable_intrs(qla2300_isp_ops_group2); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 2: ; if (ldv_state_variable_70 == 2) { qla2x00_write_optrom_data(qla2300_isp_ops_group0, ldvarg345, ldvarg344, ldvarg343); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 3: ; if (ldv_state_variable_70 == 1) { qla2300_intr_handler(ldvarg341, ldvarg342); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2300_intr_handler(ldvarg341, ldvarg342); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 4: ; if (ldv_state_variable_70 == 1) { qla2x00_abort_target(qla2300_isp_ops_group1, ldvarg340, ldvarg339); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_abort_target(qla2300_isp_ops_group1, ldvarg340, ldvarg339); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 5: ; if (ldv_state_variable_70 == 1) { qla2x00_update_fw_options(qla2300_isp_ops_group0); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_update_fw_options(qla2300_isp_ops_group0); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 6: ; if (ldv_state_variable_70 == 1) { qla2x00_abort_isp(qla2300_isp_ops_group0); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_abort_isp(qla2300_isp_ops_group0); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 7: ; if (ldv_state_variable_70 == 1) { qla2x00_start_scsi(ldvarg338); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_start_scsi(ldvarg338); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 8: ; if (ldv_state_variable_70 == 1) { qla2x00_chip_diag(qla2300_isp_ops_group0); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_chip_diag(qla2300_isp_ops_group0); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 9: ; if (ldv_state_variable_70 == 1) { qla2x00_fabric_logout(qla2300_isp_ops_group0, (int )ldvarg336, (int )ldvarg335, (int )ldvarg334, (int )ldvarg337); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_fabric_logout(qla2300_isp_ops_group0, (int )ldvarg336, (int )ldvarg335, (int )ldvarg334, (int )ldvarg337); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 10: ; if (ldv_state_variable_70 == 1) { qla2x00_reset_chip(qla2300_isp_ops_group0); ldv_state_variable_70 = 1; } 
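/* Dispatcher pattern shared by the isp_operations groups in this harness:
 * most callbacks may be invoked while the group is in state 1 or state 2 and
 * leave that state unchanged, the optrom read/write callbacks are only
 * reachable in state 2, and only the ldv_probe_*() / ldv_release_*() cases
 * move between the two states while incrementing or decrementing ref_cnt. */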
else { } if (ldv_state_variable_70 == 2) { qla2x00_reset_chip(qla2300_isp_ops_group0); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 11: ; if (ldv_state_variable_70 == 1) { qla2x00_prep_ms_iocb(qla2300_isp_ops_group0, ldvarg333, ldvarg332); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_prep_ms_iocb(qla2300_isp_ops_group0, ldvarg333, ldvarg332); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 12: ; if (ldv_state_variable_70 == 1) { qla2x00_lun_reset(qla2300_isp_ops_group1, ldvarg331, ldvarg330); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_lun_reset(qla2300_isp_ops_group1, ldvarg331, ldvarg330); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 13: ; if (ldv_state_variable_70 == 1) { qla2x00_fw_version_str(qla2300_isp_ops_group0, ldvarg329); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_fw_version_str(qla2300_isp_ops_group0, ldvarg329); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 14: ; if (ldv_state_variable_70 == 1) { qla2x00_iospace_config(qla2300_isp_ops_group2); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_iospace_config(qla2300_isp_ops_group2); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 15: ; if (ldv_state_variable_70 == 1) { qla2x00_initialize_adapter(qla2300_isp_ops_group0); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_initialize_adapter(qla2300_isp_ops_group0); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 16: ; if (ldv_state_variable_70 == 1) { qla2x00_disable_intrs(qla2300_isp_ops_group2); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_disable_intrs(qla2300_isp_ops_group2); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 17: ; if (ldv_state_variable_70 == 1) { qla2x00_get_flash_version(qla2300_isp_ops_group0, ldvarg328); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_get_flash_version(qla2300_isp_ops_group0, ldvarg328); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 18: ; if (ldv_state_variable_70 == 1) { qla2x00_calc_iocbs_32((int )ldvarg327); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_calc_iocbs_32((int )ldvarg327); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 19: ; if (ldv_state_variable_70 == 1) { qla2x00_nvram_config(qla2300_isp_ops_group0); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_nvram_config(qla2300_isp_ops_group0); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 20: ; if (ldv_state_variable_70 == 1) { qla2x00_pci_info_str(qla2300_isp_ops_group0, ldvarg326); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_pci_info_str(qla2300_isp_ops_group0, ldvarg326); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 21: ; if (ldv_state_variable_70 == 2) { qla2x00_read_optrom_data(qla2300_isp_ops_group0, ldvarg325, ldvarg324, ldvarg323); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 22: ; if (ldv_state_variable_70 == 1) { qla2x00_beacon_blink(qla2300_isp_ops_group0); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_beacon_blink(qla2300_isp_ops_group0); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 23: ; if (ldv_state_variable_70 == 1) { qla2x00_beacon_off(qla2300_isp_ops_group0); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { 
qla2x00_beacon_off(qla2300_isp_ops_group0); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 24: ; if (ldv_state_variable_70 == 1) { qla2x00_build_scsi_iocbs_32(ldvarg320, ldvarg322, (int )ldvarg321); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_build_scsi_iocbs_32(ldvarg320, ldvarg322, (int )ldvarg321); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 25: ; if (ldv_state_variable_70 == 1) { qla2x00_abort_command(ldvarg319); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_abort_command(ldvarg319); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 26: ; if (ldv_state_variable_70 == 1) { qla2300_fw_dump(qla2300_isp_ops_group0, ldvarg318); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2300_fw_dump(qla2300_isp_ops_group0, ldvarg318); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 27: ; if (ldv_state_variable_70 == 1) { qla2x00_config_rings(qla2300_isp_ops_group0); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_config_rings(qla2300_isp_ops_group0); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 28: ; if (ldv_state_variable_70 == 1) { qla2x00_load_risc(qla2300_isp_ops_group0, ldvarg317); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_load_risc(qla2300_isp_ops_group0, ldvarg317); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 29: ; if (ldv_state_variable_70 == 1) { qla2300_pci_config(qla2300_isp_ops_group0); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2300_pci_config(qla2300_isp_ops_group0); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 30: ; if (ldv_state_variable_70 == 1) { qla2x00_login_fabric(qla2300_isp_ops_group0, (int )ldvarg315, (int )ldvarg313, (int )ldvarg312, (int )ldvarg316, ldvarg311, (int )ldvarg314); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_login_fabric(qla2300_isp_ops_group0, (int )ldvarg315, (int )ldvarg313, (int )ldvarg312, (int )ldvarg316, ldvarg311, (int )ldvarg314); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 31: ; if (ldv_state_variable_70 == 1) { qla2x00_write_nvram_data(qla2300_isp_ops_group0, ldvarg310, ldvarg309, ldvarg308); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_write_nvram_data(qla2300_isp_ops_group0, ldvarg310, ldvarg309, ldvarg308); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 32: ; if (ldv_state_variable_70 == 1) { qla2x00_beacon_on(qla2300_isp_ops_group0); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_beacon_on(qla2300_isp_ops_group0); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 33: ; if (ldv_state_variable_70 == 1) { qla2x00_read_nvram_data(qla2300_isp_ops_group0, ldvarg307, ldvarg306, ldvarg305); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_read_nvram_data(qla2300_isp_ops_group0, ldvarg307, ldvarg306, ldvarg305); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 34: ; if (ldv_state_variable_70 == 1) { qla2x00_prep_ms_fdmi_iocb(qla2300_isp_ops_group0, ldvarg304, ldvarg303); ldv_state_variable_70 = 1; } else { } if (ldv_state_variable_70 == 2) { qla2x00_prep_ms_fdmi_iocb(qla2300_isp_ops_group0, ldvarg304, ldvarg303); ldv_state_variable_70 = 2; } else { } goto ldv_63406; case 35: ; if (ldv_state_variable_70 == 2) { ldv_release_70(); ldv_state_variable_70 = 1; ref_cnt = ref_cnt - 1; } else { } goto 
ldv_63406; case 36: ; if (ldv_state_variable_70 == 1) { ldv_probe_70(); ldv_state_variable_70 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63406; default: ldv_stop(); } ldv_63406: ; } else { } goto ldv_63125; case 40: ; if (ldv_state_variable_68 != 0) { tmp___175 = __VERIFIER_nondet_int(); switch (tmp___175) { case 0: ; if (ldv_state_variable_68 == 1) { qla24xx_reset_adapter(qla25xx_isp_ops_group0); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_reset_adapter(qla25xx_isp_ops_group0); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 1: ; if (ldv_state_variable_68 == 2) { qla24xx_write_optrom_data(qla25xx_isp_ops_group0, ldvarg384, ldvarg383, ldvarg382); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 2: ; if (ldv_state_variable_68 == 1) { qla24xx_enable_intrs(qla25xx_isp_ops_group2); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_enable_intrs(qla25xx_isp_ops_group2); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 3: ; if (ldv_state_variable_68 == 1) { qla24xx_intr_handler(ldvarg380, ldvarg381); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_intr_handler(ldvarg380, ldvarg381); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 4: ; if (ldv_state_variable_68 == 1) { qla24xx_abort_target(qla25xx_isp_ops_group1, ldvarg379, ldvarg378); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_abort_target(qla25xx_isp_ops_group1, ldvarg379, ldvarg378); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 5: ; if (ldv_state_variable_68 == 1) { qla24xx_update_fw_options(qla25xx_isp_ops_group0); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_update_fw_options(qla25xx_isp_ops_group0); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 6: ; if (ldv_state_variable_68 == 1) { qla2x00_abort_isp(qla25xx_isp_ops_group0); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla2x00_abort_isp(qla25xx_isp_ops_group0); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 7: ; if (ldv_state_variable_68 == 1) { qla24xx_dif_start_scsi(ldvarg377); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_dif_start_scsi(ldvarg377); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 8: ; if (ldv_state_variable_68 == 1) { qla24xx_chip_diag(qla25xx_isp_ops_group0); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_chip_diag(qla25xx_isp_ops_group0); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 9: ; if (ldv_state_variable_68 == 1) { qla24xx_fabric_logout(qla25xx_isp_ops_group0, (int )ldvarg375, (int )ldvarg374, (int )ldvarg373, (int )ldvarg376); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_fabric_logout(qla25xx_isp_ops_group0, (int )ldvarg375, (int )ldvarg374, (int )ldvarg373, (int )ldvarg376); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 10: ; if (ldv_state_variable_68 == 1) { qla24xx_reset_chip(qla25xx_isp_ops_group0); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_reset_chip(qla25xx_isp_ops_group0); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 11: ; if (ldv_state_variable_68 == 1) { qla24xx_prep_ms_iocb(qla25xx_isp_ops_group0, ldvarg372, ldvarg371); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_prep_ms_iocb(qla25xx_isp_ops_group0, ldvarg372, ldvarg371); ldv_state_variable_68 = 2; } 
else { } goto ldv_63446; case 12: ; if (ldv_state_variable_68 == 1) { qla24xx_lun_reset(qla25xx_isp_ops_group1, ldvarg370, ldvarg369); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_lun_reset(qla25xx_isp_ops_group1, ldvarg370, ldvarg369); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 13: ; if (ldv_state_variable_68 == 1) { qla24xx_fw_version_str(qla25xx_isp_ops_group0, ldvarg368); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_fw_version_str(qla25xx_isp_ops_group0, ldvarg368); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 14: ; if (ldv_state_variable_68 == 1) { qla2x00_iospace_config(qla25xx_isp_ops_group2); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla2x00_iospace_config(qla25xx_isp_ops_group2); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 15: ; if (ldv_state_variable_68 == 1) { qla2x00_initialize_adapter(qla25xx_isp_ops_group0); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla2x00_initialize_adapter(qla25xx_isp_ops_group0); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 16: ; if (ldv_state_variable_68 == 1) { qla24xx_get_flash_version(qla25xx_isp_ops_group0, ldvarg367); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_get_flash_version(qla25xx_isp_ops_group0, ldvarg367); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 17: ; if (ldv_state_variable_68 == 1) { qla24xx_disable_intrs(qla25xx_isp_ops_group2); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_disable_intrs(qla25xx_isp_ops_group2); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 18: ; if (ldv_state_variable_68 == 1) { qla24xx_nvram_config(qla25xx_isp_ops_group0); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_nvram_config(qla25xx_isp_ops_group0); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 19: ; if (ldv_state_variable_68 == 1) { qla24xx_pci_info_str(qla25xx_isp_ops_group0, ldvarg366); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_pci_info_str(qla25xx_isp_ops_group0, ldvarg366); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 20: ; if (ldv_state_variable_68 == 2) { qla25xx_read_optrom_data(qla25xx_isp_ops_group0, ldvarg365, ldvarg364, ldvarg363); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 21: ; if (ldv_state_variable_68 == 1) { qla24xx_beacon_blink(qla25xx_isp_ops_group0); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_beacon_blink(qla25xx_isp_ops_group0); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 22: ; if (ldv_state_variable_68 == 1) { qla24xx_beacon_off(qla25xx_isp_ops_group0); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_beacon_off(qla25xx_isp_ops_group0); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 23: ; if (ldv_state_variable_68 == 1) { qla24xx_abort_command(ldvarg362); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_abort_command(ldvarg362); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 24: ; if (ldv_state_variable_68 == 1) { qla25xx_fw_dump(qla25xx_isp_ops_group0, ldvarg361); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla25xx_fw_dump(qla25xx_isp_ops_group0, ldvarg361); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 25: ; if (ldv_state_variable_68 == 1) { 
qla24xx_config_rings(qla25xx_isp_ops_group0); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_config_rings(qla25xx_isp_ops_group0); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 26: ; if (ldv_state_variable_68 == 1) { qla24xx_load_risc(qla25xx_isp_ops_group0, ldvarg360); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_load_risc(qla25xx_isp_ops_group0, ldvarg360); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 27: ; if (ldv_state_variable_68 == 1) { qla25xx_pci_config(qla25xx_isp_ops_group0); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla25xx_pci_config(qla25xx_isp_ops_group0); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 28: ; if (ldv_state_variable_68 == 1) { qla24xx_login_fabric(qla25xx_isp_ops_group0, (int )ldvarg358, (int )ldvarg356, (int )ldvarg355, (int )ldvarg359, ldvarg354, (int )ldvarg357); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_login_fabric(qla25xx_isp_ops_group0, (int )ldvarg358, (int )ldvarg356, (int )ldvarg355, (int )ldvarg359, ldvarg354, (int )ldvarg357); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 29: ; if (ldv_state_variable_68 == 1) { qla25xx_write_nvram_data(qla25xx_isp_ops_group0, ldvarg353, ldvarg352, ldvarg351); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla25xx_write_nvram_data(qla25xx_isp_ops_group0, ldvarg353, ldvarg352, ldvarg351); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 30: ; if (ldv_state_variable_68 == 1) { qla24xx_beacon_on(qla25xx_isp_ops_group0); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_beacon_on(qla25xx_isp_ops_group0); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 31: ; if (ldv_state_variable_68 == 1) { qla25xx_read_nvram_data(qla25xx_isp_ops_group0, ldvarg350, ldvarg349, ldvarg348); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla25xx_read_nvram_data(qla25xx_isp_ops_group0, ldvarg350, ldvarg349, ldvarg348); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 32: ; if (ldv_state_variable_68 == 1) { qla24xx_prep_ms_fdmi_iocb(qla25xx_isp_ops_group0, ldvarg347, ldvarg346); ldv_state_variable_68 = 1; } else { } if (ldv_state_variable_68 == 2) { qla24xx_prep_ms_fdmi_iocb(qla25xx_isp_ops_group0, ldvarg347, ldvarg346); ldv_state_variable_68 = 2; } else { } goto ldv_63446; case 33: ; if (ldv_state_variable_68 == 2) { ldv_release_68(); ldv_state_variable_68 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63446; case 34: ; if (ldv_state_variable_68 == 1) { ldv_probe_68(); ldv_state_variable_68 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63446; default: ldv_stop(); } ldv_63446: ; } else { } goto ldv_63125; case 41: ; if (ldv_state_variable_2 != 0) { choose_timer_2(ldv_timer_list_2); } else { } goto ldv_63125; case 42: ; goto ldv_63125; case 43: ; if (ldv_state_variable_1 != 0) { choose_timer_1(ldv_timer_list_1); } else { } goto ldv_63125; case 44: ; if (ldv_state_variable_30 != 0) { ldv_main_exported_30(); } else { } goto ldv_63125; case 45: ; if (ldv_state_variable_25 != 0) { ldv_main_exported_25(); } else { } goto ldv_63125; case 46: ; if (ldv_state_variable_28 != 0) { ldv_main_exported_28(); } else { } goto ldv_63125; case 47: ; if (ldv_state_variable_40 != 0) { ldv_main_exported_40(); } else { } goto ldv_63125; case 48: ; goto ldv_63125; case 49: ; if (ldv_state_variable_69 != 0) { tmp___176 = __VERIFIER_nondet_int(); switch (tmp___176) { case 
0: ; if (ldv_state_variable_69 == 1) { qla24xx_reset_adapter(qla24xx_isp_ops_group0); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_reset_adapter(qla24xx_isp_ops_group0); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 1: ; if (ldv_state_variable_69 == 2) { qla24xx_write_optrom_data(qla24xx_isp_ops_group0, ldvarg435, ldvarg434, ldvarg433); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 2: ; if (ldv_state_variable_69 == 1) { qla24xx_enable_intrs(qla24xx_isp_ops_group2); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_enable_intrs(qla24xx_isp_ops_group2); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 3: ; if (ldv_state_variable_69 == 1) { qla24xx_intr_handler(ldvarg431, ldvarg432); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_intr_handler(ldvarg431, ldvarg432); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 4: ; if (ldv_state_variable_69 == 1) { qla24xx_abort_target(qla24xx_isp_ops_group1, ldvarg430, ldvarg429); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_abort_target(qla24xx_isp_ops_group1, ldvarg430, ldvarg429); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 5: ; if (ldv_state_variable_69 == 1) { qla24xx_update_fw_options(qla24xx_isp_ops_group0); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_update_fw_options(qla24xx_isp_ops_group0); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 6: ; if (ldv_state_variable_69 == 1) { qla2x00_abort_isp(qla24xx_isp_ops_group0); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla2x00_abort_isp(qla24xx_isp_ops_group0); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 7: ; if (ldv_state_variable_69 == 1) { qla24xx_start_scsi(ldvarg428); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_start_scsi(ldvarg428); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 8: ; if (ldv_state_variable_69 == 1) { qla24xx_chip_diag(qla24xx_isp_ops_group0); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_chip_diag(qla24xx_isp_ops_group0); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 9: ; if (ldv_state_variable_69 == 1) { qla24xx_fabric_logout(qla24xx_isp_ops_group0, (int )ldvarg426, (int )ldvarg425, (int )ldvarg424, (int )ldvarg427); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_fabric_logout(qla24xx_isp_ops_group0, (int )ldvarg426, (int )ldvarg425, (int )ldvarg424, (int )ldvarg427); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 10: ; if (ldv_state_variable_69 == 1) { qla24xx_reset_chip(qla24xx_isp_ops_group0); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_reset_chip(qla24xx_isp_ops_group0); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 11: ; if (ldv_state_variable_69 == 1) { qla24xx_prep_ms_iocb(qla24xx_isp_ops_group0, ldvarg423, ldvarg422); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_prep_ms_iocb(qla24xx_isp_ops_group0, ldvarg423, ldvarg422); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 12: ; if (ldv_state_variable_69 == 1) { qla24xx_lun_reset(qla24xx_isp_ops_group1, ldvarg421, ldvarg420); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_lun_reset(qla24xx_isp_ops_group1, ldvarg421, ldvarg420); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 
13: ; if (ldv_state_variable_69 == 1) { qla24xx_fw_version_str(qla24xx_isp_ops_group0, ldvarg419); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_fw_version_str(qla24xx_isp_ops_group0, ldvarg419); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 14: ; if (ldv_state_variable_69 == 1) { qla2x00_iospace_config(qla24xx_isp_ops_group2); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla2x00_iospace_config(qla24xx_isp_ops_group2); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 15: ; if (ldv_state_variable_69 == 1) { qla2x00_initialize_adapter(qla24xx_isp_ops_group0); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla2x00_initialize_adapter(qla24xx_isp_ops_group0); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 16: ; if (ldv_state_variable_69 == 1) { qla24xx_get_flash_version(qla24xx_isp_ops_group0, ldvarg418); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_get_flash_version(qla24xx_isp_ops_group0, ldvarg418); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 17: ; if (ldv_state_variable_69 == 1) { qla24xx_disable_intrs(qla24xx_isp_ops_group2); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_disable_intrs(qla24xx_isp_ops_group2); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 18: ; if (ldv_state_variable_69 == 1) { qla24xx_nvram_config(qla24xx_isp_ops_group0); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_nvram_config(qla24xx_isp_ops_group0); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 19: ; if (ldv_state_variable_69 == 1) { qla24xx_pci_info_str(qla24xx_isp_ops_group0, ldvarg417); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_pci_info_str(qla24xx_isp_ops_group0, ldvarg417); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 20: ; if (ldv_state_variable_69 == 2) { qla24xx_read_optrom_data(qla24xx_isp_ops_group0, ldvarg416, ldvarg415, ldvarg414); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 21: ; if (ldv_state_variable_69 == 1) { qla24xx_beacon_blink(qla24xx_isp_ops_group0); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_beacon_blink(qla24xx_isp_ops_group0); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 22: ; if (ldv_state_variable_69 == 1) { qla24xx_beacon_off(qla24xx_isp_ops_group0); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_beacon_off(qla24xx_isp_ops_group0); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 23: ; if (ldv_state_variable_69 == 1) { qla24xx_abort_command(ldvarg413); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_abort_command(ldvarg413); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 24: ; if (ldv_state_variable_69 == 1) { qla24xx_fw_dump(qla24xx_isp_ops_group0, ldvarg412); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_fw_dump(qla24xx_isp_ops_group0, ldvarg412); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 25: ; if (ldv_state_variable_69 == 1) { qla24xx_config_rings(qla24xx_isp_ops_group0); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_config_rings(qla24xx_isp_ops_group0); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 26: ; if (ldv_state_variable_69 == 1) { qla24xx_load_risc(qla24xx_isp_ops_group0, ldvarg411); ldv_state_variable_69 = 1; } else { } if 
(ldv_state_variable_69 == 2) { qla24xx_load_risc(qla24xx_isp_ops_group0, ldvarg411); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 27: ; if (ldv_state_variable_69 == 1) { qla24xx_pci_config(qla24xx_isp_ops_group0); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_pci_config(qla24xx_isp_ops_group0); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 28: ; if (ldv_state_variable_69 == 1) { qla24xx_login_fabric(qla24xx_isp_ops_group0, (int )ldvarg409, (int )ldvarg407, (int )ldvarg406, (int )ldvarg410, ldvarg405, (int )ldvarg408); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_login_fabric(qla24xx_isp_ops_group0, (int )ldvarg409, (int )ldvarg407, (int )ldvarg406, (int )ldvarg410, ldvarg405, (int )ldvarg408); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 29: ; if (ldv_state_variable_69 == 1) { qla24xx_write_nvram_data(qla24xx_isp_ops_group0, ldvarg404, ldvarg403, ldvarg402); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_write_nvram_data(qla24xx_isp_ops_group0, ldvarg404, ldvarg403, ldvarg402); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 30: ; if (ldv_state_variable_69 == 1) { qla24xx_beacon_on(qla24xx_isp_ops_group0); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_beacon_on(qla24xx_isp_ops_group0); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 31: ; if (ldv_state_variable_69 == 1) { qla24xx_read_nvram_data(qla24xx_isp_ops_group0, ldvarg401, ldvarg400, ldvarg399); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_read_nvram_data(qla24xx_isp_ops_group0, ldvarg401, ldvarg400, ldvarg399); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 32: ; if (ldv_state_variable_69 == 1) { qla24xx_prep_ms_fdmi_iocb(qla24xx_isp_ops_group0, ldvarg398, ldvarg397); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { qla24xx_prep_ms_fdmi_iocb(qla24xx_isp_ops_group0, ldvarg398, ldvarg397); ldv_state_variable_69 = 2; } else { } goto ldv_63492; case 33: ; if (ldv_state_variable_69 == 2) { ldv_release_69(); ldv_state_variable_69 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63492; case 34: ; if (ldv_state_variable_69 == 1) { ldv_probe_69(); ldv_state_variable_69 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63492; default: ldv_stop(); } ldv_63492: ; } else { } goto ldv_63125; case 50: ; if (ldv_state_variable_59 != 0) { ldv_main_exported_59(); } else { } goto ldv_63125; case 51: ; if (ldv_state_variable_49 != 0) { ldv_main_exported_49(); } else { } goto ldv_63125; case 52: ; if (ldv_state_variable_24 != 0) { ldv_main_exported_24(); } else { } goto ldv_63125; case 53: ; if (ldv_state_variable_53 != 0) { ldv_main_exported_53(); } else { } goto ldv_63125; case 54: ; if (ldv_state_variable_22 != 0) { ldv_main_exported_22(); } else { } goto ldv_63125; case 55: ; if (ldv_state_variable_42 != 0) { ldv_main_exported_42(); } else { } goto ldv_63125; case 56: ; if (ldv_state_variable_0 != 0) { tmp___177 = __VERIFIER_nondet_int(); switch (tmp___177) { case 0: ; if (ldv_state_variable_0 == 3 && ref_cnt == 0) { qla2x00_module_exit(); ldv_state_variable_0 = 2; goto ldv_final; } else { } goto ldv_63537; case 1: ; if (ldv_state_variable_0 == 1) { ldv_retval_3 = qla2x00_module_init(); if (ldv_retval_3 == 0) { ldv_state_variable_0 = 3; ldv_state_variable_54 = 1; ldv_state_variable_62 = 1; ldv_initialize_pci_error_handlers_62(); ldv_state_variable_43 = 1; ldv_state_variable_19 = 1; 
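/* On a successful qla2x00_module_init() (ldv_retval_3 == 0) the environment
 * model arms every callback group it tracks: each group's
 * ldv_state_variable_* is set to 1 and, for groups that carry a callback
 * table, the matching ldv_initialize_*() stub is invoked so the group can be
 * exercised by the nondeterministic dispatcher. */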
ldv_initialize_fc_function_template_19(); ldv_state_variable_45 = 1; ldv_state_variable_37 = 1; ldv_state_variable_66 = 1; ldv_initialize_isp_operations_66(); ldv_state_variable_56 = 1; ldv_state_variable_34 = 1; ldv_state_variable_52 = 1; ldv_state_variable_38 = 1; ldv_state_variable_47 = 1; ldv_state_variable_41 = 1; ldv_initialize_device_attribute_41(); ldv_state_variable_58 = 1; ldv_initialize_bin_attribute_58(); ldv_state_variable_51 = 1; ldv_state_variable_36 = 1; ldv_state_variable_64 = 1; ldv_initialize_isp_operations_64(); ldv_state_variable_39 = 1; ldv_initialize_device_attribute_39(); ldv_state_variable_50 = 1; ldv_state_variable_29 = 1; ldv_state_variable_65 = 1; ldv_initialize_isp_operations_65(); ldv_state_variable_23 = 1; ldv_state_variable_46 = 1; ldv_state_variable_42 = 1; ldv_state_variable_22 = 1; ldv_state_variable_48 = 1; ldv_state_variable_53 = 1; ldv_state_variable_35 = 1; ldv_state_variable_31 = 1; ldv_state_variable_24 = 1; ldv_state_variable_49 = 1; ldv_state_variable_59 = 1; ldv_initialize_bin_attribute_59(); ldv_state_variable_69 = 1; ldv_initialize_isp_operations_69(); ldv_state_variable_20 = 1; ldv_initialize_fc_function_template_20(); ldv_state_variable_61 = 1; ldv_initialize_pci_driver_61(); ldv_state_variable_40 = 1; ldv_initialize_device_attribute_40(); ldv_state_variable_57 = 1; ldv_initialize_bin_attribute_57(); ldv_state_variable_28 = 1; ldv_state_variable_27 = 1; ldv_state_variable_25 = 1; ldv_state_variable_55 = 1; ldv_initialize_bin_attribute_55(); ldv_state_variable_44 = 1; ldv_state_variable_30 = 1; ldv_state_variable_26 = 1; ldv_state_variable_68 = 1; ldv_initialize_isp_operations_68(); ldv_state_variable_70 = 1; ldv_initialize_isp_operations_70(); ldv_state_variable_71 = 1; ldv_initialize_isp_operations_71(); ldv_state_variable_21 = 1; ldv_state_variable_63 = 1; ldv_initialize_isp_operations_63(); ldv_state_variable_32 = 1; ldv_state_variable_33 = 1; ldv_state_variable_67 = 1; ldv_initialize_isp_operations_67(); } else { } if (ldv_retval_3 != 0) { ldv_state_variable_0 = 2; goto ldv_final; } else { } } else { } goto ldv_63537; default: ldv_stop(); } ldv_63537: ; } else { } goto ldv_63125; case 57: ; if (ldv_state_variable_46 != 0) { ldv_main_exported_46(); } else { } goto ldv_63125; case 58: ; if (ldv_state_variable_23 != 0) { ldv_main_exported_23(); } else { } goto ldv_63125; case 59: ; goto ldv_63125; case 60: ; goto ldv_63125; case 61: ; goto ldv_63125; case 62: ; if (ldv_state_variable_36 != 0) { ldv_main_exported_36(); } else { } goto ldv_63125; case 63: ; goto ldv_63125; case 64: ; if (ldv_state_variable_51 != 0) { ldv_main_exported_51(); } else { } goto ldv_63125; case 65: ; if (ldv_state_variable_47 != 0) { ldv_main_exported_47(); } else { } goto ldv_63125; case 66: ; goto ldv_63125; case 67: ; if (ldv_state_variable_38 != 0) { ldv_main_exported_38(); } else { } goto ldv_63125; case 68: ; goto ldv_63125; case 69: ; if (ldv_state_variable_34 != 0) { ldv_main_exported_34(); } else { } goto ldv_63125; case 70: ; if (ldv_state_variable_37 != 0) { ldv_main_exported_37(); } else { } goto ldv_63125; case 71: ; if (ldv_state_variable_43 != 0) { ldv_main_exported_43(); } else { } goto ldv_63125; case 72: ; goto ldv_63125; default: ldv_stop(); } ldv_63125: ; goto ldv_63557; ldv_final: ldv_check_final_state(); return 0; } } int ldv_del_timer_1(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int 
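/* The ldv_* interception wrappers in this region forward to the real kernel
 * primitives (del_timer, scsi_add_host_with_dma, mod_timer, del_timer_sync,
 * scsi_remove_host, scsi_host_alloc, register_chrdev, unregister_chrdev) and
 * update the corresponding ldv_state_variable_* / timer bookkeeping, keeping
 * the environment model in sync with the driver's registrations. */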
ldv_scsi_add_host_with_dma_2(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } int ldv_mod_timer_3(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) { ldv_func_ret_type___1 ldv_func_res ; int tmp ; { tmp = mod_timer(ldv_func_arg1, ldv_func_arg2); ldv_func_res = tmp; activate_pending_timer_7(ldv_func_arg1, ldv_func_arg2, 1); return (ldv_func_res); } } int ldv_del_timer_sync_4(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; { tmp = del_timer_sync(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } void ldv_scsi_remove_host_5(struct Scsi_Host *shost ) { { scsi_remove_host(shost); ldv_state_variable_72 = 0; return; } } struct Scsi_Host *ldv_scsi_host_alloc_6(struct scsi_host_template *sht , int privsize ) { ldv_func_ret_type___3 ldv_func_res ; struct Scsi_Host *tmp ; { tmp = scsi_host_alloc(sht, privsize); ldv_func_res = tmp; if ((unsigned long )ldv_func_res != (unsigned long )((ldv_func_ret_type___3 )0)) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } __inline static int ldv_register_chrdev_7(unsigned int major , char const *name , struct file_operations const *fops ) { ldv_func_ret_type___4 ldv_func_res ; int tmp ; { tmp = register_chrdev(major, name, fops); ldv_func_res = tmp; ldv_state_variable_18 = 1; ldv_file_operations_18(); return (ldv_func_res); } } __inline static void ldv_unregister_chrdev_8(unsigned int major , char const *name ) { { unregister_chrdev(major, name); ldv_state_variable_18 = 0; return; } } extern unsigned long find_first_zero_bit(unsigned long const * , unsigned long ) ; __inline static __u16 __fswab16(__u16 val ) { { return ((__u16 )((int )((short )((int )val << 8)) | (int )((short )((int )val >> 8)))); } } extern int memcmp(void const * , void const * , size_t ) ; extern char *strncpy(char * , char const * , __kernel_size_t ) ; extern void __xadd_wrong_size(void) ; __inline static int atomic_sub_and_test(int i , atomic_t *v ) { unsigned char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; subl %2,%0; sete %1": "+m" (v->counter), "=qm" (c): "ir" (i): "memory"); return ((int )c); } } __inline static int atomic_add_return(int i , atomic_t *v ) { int __ret ; { __ret = i; switch (4UL) { case 1UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddb %b0, %1\n": "+q" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5380; case 2UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddw %w0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5380; case 4UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddl %0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5380; case 8UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddq %q0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5380; default: __xadd_wrong_size(); } ldv_5380: ; return (__ret + i); } } __inline static void list_move_tail(struct 
list_head *list , struct list_head *head ) { { __list_del_entry(list); list_add_tail(list, head); return; } } __inline static u64 get_jiffies_64(void) { { return ((u64 )jiffies); } } extern unsigned int jiffies_to_msecs(unsigned long const ) ; int ldv_del_timer_17(struct timer_list *ldv_func_arg1 ) ; int ldv_del_timer_19(struct timer_list *ldv_func_arg1 ) ; extern void *vmalloc(unsigned long ) ; __inline static void kref_init(struct kref *kref ) { { atomic_set(& kref->refcount, 1); return; } } __inline static void kref_get(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! __warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } __inline static int kref_sub(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub(kref, 1U, release); return (tmp); } } __inline static void *kmalloc_array(size_t n , size_t size , gfp_t flags ) { void *tmp ; { if (size != 0UL && 0xffffffffffffffffUL / size < n) { return ((void *)0); } else { } tmp = __kmalloc(n * size, flags); return (tmp); } } __inline static void *kcalloc(size_t n , size_t size , gfp_t flags ) { void *tmp ; { tmp = kmalloc_array(n, size, flags | 32768U); return (tmp); } } int reg_timer_3(struct timer_list *timer ) ; void activate_pending_timer_3(struct timer_list *timer , unsigned long data , int pending_flag ) ; void disable_suitable_timer_3(struct timer_list *timer ) ; void choose_timer_3(struct timer_list *timer ) ; extern int pci_find_capability(struct pci_dev * , int ) ; extern int pci_bus_write_config_byte(struct pci_bus * , unsigned int , int , u8 ) ; extern int pci_bus_write_config_word(struct pci_bus * , unsigned int , int , u16 ) ; __inline static int pci_write_config_byte(struct pci_dev const *dev , int where , u8 val ) { int tmp ; { tmp = pci_bus_write_config_byte(dev->bus, dev->devfn, where, (int )val); return (tmp); } } __inline static int pci_write_config_word(struct pci_dev const *dev , int where , u16 val ) { int tmp ; { tmp = pci_bus_write_config_word(dev->bus, dev->devfn, where, (int )val); return (tmp); } } extern void pci_set_master(struct pci_dev * ) ; extern int pci_try_set_mwi(struct pci_dev * ) ; extern void pci_clear_mwi(struct pci_dev * ) ; extern int pcix_set_mmrbc(struct pci_dev * , int ) ; extern int pcie_set_readrq(struct pci_dev * , int ) ; extern void pci_disable_rom(struct pci_dev * ) ; __inline static bool pci_is_pcie(struct pci_dev *dev ) { int tmp ; { tmp = pci_pcie_cap(dev); return (tmp != 0); } } int 
ldv_scsi_add_host_with_dma_18(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; __inline static u64 wwn_to_u64(u8 *wwn ) { { return (((((((((unsigned long long )*wwn << 56) | ((unsigned long long )*(wwn + 1UL) << 48)) | ((unsigned long long )*(wwn + 2UL) << 40)) | ((unsigned long long )*(wwn + 3UL) << 32)) | ((unsigned long long )*(wwn + 4UL) << 24)) | ((unsigned long long )*(wwn + 5UL) << 16)) | ((unsigned long long )*(wwn + 6UL) << 8)) | (unsigned long long )*(wwn + 7UL)); } } extern struct fc_rport *fc_remote_port_add(struct Scsi_Host * , int , struct fc_rport_identifiers * ) ; extern void fc_remote_port_rolechg(struct fc_rport * , u32 ) ; static char const * const port_state_str___0[5U] = { "Unknown", "UNCONFIGURED", "DEAD", "LOST", "ONLINE"}; void qla2x00_alloc_fw_dump(scsi_qla_host_t *vha ) ; int qla2x00_async_tm_cmd(fc_port_t *fcport , uint32_t tm_flags , uint32_t lun , uint32_t tag ) ; int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha , fc_port_t *fcport ) ; fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *vha , gfp_t flags ) ; int qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha , struct req_que *req ) ; int qla2x00_init_rings(scsi_qla_host_t *vha ) ; int qla24xx_configure_vhba(scsi_qla_host_t *vha ) ; int qla2x00_marker(struct scsi_qla_host *vha , struct req_que *req , struct rsp_que *rsp , uint16_t loop_id , uint16_t lun , uint8_t type ) ; int qla2x00_start_sp(srb_t *sp ) ; unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *vha ) ; int qla2x00_load_ram(scsi_qla_host_t *vha , dma_addr_t req_dma , uint32_t risc_addr , uint32_t risc_code_size ) ; int qla2x00_execute_fw(scsi_qla_host_t *vha , uint32_t risc_addr ) ; int qla2x00_get_fw_version(scsi_qla_host_t *vha ) ; int qla2x00_get_fw_options(scsi_qla_host_t *vha , uint16_t *fwopts ) ; int qla2x00_set_fw_options(scsi_qla_host_t *vha , uint16_t *fwopts ) ; int qla2x00_mbx_reg_test(scsi_qla_host_t *vha ) ; int qla2x00_verify_checksum(scsi_qla_host_t *vha , uint32_t risc_addr ) ; int qla2x00_get_adapter_id(scsi_qla_host_t *vha , uint16_t *id , uint8_t *al_pa , uint8_t *area , uint8_t *domain , uint16_t *top , uint16_t *sw_cap ) ; int qla2x00_get_retry_cnt(scsi_qla_host_t *vha , uint8_t *retry_cnt , uint8_t *tov , uint16_t *r_a_tov ) ; int qla2x00_init_firmware(scsi_qla_host_t *vha , uint16_t size ) ; int qla2x00_get_firmware_state(scsi_qla_host_t *vha , uint16_t *states ) ; int qla2x00_get_port_name(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t *name , uint8_t opt ) ; int qla24xx_link_initialize(scsi_qla_host_t *vha ) ; int qla2x00_login_local_device(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t *mb_ret , uint8_t opt ) ; int qla2x00_get_id_list(scsi_qla_host_t *vha , void *id_list , dma_addr_t id_list_dma , uint16_t *entries ) ; int qla2x00_get_resource_cnts(scsi_qla_host_t *vha , uint16_t *cur_xchg_cnt , uint16_t *orig_xchg_cnt , uint16_t *cur_iocb_cnt , uint16_t *orig_iocb_cnt , uint16_t *max_npiv_vports , uint16_t *max_fcfs ) ; int qla2x00_set_serdes_params(scsi_qla_host_t *vha , uint16_t sw_em_1g , uint16_t sw_em_2g , uint16_t sw_em_4g ) ; int qla2x00_stop_firmware(scsi_qla_host_t *vha ) ; int qla2x00_enable_eft_trace(scsi_qla_host_t *vha , dma_addr_t eft_dma , uint16_t buffers ) ; int qla2x00_enable_fce_trace(scsi_qla_host_t *vha , dma_addr_t fce_dma , uint16_t buffers , uint16_t *mb , uint32_t *dwords ) ; int qla82xx_set_driver_version(scsi_qla_host_t *vha , char *version ) ; int qla25xx_set_driver_version(scsi_qla_host_t *vha , char *version ) ; int 
qla2x00_set_idma_speed(scsi_qla_host_t *vha , uint16_t loop_id , uint16_t port_speed , uint16_t *mb ) ; int qla84xx_verify_chip(struct scsi_qla_host *vha , uint16_t *status ) ; int qla81xx_fac_get_sector_size(scsi_qla_host_t *vha , uint32_t *sector_size ) ; int qla2x00_read_ram_word(scsi_qla_host_t *vha , uint32_t risc_addr , uint32_t *data ) ; int qla2x00_write_ram_word(scsi_qla_host_t *vha , uint32_t risc_addr , uint32_t data ) ; int qla81xx_write_mpi_register(scsi_qla_host_t *vha , uint16_t *mb ) ; int qla2x00_get_data_rate(scsi_qla_host_t *vha ) ; int qla24xx_set_fcp_prio(scsi_qla_host_t *vha , uint16_t loop_id , uint16_t priority , uint16_t *mb ) ; int qla81xx_get_port_config(scsi_qla_host_t *vha , uint16_t *mb ) ; int qla2x00_dump_mctp_data(scsi_qla_host_t *vha , dma_addr_t req_dma , uint32_t addr , uint32_t size ) ; char const *qla2x00_get_link_speed_str(struct qla_hw_data *ha , uint16_t speed ) ; uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *vha , uint32_t *dwptr , uint32_t faddr , uint32_t dwords ) ; int qla2x00_is_a_vp_did(scsi_qla_host_t *vha , uint32_t rscn_entry ) ; int qla2xxx_get_flash_info(scsi_qla_host_t *vha ) ; int qla2xxx_get_vpd_field(scsi_qla_host_t *vha , char *key , char *str , size_t size ) ; int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha ) ; void ql_dump_buffer(uint32_t level , scsi_qla_host_t *vha , int32_t id , uint8_t *b , uint32_t size ) ; int qla2x00_ga_nxt(scsi_qla_host_t *vha , fc_port_t *fcport ) ; int qla2x00_gid_pt(scsi_qla_host_t *vha , sw_info_t *list ) ; int qla2x00_gpn_id(scsi_qla_host_t *vha , sw_info_t *list ) ; int qla2x00_gnn_id(scsi_qla_host_t *vha , sw_info_t *list ) ; void qla2x00_gff_id(scsi_qla_host_t *vha , sw_info_t *list ) ; int qla2x00_rft_id(scsi_qla_host_t *vha ) ; int qla2x00_rff_id(scsi_qla_host_t *vha ) ; int qla2x00_rnn_id(scsi_qla_host_t *vha ) ; int qla2x00_rsnn_nn(scsi_qla_host_t *vha ) ; int qla2x00_fdmi_register(scsi_qla_host_t *vha ) ; int qla2x00_gfpn_id(scsi_qla_host_t *vha , sw_info_t *list ) ; int qla2x00_gpsc(scsi_qla_host_t *vha , sw_info_t *list ) ; int qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha ) ; int qla25xx_init_req_que(struct scsi_qla_host *vha , struct req_que *req ) ; int qla25xx_init_rsp_que(struct scsi_qla_host *vha , struct rsp_que *rsp ) ; void qla2x00_init_response_q_entries(struct rsp_que *rsp ) ; int qlafx00_init_firmware(scsi_qla_host_t *vha , uint16_t size ) ; int qlafx00_fw_ready(scsi_qla_host_t *vha ) ; int qlafx00_configure_devices(scsi_qla_host_t *vha ) ; void qlafx00_init_response_q_entries(struct rsp_que *rsp ) ; void qla2x00_sp_free(void *data , void *ptr ) ; void qla2x00_sp_timeout(unsigned long __data ) ; __inline void qla2x00_set_model_info(scsi_qla_host_t *vha , uint8_t *model , size_t len , char *def ) ; int qla82xx_check_md_needed(scsi_qla_host_t *vha ) ; void qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha ) ; void qla8044_read_reset_template(struct scsi_qla_host *vha ) ; void qla8044_set_idc_dontreset(struct scsi_qla_host *vha ) ; int qla8044_rd_direct(struct scsi_qla_host *vha , uint32_t const crb_reg ) ; __inline static uint16_t qla2x00_debounce_register(uint16_t volatile *addr ) { uint16_t volatile first ; uint16_t volatile second ; unsigned short tmp ; unsigned short tmp___0 ; { ldv_43298: tmp = readw((void const volatile *)addr); first = tmp; __asm__ volatile ("": : : "memory"); cpu_relax(); tmp___0 = readw((void const volatile *)addr); second = tmp___0; if ((int )((unsigned short )first) != (int )((unsigned short )second)) { goto ldv_43298; } else { } return 
((uint16_t )first); } } __inline static void qla2x00_set_fcport_state___0(fc_port_t *fcport , int state ) { int old_state ; { old_state = atomic_read((atomic_t const *)(& fcport->state)); atomic_set(& fcport->state, state); if (old_state != 0 && old_state != state) { ql_dbg(268435456U, fcport->vha, 8317, "FCPort state transitioned from %s to %s - portid=%02x%02x%02x.\n", port_state_str___0[old_state], port_state_str___0[state], (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { } return; } } __inline static void qla2x00_init_timer(srb_t *sp , unsigned long tmo ) { { reg_timer_7(& sp->u.iocb_cmd.timer); sp->u.iocb_cmd.timer.expires = tmo * 250UL + (unsigned long )jiffies; sp->u.iocb_cmd.timer.data = (unsigned long )sp; sp->u.iocb_cmd.timer.function = & qla2x00_sp_timeout; add_timer(& sp->u.iocb_cmd.timer); sp->free = & qla2x00_sp_free; if (((((sp->fcport)->vha)->hw)->device_type & 131072U) != 0U && (unsigned int )sp->type == 10U) { init_completion(& sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); } else { } return; } } extern void __const_udelay(unsigned long ) ; static char *qla2x00_model_name[184U] = { (char *)"QLA2340", (char *)"133MHz PCI-X to 2Gb FC, Single Channel", (char *)"QLA2342", (char *)"133MHz PCI-X to 2Gb FC, Dual Channel", (char *)"QLA2344", (char *)"133MHz PCI-X to 2Gb FC, Quad Channel", (char *)"QCP2342", (char *)"cPCI to 2Gb FC, Dual Channel", (char *)"QSB2340", (char *)"SBUS to 2Gb FC, Single Channel", (char *)"QSB2342", (char *)"SBUS to 2Gb FC, Dual Channel", (char *)"QLA2310", (char *)"Sun 66MHz PCI-X to 2Gb FC, Single Channel", (char *)"QLA2332", (char *)"Sun 66MHz PCI-X to 2Gb FC, Single Channel", (char *)"QCP2332", (char *)"Sun cPCI to 2Gb FC, Dual Channel", (char *)"QCP2340", (char *)"cPCI to 2Gb FC, Single Channel", (char *)"QLA2342", (char *)"Sun 133MHz PCI-X to 2Gb FC, Dual Channel", (char *)"QCP2342", (char *)"Sun - cPCI to 2Gb FC, Dual Channel", (char *)"QLA2350", (char *)"133MHz PCI-X to 2Gb FC, Single Channel", (char *)"QLA2352", (char *)"133MHz PCI-X to 2Gb FC, Dual Channel", (char *)"QLA2352", (char *)"Sun 133MHz PCI-X to 2Gb FC, Dual Channel", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QLA2360", (char *)"133MHz PCI-X to 2Gb FC, Single Channel", (char *)"QLA2362", (char *)"133MHz PCI-X to 2Gb FC, Dual Channel", (char *)"QLE2360", (char *)"PCI-Express to 2Gb FC, Single Channel", (char *)"QLE2362", (char *)"PCI-Express to 2Gb FC, Dual Channel", (char *)"QLA200", (char *)"133MHz PCI-X to 2Gb FC Optical", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QLA200P", (char *)"133MHz PCI-X to 2Gb FC SFP", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QLA210", (char *)"133MHz PCI-X to 2Gb FC, Single Channel", (char *)"EMC 250", (char *)"133MHz PCI-X to 2Gb FC, Single Channel", (char *)"HP A7538A", (char *)"HP 1p2g PCI-X to 2Gb FC, Single Channel", (char *)"QLA210", (char *)"Sun 133MHz PCI-X to 2Gb FC, Single Channel", (char *)"QLA2460", (char *)"PCI-X 2.0 to 4Gb FC, Single 
Channel", (char *)"QLA2462", (char *)"PCI-X 2.0 to 4Gb FC, Dual Channel", (char *)"QMC2462", (char *)"IBM eServer BC 4Gb FC Expansion Card", (char *)"QMC2462S", (char *)"IBM eServer BC 4Gb FC Expansion Card SFF", (char *)"QLE2460", (char *)"PCI-Express to 4Gb FC, Single Channel", (char *)"QLE2462", (char *)"PCI-Express to 4Gb FC, Dual Channel", (char *)"QME2462", (char *)"Dell BS PCI-Express to 4Gb FC, Dual Channel", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QEM2462", (char *)"Sun Server I/O Module 4Gb FC, Dual Channel", (char *)"QLE210", (char *)"PCI-Express to 2Gb FC, Single Channel", (char *)"QLE220", (char *)"PCI-Express to 4Gb FC, Single Channel", (char *)"QLA2460", (char *)"Sun PCI-X 2.0 to 4Gb FC, Single Channel", (char *)"QLA2462", (char *)"Sun PCI-X 2.0 to 4Gb FC, Dual Channel", (char *)"QLE2460", (char *)"Sun PCI-Express to 2Gb FC, Single Channel", (char *)"QLE2462", (char *)"Sun PCI-Express to 4Gb FC, Single Channel", (char *)"QEM2462", (char *)"Server I/O Module 4Gb FC, Dual Channel", (char *)"QLE2440", (char *)"PCI-Express to 4Gb FC, Single Channel", (char *)"QLE2464", (char *)"PCI-Express to 4Gb FC, Quad Channel", (char *)"QLA2440", (char *)"PCI-X 2.0 to 4Gb FC, Single Channel", (char *)"HP AE369A", (char *)"PCI-X 2.0 to 4Gb FC, Dual Channel", (char *)"QLA2340", (char *)"Sun 133MHz PCI-X to 2Gb FC, Single Channel", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QMC2432M", (char *)"IBM eServer BC 4Gb FC Expansion Card CFFE", (char *)"QMC2422M", (char *)"IBM eServer BC 4Gb FC Expansion Card CFFX", (char *)"QLE220", (char *)"Sun PCI-Express to 4Gb FC, Single Channel", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QME2462", (char *)"PCI-Express to 4Gb FC, Dual Channel Mezz HBA", (char *)"QMH2462", (char *)"PCI-Express to 4Gb FC, Dual Channel Mezz HBA", (char *)" ", (char *)" ", (char *)"QLE220", (char *)"PCI-Express to 4Gb FC, Single Channel", (char *)"QLE220", (char *)"PCI-Express to 4Gb FC, Single Channel", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)" ", (char *)"QME2472", (char *)"Dell BS PCI-Express to 4Gb FC, Dual Channel"}; void qlt_fc_port_added(struct scsi_qla_host *vha , fc_port_t *fcport ) ; void qlt_update_vp_map(struct scsi_qla_host *vha , int cmd ) ; __inline static bool qla_tgt_mode_enabled(struct scsi_qla_host *ha ) { { return (((int )(ha->host)->active_mode & 2) != 0); } } void qlt_init_atio_q_entries(struct scsi_qla_host *vha ) ; void qlt_24xx_process_atio_queue(struct scsi_qla_host *vha ) ; void qlt_24xx_config_rings(struct scsi_qla_host *vha ) ; void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha , struct nvram_24xx *nv ) ; void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha , struct init_cb_24xx *icb ) ; void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha , struct init_cb_81xx *icb ) ; void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha , struct nvram_81xx *nv ) ; static int qla2x00_isp_firmware(scsi_qla_host_t *vha ) ; static int qla2x00_setup_chip(scsi_qla_host_t *vha ) ; static int qla2x00_fw_ready(scsi_qla_host_t *vha ) ; static int qla2x00_configure_hba(scsi_qla_host_t *vha ) ; static int qla2x00_configure_loop(scsi_qla_host_t *vha ) ; static int qla2x00_configure_local_loop(scsi_qla_host_t *vha ) ; static int qla2x00_configure_fabric(scsi_qla_host_t *vha ) ; static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha , struct list_head *new_fcports ) ; static int 
qla2x00_fabric_dev_login(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t *next_loopid ) ; static int qla2x00_restart_isp(scsi_qla_host_t *vha ) ; static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *vha ) ; static int qla84xx_init_chip(scsi_qla_host_t *vha ) ; static int qla25xx_init_queues(struct qla_hw_data *ha ) ; void qla2x00_sp_timeout(unsigned long __data ) { srb_t *sp ; struct srb_iocb *iocb ; fc_port_t *fcport ; struct qla_hw_data *ha ; struct req_que *req ; unsigned long flags ; raw_spinlock_t *tmp ; { sp = (srb_t *)__data; fcport = sp->fcport; ha = (fcport->vha)->hw; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); req = *(ha->req_q_map); *(req->outstanding_cmds + (unsigned long )sp->handle) = (srb_t *)0; iocb = & sp->u.iocb_cmd; (*(iocb->timeout))((void *)sp); (*(sp->free))((void *)fcport->vha, (void *)sp); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } void qla2x00_sp_free(void *data , void *ptr ) { srb_t *sp ; struct srb_iocb *iocb ; struct scsi_qla_host *vha ; { sp = (srb_t *)ptr; iocb = & sp->u.iocb_cmd; vha = (struct scsi_qla_host *)data; ldv_del_timer_19(& iocb->timer); qla2x00_rel_sp(vha, sp); return; } } unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *vha ) { unsigned long tmo ; struct qla_hw_data *ha ; { ha = vha->hw; tmo = (unsigned long )((int )((unsigned int )ha->r_a_tov / 10U) * 2); if ((ha->device_type & 131072U) != 0U) { tmo = 20UL; } else if ((ha->device_type & 134217728U) == 0U) { tmo = (unsigned long )ha->login_timeout; } else { } return (tmo); } } static void qla2x00_async_iocb_timeout(void *data ) { srb_t *sp ; fc_port_t *fcport ; struct srb_iocb *lio ; { sp = (srb_t *)data; fcport = sp->fcport; ql_dbg(268435456U, fcport->vha, 8305, "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n", sp->name, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); fcport->flags = fcport->flags & 4294967287U; if ((unsigned int )sp->type == 1U) { lio = & sp->u.iocb_cmd; qla2x00_post_async_logout_work(fcport->vha, fcport, (uint16_t *)0U); lio->u.logio.data[0] = 16389U; lio->u.logio.data[1] = (unsigned int )lio->u.logio.flags & 1U; qla2x00_post_async_login_done_work(fcport->vha, fcport, (uint16_t *)(& lio->u.logio.data)); } else { } return; } } static void qla2x00_async_login_sp_done(void *data , void *ptr , int res ) { srb_t *sp ; struct srb_iocb *lio ; struct scsi_qla_host *vha ; int tmp ; { sp = (srb_t *)ptr; lio = & sp->u.iocb_cmd; vha = (struct scsi_qla_host *)data; tmp = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp == 0) { qla2x00_post_async_login_done_work((sp->fcport)->vha, sp->fcport, (uint16_t *)(& lio->u.logio.data)); } else { } (*(sp->free))((void *)(sp->fcport)->vha, (void *)sp); return; } } int qla2x00_async_login(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { srb_t *sp ; struct srb_iocb *lio ; int rval ; unsigned long tmp ; { rval = 258; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } sp->type = 1U; sp->name = (char *)"login"; tmp = qla2x00_get_async_timeout(vha); qla2x00_init_timer(sp, tmp + 2UL); lio = & sp->u.iocb_cmd; lio->timeout = & qla2x00_async_iocb_timeout; sp->done = & qla2x00_async_login_sp_done; lio->u.logio.flags = (uint16_t )((unsigned int )lio->u.logio.flags | 2U); if ((int )*(data + 1UL) & 1) { lio->u.logio.flags = (uint16_t )((unsigned int )lio->u.logio.flags | 1U); } else { } rval = 
qla2x00_start_sp(sp); if (rval != 0) { goto done_free_sp; } else { } ql_dbg(268435456U, vha, 8306, "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x retries=%d.\n", sp->handle, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, fcport->login_retry); return (rval); done_free_sp: (*(sp->free))((void *)fcport->vha, (void *)sp); done: ; return (rval); } } static void qla2x00_async_logout_sp_done(void *data , void *ptr , int res ) { srb_t *sp ; struct srb_iocb *lio ; struct scsi_qla_host *vha ; int tmp ; { sp = (srb_t *)ptr; lio = & sp->u.iocb_cmd; vha = (struct scsi_qla_host *)data; tmp = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp == 0) { qla2x00_post_async_logout_done_work((sp->fcport)->vha, sp->fcport, (uint16_t *)(& lio->u.logio.data)); } else { } (*(sp->free))((void *)(sp->fcport)->vha, (void *)sp); return; } } int qla2x00_async_logout(struct scsi_qla_host *vha , fc_port_t *fcport ) { srb_t *sp ; struct srb_iocb *lio ; int rval ; unsigned long tmp ; { rval = 258; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } sp->type = 2U; sp->name = (char *)"logout"; tmp = qla2x00_get_async_timeout(vha); qla2x00_init_timer(sp, tmp + 2UL); lio = & sp->u.iocb_cmd; lio->timeout = & qla2x00_async_iocb_timeout; sp->done = & qla2x00_async_logout_sp_done; rval = qla2x00_start_sp(sp); if (rval != 0) { goto done_free_sp; } else { } ql_dbg(268435456U, vha, 8304, "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n", sp->handle, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); return (rval); done_free_sp: (*(sp->free))((void *)fcport->vha, (void *)sp); done: ; return (rval); } } static void qla2x00_async_adisc_sp_done(void *data , void *ptr , int res ) { srb_t *sp ; struct srb_iocb *lio ; struct scsi_qla_host *vha ; int tmp ; { sp = (srb_t *)ptr; lio = & sp->u.iocb_cmd; vha = (struct scsi_qla_host *)data; tmp = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp == 0) { qla2x00_post_async_adisc_done_work((sp->fcport)->vha, sp->fcport, (uint16_t *)(& lio->u.logio.data)); } else { } (*(sp->free))((void *)(sp->fcport)->vha, (void *)sp); return; } } int qla2x00_async_adisc(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { srb_t *sp ; struct srb_iocb *lio ; int rval ; unsigned long tmp ; { rval = 258; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } sp->type = 6U; sp->name = (char *)"adisc"; tmp = qla2x00_get_async_timeout(vha); qla2x00_init_timer(sp, tmp + 2UL); lio = & sp->u.iocb_cmd; lio->timeout = & qla2x00_async_iocb_timeout; sp->done = & qla2x00_async_adisc_sp_done; if ((int )*(data + 1UL) & 1) { lio->u.logio.flags = (uint16_t )((unsigned int )lio->u.logio.flags | 1U); } else { } rval = qla2x00_start_sp(sp); if (rval != 0) { goto done_free_sp; } else { } ql_dbg(268435456U, vha, 8303, "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n", sp->handle, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); return (rval); done_free_sp: (*(sp->free))((void *)fcport->vha, (void *)sp); done: ; return (rval); } } static void qla2x00_async_tm_cmd_done(void *data , void *ptr , int res ) { srb_t *sp ; struct srb_iocb *iocb ; struct scsi_qla_host *vha ; uint32_t flags ; uint16_t lun ; int rval ; int tmp ; { sp = (srb_t 
*)ptr; iocb = & sp->u.iocb_cmd; vha = (struct scsi_qla_host *)data; tmp = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp == 0) { flags = iocb->u.tmf.flags; lun = (unsigned short )iocb->u.tmf.lun; rval = qla2x00_marker(vha, *((vha->hw)->req_q_map), *((vha->hw)->rsp_q_map), (int )(sp->fcport)->loop_id, (int )lun, flags != 16U); if (rval != 0 || iocb->u.tmf.data != 0U) { ql_dbg(4194304U, vha, 32816, "TM IOCB failed (%x).\n", rval); } else { } } else { } (*(sp->free))((void *)(sp->fcport)->vha, (void *)sp); return; } } int qla2x00_async_tm_cmd(fc_port_t *fcport , uint32_t tm_flags , uint32_t lun , uint32_t tag ) { struct scsi_qla_host *vha ; srb_t *sp ; struct srb_iocb *tcf ; int rval ; unsigned long tmp ; { vha = fcport->vha; rval = 258; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } sp->type = 7U; sp->name = (char *)"tmf"; tmp = qla2x00_get_async_timeout(vha); qla2x00_init_timer(sp, tmp + 2UL); tcf = & sp->u.iocb_cmd; tcf->u.tmf.flags = tm_flags; tcf->u.tmf.lun = lun; tcf->u.tmf.data = tag; tcf->timeout = & qla2x00_async_iocb_timeout; sp->done = & qla2x00_async_tm_cmd_done; rval = qla2x00_start_sp(sp); if (rval != 0) { goto done_free_sp; } else { } ql_dbg(4194304U, vha, 32815, "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", sp->handle, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); return (rval); done_free_sp: (*(sp->free))((void *)fcport->vha, (void *)sp); done: ; return (rval); } } void qla2x00_async_login_done(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { int rval ; { switch ((int )*data) { case 16384: rval = qla2x00_get_port_database(vha, fcport, 0); if (rval == 10) { fcport->flags = fcport->flags & 4294967287U; fcport->flags = fcport->flags | 2U; set_bit(8L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_60865; } else { } if (rval != 0) { qla2x00_post_async_logout_work(vha, fcport, (uint16_t *)0U); qla2x00_post_async_login_work(vha, fcport, (uint16_t *)0U); goto ldv_60865; } else { } if ((fcport->flags & 4U) != 0U) { qla2x00_post_async_adisc_work(vha, fcport, data); goto ldv_60865; } else { } qla2x00_update_fcport(vha, fcport); goto ldv_60865; case 16389: fcport->flags = fcport->flags & 4294967287U; if ((int )*(data + 1UL) & 1) { set_bit(8L, (unsigned long volatile *)(& vha->dpc_flags)); } else { qla2x00_mark_device_lost(vha, fcport, 1, 0); } goto ldv_60865; case 16391: fcport->loop_id = *(data + 1UL); qla2x00_post_async_logout_work(vha, fcport, (uint16_t *)0U); qla2x00_post_async_login_work(vha, fcport, (uint16_t *)0U); goto ldv_60865; case 16392: fcport->loop_id = (uint16_t )((int )fcport->loop_id + 1); rval = qla2x00_find_new_loop_id(vha, fcport); if (rval != 0) { fcport->flags = fcport->flags & 4294967287U; qla2x00_mark_device_lost(vha, fcport, 1, 0); goto ldv_60865; } else { } qla2x00_post_async_login_work(vha, fcport, (uint16_t *)0U); goto ldv_60865; } ldv_60865: ; return; } } void qla2x00_async_logout_done(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { { qla2x00_mark_device_lost(vha, fcport, 1, 0); return; } } void qla2x00_async_adisc_done(struct scsi_qla_host *vha , fc_port_t *fcport , uint16_t *data ) { { if ((unsigned int )*data == 16384U) { qla2x00_update_fcport(vha, fcport); return; } else { } fcport->flags = fcport->flags & 4294967287U; if ((int )*(data + 1UL) & 1) { set_bit(8L, (unsigned long volatile *)(& vha->dpc_flags)); } else { 
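/* Annotation (added): in this ADISC-done path, a completion status other than
   MBS_COMMAND_COMPLETE with the retry flag in data[1] clear marks the remote
   port lost below, instead of setting the relogin-needed bit in dpc_flags. */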
qla2x00_mark_device_lost(vha, fcport, 1, 0); } return; } } static int qla83xx_nic_core_fw_load(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; uint32_t idc_major_ver ; uint32_t idc_minor_ver ; uint16_t config[4U] ; int tmp ; int tmp___0 ; { rval = 0; ha = vha->hw; qla83xx_idc_lock(vha, 0); ha->fcoe_dev_init_timeout = 30U; ha->fcoe_reset_timeout = 10U; tmp = __qla83xx_set_drv_presence(vha); if (tmp != 0) { ql_dbg(524288U, vha, 45175, "Error while setting DRV-Presence.\n"); rval = 258; goto exit; } else { } qla83xx_reset_ownership(vha); qla83xx_rd_reg(vha, 571483008U, & idc_major_ver); if (*((unsigned long *)ha + 2UL) != 0UL) { idc_major_ver = 1U; qla83xx_wr_reg(vha, 571483008U, idc_major_ver); qla83xx_wr_reg(vha, 571483036U, 0U); } else if (idc_major_ver != 1U) { ql_log(1U, vha, 45181, "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n", idc_major_ver, 1); __qla83xx_clear_drv_presence(vha); rval = 258; goto exit; } else { } qla83xx_rd_reg(vha, 571483032U, & idc_minor_ver); idc_minor_ver = idc_minor_ver; qla83xx_wr_reg(vha, 571483032U, idc_minor_ver); if (*((unsigned long *)ha + 2UL) != 0UL) { memset((void *)(& config), 0, 8UL); tmp___0 = qla81xx_get_port_config(vha, (uint16_t *)(& config)); if (tmp___0 == 0) { qla83xx_wr_reg(vha, 571483012U, 3U); } else { } } else { } rval = qla83xx_idc_state_handler(vha); exit: qla83xx_idc_unlock(vha, 0); return (rval); } } int qla2x00_initialize_adapter(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; struct req_que *req ; int tmp ; bool tmp___0 ; { ha = vha->hw; req = *(ha->req_q_map); vha->flags.online = 0U; ha->flags.chip_reset_done = 0U; vha->flags.reset_active = 0U; ha->flags.pci_channel_io_perm_failure = 0U; ha->flags.eeh_busy = 0U; vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); atomic_set(& vha->loop_down_timer, 255); atomic_set(& vha->loop_state, 2); vha->device_flags = 2U; vha->dpc_flags = 0UL; vha->flags.management_server_logged_in = 0U; vha->marker_needed = 0U; ha->isp_abort_cnt = 0U; ha->beacon_blink_led = 0U; set_bit(0L, (unsigned long volatile *)(& ha->req_qid_map)); set_bit(0L, (unsigned long volatile *)(& ha->rsp_qid_map)); ql_dbg(1073741824U, vha, 64, "Configuring PCI space...\n"); rval = (*((ha->isp_ops)->pci_config))(vha); if (rval != 0) { ql_log(1U, vha, 68, "Unable to configure PCI space.\n"); return (rval); } else { } (*((ha->isp_ops)->reset_chip))(vha); rval = qla2xxx_get_flash_info(vha); if (rval != 0) { ql_log(0U, vha, 79, "Unable to validate FLASH data.\n"); return (rval); } else { } if ((ha->device_type & 262144U) != 0U) { qla8044_read_reset_template(vha); if (ql2xdontresethba == 1) { qla8044_set_idc_dontreset(vha); } else { } } else { } (*((ha->isp_ops)->get_flash_version))(vha, (void *)req->ring); ql_dbg(1073741824U, vha, 97, "Configure NVRAM parameters...\n"); (*((ha->isp_ops)->nvram_config))(vha); if (*((unsigned long *)ha + 2UL) != 0UL) { ql_log(2U, vha, 119, "Masking HBA WWPN %8phN (via NVRAM).\n", (uint8_t *)(& vha->port_name)); return (258); } else { } ql_dbg(1073741824U, vha, 120, "Verifying loaded RISC code...\n"); tmp = qla2x00_isp_firmware(vha); if (tmp != 0) { rval = (*((ha->isp_ops)->chip_diag))(vha); if (rval != 0) { return (rval); } else { } rval = qla2x00_setup_chip(vha); if (rval != 0) { return (rval); } else { } } else { } if ((ha->device_type & 4096U) != 0U) { ha->cs84xx = qla84xx_get_chip(vha); if ((unsigned long )ha->cs84xx == (unsigned long )((struct qla_chip_state_84xx *)0)) { ql_log(1U, vha, 208, "Unable to configure ISP84XX.\n"); return (258); } else { } } else 
{ } tmp___0 = qla_ini_mode_enabled(vha); if ((int )tmp___0) { rval = qla2x00_init_rings(vha); } else { } ha->flags.chip_reset_done = 1U; if (rval == 0 && (ha->device_type & 4096U) != 0U) { rval = qla84xx_init_chip(vha); if (rval != 0) { ql_log(1U, vha, 212, "Unable to initialize ISP84XX.\n"); qla84xx_put_chip(vha); } else { } } else { } if ((ha->device_type & 65536U) != 0U) { rval = qla83xx_nic_core_fw_load(vha); if (rval != 0) { ql_log(1U, vha, 292, "Error in initializing NIC Core f/w.\n"); } else { } } else { } if (((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) { qla24xx_read_fcp_prio_cfg(vha); } else { } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { qla82xx_set_driver_version(vha, (char *)"8.06.00.08-k"); } else { qla25xx_set_driver_version(vha, (char *)"8.06.00.08-k"); } return (rval); } } int qla2100_pci_config(struct scsi_qla_host *vha ) { uint16_t w ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; unsigned short tmp___0 ; { ha = vha->hw; reg = & (ha->iobase)->isp; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & w); w = (uint16_t )((unsigned int )w | 320U); pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )w); pci_disable_rom(ha->pdev); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = readw((void const volatile *)(& reg->ctrl_status)); ha->pci_attr = (uint32_t )tmp___0; spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); } } int qla2300_pci_config(struct scsi_qla_host *vha ) { uint16_t w ; unsigned long flags ; uint32_t cnt ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; unsigned short tmp___0 ; unsigned short tmp___1 ; raw_spinlock_t *tmp___2 ; unsigned short tmp___3 ; { flags = 0UL; ha = vha->hw; reg = & (ha->iobase)->isp; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & w); w = (uint16_t )((unsigned int )w | 320U); if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { w = (unsigned int )w & 64511U; } else { } pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )w); if ((ha->device_type & 4U) != 0U) { tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writew(8192, (void volatile *)(& reg->hccr)); cnt = 0U; goto ldv_60917; ldv_60916: tmp___0 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___0 & 32) != 0) { goto ldv_60915; } else { } __const_udelay(42950UL); cnt = cnt + 1U; ldv_60917: ; if (cnt <= 29999U) { goto ldv_60916; } else { } ldv_60915: writew(32, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); ha->fb_rev = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->fb_cmd_2100 : & reg->u.isp2300.fb_cmd)); if ((unsigned int )ha->fb_rev == 6U) { pci_clear_mwi(ha->pdev); } else { } writew(0, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); writew(12288, (void volatile *)(& reg->hccr)); cnt = 0U; goto ldv_60920; ldv_60919: tmp___1 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___1 & 32) == 0) { goto ldv_60918; } else { } __const_udelay(42950UL); cnt = cnt + 1U; ldv_60920: ; if (cnt <= 29999U) { goto ldv_60919; } else { } ldv_60918: spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } pci_write_config_byte((struct pci_dev const *)ha->pdev, 13, 128); pci_disable_rom(ha->pdev); tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); tmp___3 = readw((void const volatile *)(& reg->ctrl_status)); ha->pci_attr = (uint32_t )tmp___3; spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); } } int qla24xx_pci_config(scsi_qla_host_t *vha ) { uint16_t w ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; int tmp ; bool tmp___0 ; raw_spinlock_t *tmp___1 ; { flags = 0UL; ha = vha->hw; reg = & (ha->iobase)->isp24; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & w); w = (uint16_t )((unsigned int )w | 320U); w = (unsigned int )w & 64511U; pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )w); pci_write_config_byte((struct pci_dev const *)ha->pdev, 13, 128); tmp = pci_find_capability(ha->pdev, 7); if (tmp != 0) { pcix_set_mmrbc(ha->pdev, 2048); } else { } tmp___0 = pci_is_pcie(ha->pdev); if ((int )tmp___0) { pcie_set_readrq(ha->pdev, 4096); } else { } pci_disable_rom(ha->pdev); ha->chip_revision = (uint16_t )(ha->pdev)->revision; tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); ha->pci_attr = readl((void const volatile *)(& reg->ctrl_status)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); } } int qla25xx_pci_config(scsi_qla_host_t *vha ) { uint16_t w ; struct qla_hw_data *ha ; bool tmp ; { ha = vha->hw; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & w); w = (uint16_t )((unsigned int )w | 320U); w = (unsigned int )w & 64511U; pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )w); tmp = pci_is_pcie(ha->pdev); if ((int )tmp) { pcie_set_readrq(ha->pdev, 4096); } else { } pci_disable_rom(ha->pdev); ha->chip_revision = (uint16_t )(ha->pdev)->revision; return (0); } } static int qla2x00_isp_firmware(scsi_qla_host_t *vha ) { int rval ; uint16_t loop_id ; uint16_t topo ; uint16_t sw_cap ; uint8_t domain ; uint8_t area ; uint8_t al_pa ; struct qla_hw_data *ha ; { ha = vha->hw; rval = 258; if (*((unsigned long *)ha + 2UL) != 0UL) { ql_log(2U, vha, 121, "RISC CODE NOT loaded.\n"); rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); if (rval == 0) { rval = qla2x00_get_adapter_id(vha, & loop_id, & al_pa, & area, & domain, & topo, & sw_cap); } else { } } else { } if (rval != 0) { ql_dbg(1073741824U, vha, 122, "**** Load RISC code ****.\n"); } else { } return (rval); } } void qla2x00_reset_chip(struct scsi_qla_host *vha ) { unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; uint32_t cnt ; uint16_t cmd ; int tmp ; long tmp___0 ; raw_spinlock_t *tmp___1 ; unsigned short tmp___2 ; unsigned short tmp___3 ; unsigned short tmp___4 ; unsigned short tmp___5 ; { flags = 0UL; ha = vha->hw; reg = & 
(ha->iobase)->isp; tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return; } else { } (*((ha->isp_ops)->disable_intrs))(ha); tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); cmd = 0U; pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & cmd); cmd = (unsigned int )cmd & 65531U; pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )cmd); if ((ha->device_type & 1U) == 0U) { writew(8192, (void volatile *)(& reg->hccr)); if ((ha->device_type & 2U) != 0U || (ha->device_type & 4U) != 0U) { cnt = 0U; goto ldv_60963; ldv_60962: tmp___2 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___2 & 32) != 0) { goto ldv_60961; } else { } __const_udelay(429500UL); cnt = cnt + 1U; ldv_60963: ; if (cnt <= 29999U) { goto ldv_60962; } else { } ldv_60961: ; } else { readw((void const volatile *)(& reg->hccr)); __const_udelay(42950UL); } writew(32, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); writew(256, (void volatile *)(& reg->fpm_diag_config)); readw((void const volatile *)(& reg->fpm_diag_config)); if ((ha->device_type & 2U) == 0U) { writew(0, (void volatile *)(& reg->fpm_diag_config)); readw((void const volatile *)(& reg->fpm_diag_config)); } else { } writew(16, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); if ((ha->device_type & 2U) != 0U) { writew(40960, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->fb_cmd_2100 : & reg->u.isp2300.fb_cmd)); readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->fb_cmd_2100 : & reg->u.isp2300.fb_cmd)); } else { writew(252, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->fb_cmd_2100 : & reg->u.isp2300.fb_cmd)); cnt = 0U; goto ldv_60966; ldv_60965: tmp___3 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->fb_cmd_2100 : & reg->u.isp2300.fb_cmd)); if (((int )tmp___3 & 255) == 0) { goto ldv_60964; } else { } __const_udelay(429500UL); cnt = cnt + 1U; ldv_60966: ; if (cnt <= 2999U) { goto ldv_60965; } else { } ldv_60964: ; } writew(0, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); writew(4096, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); writew(12288, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); } else { } writew(28672, (void volatile *)(& reg->hccr)); writew(24576, (void volatile *)(& reg->hccr)); writew(1, (void volatile *)(& reg->ctrl_status)); if (((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) || (ha->device_type & 4U) != 0U) { __const_udelay(85900UL); cnt = 30000U; goto ldv_60969; ldv_60968: tmp___4 = readw((void const volatile *)(& reg->ctrl_status)); if (((int )tmp___4 & 1) == 0) { goto ldv_60967; } else { } __const_udelay(429500UL); cnt = cnt - 1U; ldv_60969: ; if (cnt != 0U) { goto ldv_60968; } else { } ldv_60967: ; } else { __const_udelay(42950UL); } writew(4096, (void volatile *)(& reg->hccr)); writew(0, (void volatile *)(& reg->semaphore)); writew(12288, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); if (((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) || (ha->device_type & 4U) != 0U) { cnt = 0U; goto ldv_60972; ldv_60971: tmp___5 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); if ((unsigned int )tmp___5 != 4U) { goto ldv_60970; } else { } __const_udelay(429500UL); cnt = cnt + 1U; ldv_60972: ; if (cnt <= 29999U) { goto ldv_60971; } else { } ldv_60970: ; } else { __const_udelay(429500UL); } cmd = (uint16_t )((unsigned int )cmd | 4U); pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )cmd); if ((ha->device_type & 1U) == 0U) { writew(16385, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static int qla81xx_reset_mpi(scsi_qla_host_t *vha ) { uint16_t mb[4U] ; int tmp ; { mb[0] = 4112U; mb[1] = 0U; mb[2] = 1U; mb[3] = 0U; if (((vha->hw)->device_type & 8192U) == 0U) { return (0); } else { } tmp = qla81xx_write_mpi_register(vha, (uint16_t *)(& mb)); return (tmp); } } __inline static void qla24xx_reset_risc(scsi_qla_host_t *vha ) { unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; uint32_t cnt ; uint32_t d2 ; uint16_t wd ; int abts_cnt ; raw_spinlock_t *tmp ; unsigned int tmp___0 ; unsigned short tmp___1 ; unsigned short tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned short tmp___5 ; unsigned short tmp___6 ; { flags = 0UL; ha = vha->hw; reg = & (ha->iobase)->isp24; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writel(65584U, (void volatile *)(& reg->ctrl_status)); cnt = 0U; goto ldv_60992; ldv_60991: tmp___0 = readl((void const volatile *)(& reg->ctrl_status)); if ((tmp___0 & 131072U) == 0U) { goto ldv_60990; } else { } __const_udelay(42950UL); cnt = cnt + 1U; ldv_60992: ; if (cnt <= 29999U) { goto ldv_60991; } else { } ldv_60990: writel(65585U, (void volatile *)(& reg->ctrl_status)); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & wd); __const_udelay(429500UL); tmp___1 = readw((void const volatile *)(& reg->mailbox0)); d2 = (unsigned int )tmp___1; cnt = 10000U; goto ldv_60994; ldv_60993: __const_udelay(21475UL); tmp___2 = readw((void const volatile *)(& reg->mailbox0)); d2 = (unsigned int )tmp___2; __asm__ volatile ("": : : "memory"); cnt = cnt - 1U; ldv_60994: ; if (cnt != 0U && d2 != 0U) { goto ldv_60993; } else { } d2 = readl((void const volatile *)(& reg->ctrl_status)); cnt = 6000000U; goto ldv_60997; ldv_60996: __const_udelay(21475UL); d2 = readl((void const volatile *)(& reg->ctrl_status)); __asm__ volatile ("": : : "memory"); cnt = cnt - 1U; ldv_60997: ; if (cnt != 0U && (int )d2 & 1) { goto ldv_60996; } else { } tmp___4 = test_and_clear_bit(19L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___4 != 0) { tmp___3 = qla81xx_reset_mpi(vha); if (tmp___3 != 0) { abts_cnt = abts_cnt + 1; if (abts_cnt <= 4) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(19L, (unsigned long volatile *)(& vha->dpc_flags)); } else { abts_cnt = 0; vha->flags.online = 0U; } } else { } } else { } writel(268435456U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); writel(1073741824U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); writel(536870912U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); tmp___5 = readw((void const volatile *)(& reg->mailbox0)); d2 = (unsigned int )tmp___5; cnt = 6000000U; goto ldv_61000; ldv_60999: __const_udelay(21475UL); tmp___6 = readw((void const volatile *)(& reg->mailbox0)); d2 = (unsigned int )tmp___6; __asm__ volatile ("": : : "memory"); cnt = cnt - 1U; ldv_61000: ; if (cnt != 0U && d2 != 0U) { goto 
ldv_60999; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((((ha->device_type & 2048U) != 0U || (ha->device_type & 8192U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) && *((unsigned long *)ha + 2UL) != 0UL) { (*((ha->isp_ops)->enable_intrs))(ha); } else { } return; } } static void qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha , uint32_t *data ) { struct device_reg_24xx *reg ; { reg = & ((vha->hw)->iobase)->isp24; writel(28688U, (void volatile *)(& reg->iobase_addr)); *data = readl((void const volatile *)(& reg->iobase_window) + 6U); return; } } static void qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha , uint32_t data ) { struct device_reg_24xx *reg ; { reg = & ((vha->hw)->iobase)->isp24; writel(28688U, (void volatile *)(& reg->iobase_addr)); writel(data, (void volatile *)(& reg->iobase_window) + 6U); return; } } static void qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t wd32 ; uint delta_msec ; uint elapsed_msec ; uint timeout_msec ; ulong n ; ulong tmp ; ulong tmp___0 ; { ha = vha->hw; wd32 = 0U; delta_msec = 100U; elapsed_msec = 0U; if ((ha->device_type & 2048U) == 0U && (ha->device_type & 32768U) == 0U) { return; } else { } attempt: timeout_msec = 2500U; n = (ulong )(timeout_msec / delta_msec); goto ldv_61025; ldv_61024: qla25xx_write_risc_sema_reg(vha, 65537U); qla25xx_read_risc_sema_reg(vha, & wd32); if ((int )wd32 & 1) { goto ldv_61022; } else { } msleep(delta_msec); elapsed_msec = elapsed_msec + delta_msec; if (elapsed_msec > 4500U) { goto force; } else { } ldv_61025: tmp = n; n = n - 1UL; if (tmp != 0UL) { goto ldv_61024; } else { } ldv_61022: ; if (((unsigned long )wd32 & 1UL) == 0UL) { goto force; } else { } if (((unsigned long )wd32 & 32768UL) == 0UL) { goto acquired; } else { } qla25xx_write_risc_sema_reg(vha, 65536U); timeout_msec = 2000U; n = (ulong )(timeout_msec / delta_msec); goto ldv_61029; ldv_61028: qla25xx_read_risc_sema_reg(vha, & wd32); if (((unsigned long )wd32 & 32768UL) == 0UL) { goto ldv_61027; } else { } msleep(delta_msec); elapsed_msec = elapsed_msec + delta_msec; if (elapsed_msec > 4500U) { goto force; } else { } ldv_61029: tmp___0 = n; n = n - 1UL; if (tmp___0 != 0UL) { goto ldv_61028; } else { } ldv_61027: ; if (((unsigned long )wd32 & 32768UL) != 0UL) { qla25xx_write_risc_sema_reg(vha, 2147483648U); } else { } goto attempt; force: qla25xx_write_risc_sema_reg(vha, 2147516416U); acquired: ; return; } } void qla24xx_reset_chip(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = pci_channel_offline(ha->pdev); if (tmp != 0 && *((unsigned long *)ha + 2UL) != 0UL) { return; } else { } (*((ha->isp_ops)->disable_intrs))(ha); qla25xx_manipulate_risc_semaphore(vha); qla24xx_reset_risc(vha); return; } } int qla2x00_chip_diag(struct scsi_qla_host *vha ) { int rval ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; unsigned long flags ; uint16_t data ; uint32_t cnt ; uint16_t mb[5U] ; struct req_que *req ; raw_spinlock_t *tmp ; unsigned short tmp___0 ; raw_spinlock_t *tmp___1 ; { ha = vha->hw; reg = & (ha->iobase)->isp; flags = 0UL; req = *(ha->req_q_map); rval = 258; ql_dbg(1073741824U, vha, 123, "Testing device at %lx.\n", (unsigned long )(& reg->flash_address)); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writew(1, (void volatile *)(& reg->ctrl_status)); __const_udelay(85900UL); data = qla2x00_debounce_register((uint16_t volatile *)(& reg->ctrl_status)); cnt = 6000000U; goto ldv_61049; 
ldv_61048: __const_udelay(21475UL); data = readw((void const volatile *)(& reg->ctrl_status)); __asm__ volatile ("": : : "memory"); cnt = cnt - 1U; ldv_61049: ; if (cnt != 0U && (int )data & 1) { goto ldv_61048; } else { } if (cnt == 0U) { goto chip_diag_failed; } else { } ql_dbg(1073741824U, vha, 124, "Reset register cleared by chip reset.\n"); writew(4096, (void volatile *)(& reg->hccr)); writew(12288, (void volatile *)(& reg->hccr)); if (((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) || (ha->device_type & 4U) != 0U) { data = qla2x00_debounce_register((uint16_t volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); cnt = 6000000U; goto ldv_61053; ldv_61052: __const_udelay(21475UL); data = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); __asm__ volatile ("": : : "memory"); cnt = cnt - 1U; ldv_61053: ; if (cnt != 0U && (unsigned int )data == 4U) { goto ldv_61052; } else { } } else { __const_udelay(42950UL); } if (cnt == 0U) { goto chip_diag_failed; } else { } ql_dbg(1073741824U, vha, 125, "Checking product Id of chip.\n"); mb[1] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 1U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 1U); mb[2] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 2U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 2U); mb[3] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 3U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 3U); mb[4] = qla2x00_debounce_register((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (uint16_t volatile *)(& reg->u.isp2100.mailbox0) + 4U : (uint16_t volatile *)(& reg->u.isp2300.mailbox0) + 4U); if (((unsigned int )mb[1] != 18771U || ((unsigned int )mb[2] != 0U && (unsigned int )mb[2] != 20512U)) || (unsigned int )mb[3] != 8224U) { ql_log(1U, vha, 98, "Wrong product ID = 0x%x,0x%x,0x%x.\n", (int )mb[1], (int )mb[2], (int )mb[3]); goto chip_diag_failed; } else { } ha->product_id[0] = mb[1]; ha->product_id[1] = mb[2]; ha->product_id[2] = mb[3]; ha->product_id[3] = mb[4]; if ((unsigned int )req->length > 1024U) { ha->fw_transfer_size = 65536U; } else { ha->fw_transfer_size = (uint32_t )req->length * 64U; } if ((ha->device_type & 2U) != 0U) { tmp___0 = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
(void const volatile *)(& reg->u.isp2100.mailbox0) + 7U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 7U); if ((unsigned int )tmp___0 == 4U) { ql_dbg(1073741824U, vha, 126, "Found QLA2200A Chip.\n"); ha->device_type = ha->device_type | 1073741824U; ha->fw_transfer_size = 128U; } else { } } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); ql_dbg(1073741824U, vha, 127, "Checking mailboxes.\n"); rval = qla2x00_mbx_reg_test(vha); if (rval != 0) { ql_log(1U, vha, 128, "Failed mailbox send register test.\n"); } else { rval = 0; } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); chip_diag_failed: ; if (rval != 0) { ql_log(2U, vha, 129, "Chip diagnostics **** FAILED ****.\n"); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (rval); } } int qla24xx_chip_diag(struct scsi_qla_host *vha ) { int rval ; struct qla_hw_data *ha ; struct req_que *req ; { ha = vha->hw; req = *(ha->req_q_map); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return (0); } else { } ha->fw_transfer_size = (uint32_t )req->length * 64U; rval = qla2x00_mbx_reg_test(vha); if (rval != 0) { ql_log(1U, vha, 130, "Failed mailbox send register test.\n"); } else { rval = 0; } return (rval); } } void qla2x00_alloc_fw_dump(scsi_qla_host_t *vha ) { int rval ; uint32_t dump_size ; uint32_t fixed_size ; uint32_t mem_size ; uint32_t req_q_size ; uint32_t rsp_q_size ; uint32_t eft_size ; uint32_t fce_size ; uint32_t mq_size ; dma_addr_t tc_dma ; void *tc ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; void *tmp ; __u32 tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; __u32 tmp___5 ; __u32 tmp___6 ; { ha = vha->hw; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); if ((unsigned long )ha->fw_dump != (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_dbg(1073741824U, vha, 189, "Firmware dump already allocated.\n"); return; } else { } ha->fw_dumped = 0; mq_size = 0U; fce_size = mq_size; eft_size = fce_size; mem_size = eft_size; fixed_size = mem_size; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { fixed_size = 123634U; } else if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { fixed_size = 136098U; mem_size = (ha->fw_memory_size + 2147414017U) * 2U; } else if ((ha->device_type & 134217728U) != 0U) { if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { fixed_size = 51192U; } else if ((ha->device_type & 8192U) != 0U) { fixed_size = 39416U; } else if ((ha->device_type & 2048U) != 0U) { fixed_size = 39224U; } else { fixed_size = 37972U; } mem_size = (ha->fw_memory_size + 1072693249U) * 4U; if ((unsigned int )ha->mqenable != 0U) { if ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) { mq_size = 524U; } else { } mq_size = ((uint32_t )ha->max_req_queues * (uint32_t )req->length) * 64U + mq_size; mq_size = ((uint32_t )ha->max_rsp_queues * (uint32_t )rsp->length) * 64U + mq_size; } else { } if ((unsigned long )ha->tgt.atio_ring != (unsigned long )((struct atio *)0)) { mq_size = (uint32_t )ha->tgt.atio_q_length * 64U + mq_size; } else { } if (((ha->device_type & 2048U) == 0U && (ha->device_type & 8192U) == 0U) && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) { goto try_eft; } else { } tc = dma_alloc_attrs(& (ha->pdev)->dev, 65536UL, & tc_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )tc == 
(unsigned long )((void *)0)) { ql_log(1U, vha, 190, "Unable to allocate (%d KB) for FCE.\n", 64); goto try_eft; } else { } memset(tc, 0, 65536UL); rval = qla2x00_enable_fce_trace(vha, tc_dma, 64, (uint16_t *)(& ha->fce_mb), & ha->fce_bufs); if (rval != 0) { ql_log(1U, vha, 191, "Unable to initialize FCE (%d).\n", rval); dma_free_attrs(& (ha->pdev)->dev, 65536UL, tc, tc_dma, (struct dma_attrs *)0); ha->flags.fce_enabled = 0U; goto try_eft; } else { } ql_dbg(1073741824U, vha, 192, "Allocate (%d KB) for FCE...\n", 64); fce_size = 65588U; ha->flags.fce_enabled = 1U; ha->fce_dma = tc_dma; ha->fce = tc; try_eft: tc = dma_alloc_attrs(& (ha->pdev)->dev, 65536UL, & tc_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )tc == (unsigned long )((void *)0)) { ql_log(1U, vha, 193, "Unable to allocate (%d KB) for EFT.\n", 64); goto cont_alloc; } else { } memset(tc, 0, 65536UL); rval = qla2x00_enable_eft_trace(vha, tc_dma, 4); if (rval != 0) { ql_log(1U, vha, 194, "Unable to initialize EFT (%d).\n", rval); dma_free_attrs(& (ha->pdev)->dev, 65536UL, tc, tc_dma, (struct dma_attrs *)0); goto cont_alloc; } else { } ql_dbg(1073741824U, vha, 195, "Allocated (%d KB) EFT ...\n", 64); eft_size = 65536U; ha->eft_dma = tc_dma; ha->eft = tc; } else { } cont_alloc: req_q_size = (uint32_t )req->length * 64U; rsp_q_size = (uint32_t )rsp->length * 64U; dump_size = 72U; dump_size = ((((fixed_size + mem_size) + req_q_size) + rsp_q_size) + eft_size) + dump_size; ha->chain_offset = dump_size; dump_size = (mq_size + fce_size) + dump_size; tmp = vmalloc((unsigned long )dump_size); ha->fw_dump = (struct qla2xxx_fw_dump *)tmp; if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 196, "Unable to allocate (%d KB) for firmware dump.\n", dump_size / 1024U); if ((unsigned long )ha->fce != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 65536UL, ha->fce, ha->fce_dma, (struct dma_attrs *)0); ha->fce = (void *)0; ha->fce_dma = 0ULL; } else { } if ((unsigned long )ha->eft != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )eft_size, ha->eft, ha->eft_dma, (struct dma_attrs *)0); ha->eft = (void *)0; ha->eft_dma = 0ULL; } else { } return; } else { } ql_dbg(1073741824U, vha, 197, "Allocated (%d KB) for firmware dump.\n", dump_size / 1024U); ha->fw_dump_len = dump_size; (ha->fw_dump)->signature[0] = 81U; (ha->fw_dump)->signature[1] = 76U; (ha->fw_dump)->signature[2] = 71U; (ha->fw_dump)->signature[3] = 67U; (ha->fw_dump)->version = 16777216U; tmp___0 = __fswab32(fixed_size); (ha->fw_dump)->fixed_size = tmp___0; tmp___1 = __fswab32(mem_size); (ha->fw_dump)->mem_size = tmp___1; tmp___2 = __fswab32(req_q_size); (ha->fw_dump)->req_q_size = tmp___2; tmp___3 = __fswab32(rsp_q_size); (ha->fw_dump)->rsp_q_size = tmp___3; tmp___4 = __fswab32(eft_size); (ha->fw_dump)->eft_size = tmp___4; tmp___5 = __fswab32((unsigned int )ha->eft_dma); (ha->fw_dump)->eft_addr_l = tmp___5; tmp___6 = __fswab32((unsigned int )(ha->eft_dma >> 32ULL)); (ha->fw_dump)->eft_addr_h = tmp___6; (ha->fw_dump)->header_size = 1207959552U; return; } } static int qla81xx_mpi_sync(scsi_qla_host_t *vha ) { int rval ; uint16_t dc ; uint32_t dw ; { if (((vha->hw)->device_type & 8192U) == 0U) { return (0); } else { } rval = qla2x00_write_ram_word(vha, 31744U, 1U); if (rval != 0) { ql_log(1U, vha, 261, "Unable to acquire semaphore.\n"); goto done; } else { } pci_read_config_word((struct pci_dev const *)(vha->hw)->pdev, 84, & dc); rval = qla2x00_read_ram_word(vha, 31253U, & dw); if (rval != 0) { 
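/* Could not read back the MPI sync word; release the RAM-word semaphore (done_release) and return the error. */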
ql_log(1U, vha, 103, "Unable to read sync.\n"); goto done_release; } else { } dc = (unsigned int )dc & 224U; if ((uint32_t )dc == (dw & 224U)) { goto done_release; } else { } dw = dw & 4294967071U; dw = (uint32_t )dc | dw; rval = qla2x00_write_ram_word(vha, 31253U, dw); if (rval != 0) { ql_log(1U, vha, 276, "Unable to gain sync.\n"); } else { } done_release: rval = qla2x00_write_ram_word(vha, 31744U, 0U); if (rval != 0) { ql_log(1U, vha, 109, "Unable to release semaphore.\n"); } else { } done: ; return (rval); } } int qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha , struct req_que *req ) { void *tmp ; void *tmp___0 ; { if ((unsigned long )req->outstanding_cmds != (unsigned long )((srb_t **)0)) { return (0); } else { } if ((ha->device_type & 134217728U) == 0U || ((unsigned long )ha->mqiobase != (unsigned long )((device_reg_t *)0) && (ql2xmultique_tag != 0 || ql2xmaxqueues > 1))) { req->num_outstanding_cmds = 1024U; } else if ((int )ha->fw_xcb_count <= (int )ha->fw_iocb_count) { req->num_outstanding_cmds = ha->fw_xcb_count; } else { req->num_outstanding_cmds = ha->fw_iocb_count; } tmp = kzalloc((unsigned long )req->num_outstanding_cmds * 8UL, 208U); req->outstanding_cmds = (srb_t **)tmp; if ((unsigned long )req->outstanding_cmds == (unsigned long )((srb_t **)0)) { req->num_outstanding_cmds = 128U; tmp___0 = kzalloc((unsigned long )req->num_outstanding_cmds * 8UL, 208U); req->outstanding_cmds = (srb_t **)tmp___0; if ((unsigned long )req->outstanding_cmds == (unsigned long )((srb_t **)0)) { ql_log(0U, (scsi_qla_host_t *)0, 294, "Failed to allocate memory for outstanding_cmds for req_que %p.\n", req); req->num_outstanding_cmds = 0U; return (258); } else { } } else { } return (0); } } static int qla2x00_setup_chip(scsi_qla_host_t *vha ) { int rval ; uint32_t srisc_address ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; unsigned long flags ; uint16_t fw_major_version ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; uint32_t size ; { srisc_address = 0U; ha = vha->hw; reg = & (ha->iobase)->isp; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { rval = (*((ha->isp_ops)->load_risc))(vha, & srisc_address); if (rval == 0) { qla2x00_stop_firmware(vha); goto enable_82xx_npiv; } else { goto failed; } } else { } if (((ha->device_type & 134217728U) == 0U && (ha->device_type & 1U) == 0U) && (ha->device_type & 2U) == 0U) { tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writew(40960, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } qla81xx_mpi_sync(vha); rval = (*((ha->isp_ops)->load_risc))(vha, & srisc_address); if (rval == 0) { ql_dbg(1073741824U, vha, 201, "Verifying Checksum of loaded RISC code.\n"); rval = qla2x00_verify_checksum(vha, srisc_address); if (rval == 0) { ql_dbg(1073741824U, vha, 202, "Starting firmware.\n"); rval = qla2x00_execute_fw(vha, srisc_address); if (rval == 0) { enable_82xx_npiv: fw_major_version = ha->fw_major_version; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { qla82xx_check_md_needed(vha); } else { rval = qla2x00_get_fw_version(vha); } if (rval != 0) { goto failed; } else { } ha->flags.npiv_supported = 0U; if (((((((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) || (ha->device_type & 8192U) != 0U) || (ha->device_type & 16384U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 
65536U) != 0U)) || (ha->device_type & 262144U) != 0U) && ((int )ha->fw_attributes & 4) != 0) { ha->flags.npiv_supported = 1U; if ((unsigned int )ha->max_npiv_vports == 0U || ((unsigned int )((int )ha->max_npiv_vports + 1) & 63U) != 0U) { ha->max_npiv_vports = 63U; } else { } } else { } qla2x00_get_resource_cnts(vha, (uint16_t *)0U, & ha->fw_xcb_count, (uint16_t *)0U, & ha->fw_iocb_count, & ha->max_npiv_vports, (uint16_t *)0U); rval = qla2x00_alloc_outstanding_cmds(ha, vha->req); if (rval != 0) { goto failed; } else { } if (((unsigned int )fw_major_version == 0U && ql2xallocfwdump != 0) && ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U)) { qla2x00_alloc_fw_dump(vha); } else { } } else { } } else { ql_log(0U, vha, 205, "ISP Firmware failed checksum.\n"); goto failed; } } else { goto failed; } if (((ha->device_type & 134217728U) == 0U && (ha->device_type & 1U) == 0U) && (ha->device_type & 2U) == 0U) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); if ((ha->device_type & 4U) != 0U) { writew(40961, (void volatile *)(& reg->hccr)); } else { writew(40967, (void volatile *)(& reg->hccr)); } readw((void const volatile *)(& reg->hccr)); spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { goto skip_fac_check; } else { } if (rval == 0 && ((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U))) { rval = qla81xx_fac_get_sector_size(vha, & size); if (rval == 0) { ha->flags.fac_supported = 1U; ha->fdt_block_size = size << 2; } else { ql_log(1U, vha, 206, "Unsupported FAC firmware (%d.%02d.%02d).\n", (int )ha->fw_major_version, (int )ha->fw_minor_version, (int )ha->fw_subminor_version); skip_fac_check: ; if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { ha->flags.fac_supported = 0U; rval = 0; } else { } } } else { } failed: ; if (rval != 0) { ql_log(0U, vha, 207, "Setup chip ****FAILED****.\n"); } else { } return (rval); } } void qla2x00_init_response_q_entries(struct rsp_que *rsp ) { uint16_t cnt ; response_t *pkt ; { rsp->ring_ptr = rsp->ring; rsp->ring_index = 0U; rsp->status_srb = (srb_t *)0; pkt = rsp->ring_ptr; cnt = 0U; goto ldv_61120; ldv_61119: pkt->signature = 3735936685U; pkt = pkt + 1; cnt = (uint16_t )((int )cnt + 1); ldv_61120: ; if ((int )rsp->length > (int )cnt) { goto ldv_61119; } else { } return; } } void qla2x00_update_fw_options(struct scsi_qla_host *vha ) { uint16_t swing ; uint16_t emphasis ; uint16_t tx_sens ; uint16_t rx_sens ; struct qla_hw_data *ha ; { ha = vha->hw; memset((void *)(& ha->fw_options), 0, 32UL); qla2x00_get_fw_options(vha, (uint16_t *)(& ha->fw_options)); if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { return; } else { } ql_dbg(1073872896U, vha, 277, "Serial link options.\n"); ql_dump_buffer(1073872896U, vha, 265, (uint8_t *)(& ha->fw_seriallink_options), 4U); ha->fw_options[1] = (unsigned int )ha->fw_options[1] & 65279U; if (((int )ha->fw_seriallink_options[3] & 4) != 0) { ha->fw_options[1] = (uint16_t )((unsigned int )ha->fw_options[1] | 256U); swing = (unsigned int )((uint16_t )ha->fw_seriallink_options[2]) & 7U; emphasis = (uint16_t )(((int )ha->fw_seriallink_options[2] & 24) >> 3); tx_sens = (unsigned int )((uint16_t )ha->fw_seriallink_options[0]) & 15U; rx_sens = (uint16_t )((int )ha->fw_seriallink_options[0] >> 4); ha->fw_options[10] = (uint16_t )((int )((short )((int )emphasis << 14)) | (int )((short )((int )swing 
<< 8))); if (((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 32U) != 0U) { if ((unsigned int )rx_sens == 0U) { rx_sens = 3U; } else { } ha->fw_options[10] = (uint16_t )((int )((short )ha->fw_options[10]) | ((int )((short )((int )tx_sens << 4)) | (int )((short )rx_sens))); } else if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { ha->fw_options[10] = (uint16_t )((int )((short )ha->fw_options[10]) | (((int )((short )(((int )rx_sens & 3) << 2)) | 32) | ((int )((short )tx_sens) & 3))); } else { } swing = (uint16_t )((int )ha->fw_seriallink_options[2] >> 5); emphasis = (unsigned int )((uint16_t )ha->fw_seriallink_options[3]) & 3U; tx_sens = (unsigned int )((uint16_t )ha->fw_seriallink_options[1]) & 15U; rx_sens = (uint16_t )((int )ha->fw_seriallink_options[1] >> 4); ha->fw_options[11] = (uint16_t )((int )((short )((int )emphasis << 14)) | (int )((short )((int )swing << 8))); if (((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 32U) != 0U) { if ((unsigned int )rx_sens == 0U) { rx_sens = 3U; } else { } ha->fw_options[11] = (uint16_t )((int )((short )ha->fw_options[11]) | ((int )((short )((int )tx_sens << 4)) | (int )((short )rx_sens))); } else if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { ha->fw_options[11] = (uint16_t )((int )((short )ha->fw_options[11]) | (((int )((short )(((int )rx_sens & 3) << 2)) | 32) | ((int )((short )tx_sens) & 3))); } else { } } else { } ha->fw_options[3] = (uint16_t )((unsigned int )ha->fw_options[3] | 8192U); if (*((unsigned long *)ha + 2UL) != 0UL) { ha->fw_options[2] = (uint16_t )((unsigned int )ha->fw_options[2] | 4096U); } else { } if ((ha->device_type & 32U) != 0U) { ha->fw_options[2] = (uint16_t )((unsigned int )ha->fw_options[2] | 8192U); } else { } qla2x00_set_fw_options(vha, (uint16_t *)(& ha->fw_options)); return; } } void qla24xx_update_fw_options(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; { ha = vha->hw; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return; } else { } if (((int )ha->fw_seriallink_options24[0] & 1) == 0) { return; } else { } rval = qla2x00_set_serdes_params(vha, (int )ha->fw_seriallink_options24[1], (int )ha->fw_seriallink_options24[2], (int )ha->fw_seriallink_options24[3]); if (rval != 0) { ql_log(1U, vha, 260, "Unable to update Serial Link options (%x).\n", rval); } else { } return; } } void qla2x00_config_rings(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; struct req_que *req ; struct rsp_que *rsp ; { ha = vha->hw; reg = & (ha->iobase)->isp; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); (ha->init_cb)->request_q_outpointer = 0U; (ha->init_cb)->response_q_inpointer = 0U; (ha->init_cb)->request_q_length = req->length; (ha->init_cb)->response_q_length = rsp->length; (ha->init_cb)->request_q_address[0] = (unsigned int )req->dma; (ha->init_cb)->request_q_address[1] = (unsigned int )(req->dma >> 32ULL); (ha->init_cb)->response_q_address[0] = (unsigned int )rsp->dma; (ha->init_cb)->response_q_address[1] = (unsigned int )(rsp->dma >> 32ULL); writew(0, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox4 : & reg->u.isp2300.req_q_in)); writew(0, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox4 : & reg->u.isp2300.req_q_out)); writew(0, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->u.isp2100.mailbox5 : & reg->u.isp2300.rsp_q_in)); writew(0, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox5 : & reg->u.isp2300.rsp_q_out)); readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox5 : & reg->u.isp2300.rsp_q_out)); return; } } void qla24xx_config_rings(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; device_reg_t *reg ; struct device_reg_2xxx *ioreg ; struct qla_msix_entry *msix ; struct init_cb_24xx *icb ; uint16_t rid ; struct req_que *req ; struct rsp_que *rsp ; { ha = vha->hw; reg = (unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) ? ha->mqiobase : ha->iobase; ioreg = & (ha->iobase)->isp; rid = 0U; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); icb = (struct init_cb_24xx *)ha->init_cb; icb->request_q_outpointer = 0U; icb->response_q_inpointer = 0U; icb->request_q_length = req->length; icb->response_q_length = rsp->length; icb->request_q_address[0] = (unsigned int )req->dma; icb->request_q_address[1] = (unsigned int )(req->dma >> 32ULL); icb->response_q_address[0] = (unsigned int )rsp->dma; icb->response_q_address[1] = (unsigned int )(rsp->dma >> 32ULL); icb->atio_q_inpointer = 0U; icb->atio_q_length = ha->tgt.atio_q_length; icb->atio_q_address[0] = (unsigned int )ha->tgt.atio_dma; icb->atio_q_address[1] = (unsigned int )(ha->tgt.atio_dma >> 32ULL); if ((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { icb->qos = 5U; icb->rid = rid; if (*((unsigned long *)ha + 2UL) != 0UL) { msix = ha->msix_entries + 1UL; ql_dbg(1073741824U, vha, 253, "Registering vector 0x%x for base que.\n", (int )msix->entry); icb->msix = msix->entry; } else { } if ((unsigned int )((unsigned char )((int )rid >> 8)) != 0U) { icb->firmware_options_2 = icb->firmware_options_2 | 524288U; } else { } if ((unsigned int )((unsigned char )rid) != 0U) { icb->firmware_options_2 = icb->firmware_options_2 | 262144U; } else { } if ((((int )ha->fw_attributes & 64) != 0 && ((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U))) && *((unsigned long *)ha + 2UL) != 0UL) { icb->firmware_options_2 = icb->firmware_options_2 & 4290772991U; ha->flags.disable_msix_handshake = 1U; ql_dbg(1073741824U, vha, 254, "MSIX Handshake Disable Mode turned on.\n"); } else { icb->firmware_options_2 = icb->firmware_options_2 | 4194304U; } icb->firmware_options_2 = icb->firmware_options_2 | 8388608U; writel(0U, (void volatile *)(& reg->isp25mq.req_q_in)); writel(0U, (void volatile *)(& reg->isp25mq.req_q_out)); writel(0U, (void volatile *)(& reg->isp25mq.rsp_q_in)); writel(0U, (void volatile *)(& reg->isp25mq.rsp_q_out)); } else { writel(0U, (void volatile *)(& reg->isp24.req_q_in)); writel(0U, (void volatile *)(& reg->isp24.req_q_out)); writel(0U, (void volatile *)(& reg->isp24.rsp_q_in)); writel(0U, (void volatile *)(& reg->isp24.rsp_q_out)); } qlt_24xx_config_rings(vha); readl((void const volatile *)(& ioreg->hccr)); return; } } int qla2x00_init_rings(scsi_qla_host_t *vha ) { int rval ; unsigned long flags ; int cnt ; int que ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; struct mid_init_cb_24xx *mid_init_cb ; raw_spinlock_t *tmp ; { flags = 0UL; ha = vha->hw; mid_init_cb = (struct mid_init_cb_24xx *)ha->init_cb; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); que = 0; goto ldv_61172; 
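/* Walk every request queue: clear each outstanding-command slot, then reset the ring pointer, ring index and entry count before issuing firmware initialization. */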
ldv_61171: req = *(ha->req_q_map + (unsigned long )que); if ((unsigned long )req == (unsigned long )((struct req_que *)0)) { goto ldv_61167; } else { } cnt = 1; goto ldv_61169; ldv_61168: *(req->outstanding_cmds + (unsigned long )cnt) = (srb_t *)0; cnt = cnt + 1; ldv_61169: ; if ((int )req->num_outstanding_cmds > cnt) { goto ldv_61168; } else { } req->current_outstanding_cmd = 1U; req->ring_ptr = req->ring; req->ring_index = 0U; req->cnt = req->length; ldv_61167: que = que + 1; ldv_61172: ; if ((int )ha->max_req_queues > que) { goto ldv_61171; } else { } que = 0; goto ldv_61176; ldv_61175: rsp = *(ha->rsp_q_map + (unsigned long )que); if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { goto ldv_61174; } else { } if ((ha->device_type & 131072U) != 0U) { qlafx00_init_response_q_entries(rsp); } else { qla2x00_init_response_q_entries(rsp); } ldv_61174: que = que + 1; ldv_61176: ; if ((int )ha->max_rsp_queues > que) { goto ldv_61175; } else { } ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; ha->tgt.atio_ring_index = 0U; qlt_init_atio_q_entries(vha); (*((ha->isp_ops)->config_rings))(vha); spin_unlock_irqrestore(& ha->hardware_lock, flags); ql_dbg(1073741824U, vha, 209, "Issue init firmware.\n"); if ((ha->device_type & 131072U) != 0U) { rval = qlafx00_init_firmware(vha, (int )((uint16_t )ha->init_cb_size)); goto next_check; } else { } (*((ha->isp_ops)->update_fw_options))(vha); if (*((unsigned long *)ha + 2UL) != 0UL) { if ((unsigned int )ha->operating_mode == 0U && ((((ha->device_type & 8192U) == 0U && (ha->device_type & 16384U) == 0U) && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U)) { ha->max_npiv_vports = 63U; } else { } mid_init_cb->count = ha->max_npiv_vports; } else { } if ((ha->device_type & 134217728U) != 0U) { mid_init_cb->options = 2U; mid_init_cb->init_cb.execution_throttle = ha->fw_xcb_count; } else { } rval = qla2x00_init_firmware(vha, (int )((uint16_t )ha->init_cb_size)); next_check: ; if (rval != 0) { ql_log(0U, vha, 210, "Init Firmware **** FAILED ****.\n"); } else { ql_dbg(1073741824U, vha, 211, "Init Firmware -- success.\n"); } return (rval); } } static int qla2x00_fw_ready(scsi_qla_host_t *vha ) { int rval ; unsigned long wtime ; unsigned long mtime ; unsigned long cs84xx_time ; uint16_t min_wait ; uint16_t wait_time ; uint16_t state[5U] ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { ha = vha->hw; if (((vha->hw)->device_type & 131072U) != 0U) { tmp = qlafx00_fw_ready(vha); return (tmp); } else { } rval = 0; min_wait = 20U; wait_time = (unsigned int )((int )((uint16_t )ha->retry_count) * (int )((uint16_t )ha->login_timeout)) + 5U; if ((int )wait_time < (int )min_wait) { wait_time = min_wait; } else { } mtime = (unsigned long )((int )min_wait * 250) + (unsigned long )jiffies; wtime = (unsigned long )((int )wait_time * 250) + (unsigned long )jiffies; if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(2U, vha, 32798, "Waiting for LIP to complete.\n"); } else { } ldv_61209: memset((void *)(& state), -1, 10UL); rval = qla2x00_get_firmware_state(vha, (uint16_t *)(& state)); if (rval == 0) { if ((unsigned int )state[0] <= 3U) { vha->device_flags = vha->device_flags & 4294967293U; } else { } if ((ha->device_type & 4096U) != 0U && (unsigned int )state[0] != 3U) { ql_dbg(4194304U, vha, 32799, "fw_state=%x 84xx=%x.\n", (int )state[0], (int )state[2]); if (((int )state[2] & 8) != 0 && ((int )state[2] & 16) != 0) { ql_dbg(4194304U, vha, 32808, "Sending verify iocb.\n"); cs84xx_time = jiffies; rval = qla84xx_init_chip(vha); if (rval != 0) { 
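/* qla84xx_init_chip() (the "verify iocb" step) failed; abandon the firmware-ready wait loop. */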
ql_log(1U, vha, 32775, "Init chip failed.\n"); goto ldv_61190; } else { } cs84xx_time = (unsigned long )jiffies - cs84xx_time; wtime = wtime + cs84xx_time; mtime = mtime + cs84xx_time; ql_dbg(4194304U, vha, 32776, "Increasing wait time by %ld. New time %ld.\n", cs84xx_time, wtime); } else { } } else if ((unsigned int )state[0] == 3U) { ql_dbg(4194304U, vha, 32823, "F/W Ready - OK.\n"); qla2x00_get_retry_cnt(vha, & ha->retry_count, & ha->login_timeout, & ha->r_a_tov); rval = 0; goto ldv_61190; } else { } rval = 258; tmp___0 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___0 != 0 && (unsigned int )state[0] != 3U) { if ((long )((unsigned long )jiffies - mtime) >= 0L) { ql_log(2U, vha, 32824, "Cable is unplugged...\n"); vha->device_flags = vha->device_flags | 2U; goto ldv_61190; } else { } } else { } } else if ((long )((unsigned long )jiffies - mtime) >= 0L || *((unsigned long *)ha + 2UL) != 0UL) { goto ldv_61190; } else { } if ((long )((unsigned long )jiffies - wtime) >= 0L) { goto ldv_61190; } else { } msleep(500U); goto ldv_61209; ldv_61190: ql_dbg(4194304U, vha, 32826, "fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n", (int )state[0], (int )state[1], (int )state[2], (int )state[3], (int )state[4], jiffies); if (rval != 0 && (vha->device_flags & 2U) == 0U) { ql_log(1U, vha, 32827, "Firmware ready **** FAILED ****.\n"); } else { } return (rval); } } static int qla2x00_configure_hba(scsi_qla_host_t *vha ) { int rval ; uint16_t loop_id ; uint16_t topo ; uint16_t sw_cap ; uint8_t al_pa ; uint8_t area ; uint8_t domain ; char connect_type[22U] ; struct qla_hw_data *ha ; unsigned long flags ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; raw_spinlock_t *tmp___5 ; { ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; rval = qla2x00_get_adapter_id(vha, & loop_id, & al_pa, & area, & domain, & topo, & sw_cap); if (rval != 0) { tmp___1 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { ql_dbg(268435456U, vha, 8200, "Loop is in a transition state.\n"); } else { tmp___2 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___2 != 0) { ql_dbg(268435456U, vha, 8200, "Loop is in a transition state.\n"); } else { tmp___3 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___3 == 2) { ql_dbg(268435456U, vha, 8200, "Loop is in a transition state.\n"); } else { tmp___4 = atomic_read((atomic_t const *)(& ha->loop_down_timer)); if (tmp___4 != 0) { ql_dbg(268435456U, vha, 8200, "Loop is in a transition state.\n"); } else if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { ql_dbg(268435456U, vha, 8200, "Loop is in a transition state.\n"); } else if (rval == 5 && (unsigned int )loop_id == 7U) { ql_dbg(268435456U, vha, 8200, "Loop is in a transition state.\n"); } else { ql_log(1U, vha, 8201, "Unable to get host loop ID.\n"); if (((ha->device_type & 134217728U) != 0U && (unsigned long )vha == (unsigned long )base_vha) && (rval == 5 && (unsigned int )loop_id == 27U)) { ql_log(1U, vha, 4433, "Doing link init.\n"); tmp___0 = qla24xx_link_initialize(vha); if (tmp___0 == 0) { return (rval); } else { } } else { } set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } } } } return (rval); } else { } if ((unsigned int )topo == 4U) { ql_log(2U, vha, 8202, "Cannot get topology - retrying.\n"); return (258); } else { } 
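/* Adapter ID obtained: record the loop ID, then decode the reported topology into current_topology and a printable connection type. */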
vha->loop_id = loop_id; ha->min_external_loopid = 129U; ha->operating_mode = 0U; ha->switch_cap = 0U; switch ((int )topo) { case 0: ql_dbg(268435456U, vha, 8203, "HBA in NL topology.\n"); ha->current_topology = 1U; strcpy((char *)(& connect_type), "(Loop)"); goto ldv_61225; case 1: ql_dbg(268435456U, vha, 8204, "HBA in FL topology.\n"); ha->switch_cap = sw_cap; ha->current_topology = 4U; strcpy((char *)(& connect_type), "(FL_Port)"); goto ldv_61225; case 2: ql_dbg(268435456U, vha, 8205, "HBA in N P2P topology.\n"); ha->operating_mode = 1U; ha->current_topology = 2U; strcpy((char *)(& connect_type), "(N_Port-to-N_Port)"); goto ldv_61225; case 3: ql_dbg(268435456U, vha, 8206, "HBA in F P2P topology.\n"); ha->switch_cap = sw_cap; ha->operating_mode = 1U; ha->current_topology = 8U; strcpy((char *)(& connect_type), "(F_Port)"); goto ldv_61225; default: ql_dbg(268435456U, vha, 8207, "HBA in unknown topology %x, using NL.\n", (int )topo); ha->current_topology = 1U; strcpy((char *)(& connect_type), "(Loop)"); goto ldv_61225; } ldv_61225: vha->d_id.b.domain = domain; vha->d_id.b.area = area; vha->d_id.b.al_pa = al_pa; tmp___5 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___5); qlt_update_vp_map(vha, 2); spin_unlock_irqrestore(& ha->vport_slock, flags); if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(2U, vha, 8208, "Topology - %s, Host Loop address 0x%x.\n", (char *)(& connect_type), (int )vha->loop_id); } else { } return (rval); } } __inline void qla2x00_set_model_info(scsi_qla_host_t *vha , uint8_t *model , size_t len , char *def ) { char *st ; char *en ; uint16_t index ; struct qla_hw_data *ha ; int use_tbl ; char *tmp ; int tmp___0 ; { ha = vha->hw; use_tbl = ((((((ha->device_type & 128U) == 0U && (ha->device_type & 256U) == 0U) && ((ha->device_type & 512U) == 0U && (ha->device_type & 1024U) == 0U)) && (ha->device_type & 4096U) == 0U) && (ha->device_type & 2048U) == 0U) && ((((ha->device_type & 8192U) == 0U && (ha->device_type & 16384U) == 0U) && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U)) && (ha->device_type & 32768U) == 0U; tmp___0 = memcmp((void const *)model, (void const *)"", len); if (tmp___0 != 0) { strncpy((char *)(& ha->model_number), (char const *)model, len); en = (char *)(& ha->model_number); st = en; en = en + (len + 0xffffffffffffffffUL); goto ldv_61246; ldv_61245: ; if ((int )((signed char )*en) != 32 && (int )((signed char )*en) != 0) { goto ldv_61244; } else { } tmp = en; en = en - 1; *tmp = 0; ldv_61246: ; if ((unsigned long )en > (unsigned long )st) { goto ldv_61245; } else { } ldv_61244: index = (unsigned int )(ha->pdev)->subsystem_device & 255U; if ((use_tbl != 0 && (unsigned int )(ha->pdev)->subsystem_vendor == 4215U) && (unsigned int )index <= 91U) { strncpy((char *)(& ha->model_desc), (char const *)qla2x00_model_name[(int )index * 2 + 1], 79UL); } else { } } else { index = (unsigned int )(ha->pdev)->subsystem_device & 255U; if ((use_tbl != 0 && (unsigned int )(ha->pdev)->subsystem_vendor == 4215U) && (unsigned int )index <= 91U) { strcpy((char *)(& ha->model_number), (char const *)qla2x00_model_name[(int )index * 2]); strncpy((char *)(& ha->model_desc), (char const *)qla2x00_model_name[(int )index * 2 + 1], 79UL); } else { strcpy((char *)(& ha->model_number), (char const *)def); } } if ((ha->device_type & 134217728U) != 0U) { qla2xxx_get_vpd_field(vha, (char *)"\202", (char *)(& ha->model_desc), 80UL); } else { } return; } } static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha , nvram_t *nv ) { { return; } 
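/* Stub: returns immediately and leaves the NVRAM contents unchanged. */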
} int qla2x00_nvram_config(struct scsi_qla_host *vha ) { int rval ; uint8_t chksum ; uint16_t cnt ; uint8_t *dptr1 ; uint8_t *dptr2 ; struct qla_hw_data *ha ; init_cb_t *icb ; nvram_t *nv ; uint8_t *ptr ; struct device_reg_2xxx *reg ; unsigned short tmp ; uint8_t *tmp___0 ; uint8_t *tmp___1 ; uint8_t *tmp___2 ; uint16_t tmp___3 ; uint8_t *tmp___4 ; uint8_t *tmp___5 ; uint16_t tmp___6 ; size_t __len ; void *__ret ; size_t __len___0 ; void *__ret___0 ; size_t __len___1 ; void *__ret___1 ; size_t __len___2 ; void *__ret___2 ; size_t __len___3 ; void *__ret___3 ; size_t __len___4 ; void *__ret___4 ; { chksum = 0U; ha = vha->hw; icb = ha->init_cb; nv = (nvram_t *)ha->nvram; ptr = (uint8_t *)ha->nvram; reg = & (ha->iobase)->isp; rval = 0; ha->nvram_size = 256U; ha->nvram_base = 0U; if (((ha->device_type & 1U) == 0U && (ha->device_type & 2U) == 0U) && (ha->device_type & 4U) == 0U) { tmp = readw((void const volatile *)(& reg->ctrl_status)); if ((unsigned int )((int )tmp >> 14) == 1U) { ha->nvram_base = 128U; } else { } } else { } (*((ha->isp_ops)->read_nvram))(vha, ptr, (uint32_t )ha->nvram_base, (uint32_t )ha->nvram_size); cnt = 0U; chksum = 0U; goto ldv_61265; ldv_61264: tmp___0 = ptr; ptr = ptr + 1; chksum = (int )*tmp___0 + (int )chksum; cnt = (uint16_t )((int )cnt + 1); ldv_61265: ; if ((int )ha->nvram_size > (int )cnt) { goto ldv_61264; } else { } ql_dbg(1073872896U, vha, 271, "Contents of NVRAM.\n"); ql_dump_buffer(1073872896U, vha, 272, (uint8_t *)nv, (uint32_t )ha->nvram_size); if ((((((unsigned int )chksum != 0U || (unsigned int )nv->id[0] != 73U) || (unsigned int )nv->id[1] != 83U) || (unsigned int )nv->id[2] != 80U) || (unsigned int )nv->id[3] != 32U) || (unsigned int )nv->nvram_version == 0U) { ql_log(1U, vha, 100, "Inconsistent NVRAM detected: checksum=0x%x id=%c version=0x%x.\n", (int )chksum, (int )nv->id[0], (int )nv->nvram_version); ql_log(1U, vha, 101, "Falling back to functioning (yet invalid -- WWPN) defaults.\n"); memset((void *)nv, 0, (size_t )ha->nvram_size); nv->parameter_block_version = 1U; if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { nv->firmware_options[0] = 6U; nv->firmware_options[1] = 160U; nv->add_firmware_options[0] = 32U; nv->add_firmware_options[1] = 48U; nv->frame_payload_size = 2048U; nv->special_options[1] = 128U; } else if ((ha->device_type & 2U) != 0U) { nv->firmware_options[0] = 6U; nv->firmware_options[1] = 160U; nv->add_firmware_options[0] = 32U; nv->add_firmware_options[1] = 48U; nv->frame_payload_size = 1024U; } else if ((int )ha->device_type & 1) { nv->firmware_options[0] = 10U; nv->firmware_options[1] = 32U; nv->frame_payload_size = 1024U; } else { } nv->max_iocb_allocation = 256U; nv->execution_throttle = 16U; nv->retry_count = 8U; nv->retry_delay = 1U; nv->port_name[0] = 33U; nv->port_name[3] = 224U; nv->port_name[4] = 139U; qla2xxx_nvram_wwn_from_ofw(vha, nv); nv->login_timeout = 4U; nv->host_p[1] = 4U; nv->reset_delay = 5U; nv->port_down_retry_count = 8U; nv->max_luns_per_target = 8U; nv->link_down_timeout = 60U; rval = 1; } else { } memset((void *)icb, 0, (size_t )ha->init_cb_size); nv->firmware_options[0] = (uint8_t )((unsigned int )nv->firmware_options[0] | 66U); nv->firmware_options[0] = (unsigned int )nv->firmware_options[0] & 207U; nv->firmware_options[1] = (uint8_t )((unsigned int )nv->firmware_options[1] | 33U); nv->firmware_options[1] = (unsigned int )nv->firmware_options[1] & 239U; if (((((ha->device_type & 4U) 
!= 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { nv->firmware_options[0] = (uint8_t )((unsigned int )nv->firmware_options[0] | 4U); nv->firmware_options[0] = (unsigned int )nv->firmware_options[0] & 247U; nv->special_options[0] = (unsigned int )nv->special_options[0] & 191U; nv->add_firmware_options[1] = (uint8_t )((unsigned int )nv->add_firmware_options[1] | 48U); if ((ha->device_type & 4U) != 0U) { if ((unsigned int )ha->fb_rev == 7U) { strcpy((char *)(& ha->model_number), "QLA2310"); } else { strcpy((char *)(& ha->model_number), "QLA2300"); } } else { qla2x00_set_model_info(vha, (uint8_t *)(& nv->model_number), 16UL, (char *)"QLA23xx"); } } else if ((ha->device_type & 2U) != 0U) { nv->firmware_options[0] = (uint8_t )((unsigned int )nv->firmware_options[0] | 4U); if (((int )nv->add_firmware_options[0] & 112) == 48) { nv->add_firmware_options[0] = (unsigned int )nv->add_firmware_options[0] & 143U; nv->add_firmware_options[0] = (uint8_t )((unsigned int )nv->add_firmware_options[0] | 32U); } else { } strcpy((char *)(& ha->model_number), "QLA22xx"); } else { strcpy((char *)(& ha->model_number), "QLA2100"); } dptr1 = (uint8_t *)icb; dptr2 = & nv->parameter_block_version; cnt = 32U; goto ldv_61268; ldv_61267: tmp___1 = dptr1; dptr1 = dptr1 + 1; tmp___2 = dptr2; dptr2 = dptr2 + 1; *tmp___1 = *tmp___2; ldv_61268: tmp___3 = cnt; cnt = (uint16_t )((int )cnt - 1); if ((unsigned int )tmp___3 != 0U) { goto ldv_61267; } else { } dptr1 = (uint8_t *)(& icb->add_firmware_options); cnt = 6U; goto ldv_61271; ldv_61270: tmp___4 = dptr1; dptr1 = dptr1 + 1; tmp___5 = dptr2; dptr2 = dptr2 + 1; *tmp___4 = *tmp___5; ldv_61271: tmp___6 = cnt; cnt = (uint16_t )((int )cnt - 1); if ((unsigned int )tmp___6 != 0U) { goto ldv_61270; } else { } if ((int )((signed char )nv->host_p[1]) < 0) { __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& icb->node_name), (void const *)(& nv->alternate_node_name), __len); } else { __ret = __builtin_memcpy((void *)(& icb->node_name), (void const *)(& nv->alternate_node_name), __len); } __len___0 = 8UL; if (__len___0 > 63UL) { __ret___0 = __memcpy((void *)(& icb->port_name), (void const *)(& nv->alternate_port_name), __len___0); } else { __ret___0 = __builtin_memcpy((void *)(& icb->port_name), (void const *)(& nv->alternate_port_name), __len___0); } } else { } if (((int )icb->firmware_options[1] & 64) == 0) { __len___1 = 8UL; if (__len___1 > 63UL) { __ret___1 = __memcpy((void *)(& icb->node_name), (void const *)(& icb->port_name), __len___1); } else { __ret___1 = __builtin_memcpy((void *)(& icb->node_name), (void const *)(& icb->port_name), __len___1); } icb->node_name[0] = (unsigned int )icb->node_name[0] & 240U; } else { } if ((int )((signed char )nv->host_p[0]) < 0) { ql2xextended_error_logging = 507510784; } else { } ha->flags.disable_risc_code_load = ((int )nv->host_p[0] & 16) != 0; if ((ha->device_type & 1U) == 0U && (ha->device_type & 2U) == 0U) { ha->flags.disable_risc_code_load = 0U; } else { } ha->flags.enable_lip_reset = ((int )nv->host_p[1] & 2) != 0; ha->flags.enable_lip_full_login = ((int )nv->host_p[1] & 4) != 0; ha->flags.enable_target_reset = ((int )nv->host_p[1] & 8) != 0; ha->flags.enable_led_scheme = ((int )nv->special_options[1] & 16) != 0; ha->flags.disable_serdes = 0U; ha->operating_mode = (uint8_t )(((int )icb->add_firmware_options[0] & 112) >> 4); __len___2 = 4UL; if (__len___2 > 63UL) { __ret___2 = __memcpy((void *)(& ha->fw_seriallink_options), (void const 
*)(& nv->seriallink_options), __len___2); } else { __ret___2 = __builtin_memcpy((void *)(& ha->fw_seriallink_options), (void const *)(& nv->seriallink_options), __len___2); } ha->serial0 = icb->port_name[5]; ha->serial1 = icb->port_name[6]; ha->serial2 = icb->port_name[7]; __len___3 = 8UL; if (__len___3 > 63UL) { __ret___3 = __memcpy((void *)(& vha->node_name), (void const *)(& icb->node_name), __len___3); } else { __ret___3 = __builtin_memcpy((void *)(& vha->node_name), (void const *)(& icb->node_name), __len___3); } __len___4 = 8UL; if (__len___4 > 63UL) { __ret___4 = __memcpy((void *)(& vha->port_name), (void const *)(& icb->port_name), __len___4); } else { __ret___4 = __builtin_memcpy((void *)(& vha->port_name), (void const *)(& icb->port_name), __len___4); } icb->execution_throttle = 65535U; ha->retry_count = nv->retry_count; if ((int )nv->login_timeout != ql2xlogintimeout) { nv->login_timeout = (uint8_t )ql2xlogintimeout; } else { } if ((unsigned int )nv->login_timeout <= 3U) { nv->login_timeout = 4U; } else { } ha->login_timeout = nv->login_timeout; icb->login_timeout = nv->login_timeout; ha->r_a_tov = 100U; ha->loop_reset_delay = (uint16_t )nv->reset_delay; if ((unsigned int )nv->link_down_timeout == 0U) { ha->loop_down_abort_time = 195U; } else { ha->link_down_timeout = nv->link_down_timeout; ha->loop_down_abort_time = ~ ((int )ha->link_down_timeout); } ha->port_down_retry_count = (int )nv->port_down_retry_count; if (qlport_down_retry != 0) { ha->port_down_retry_count = qlport_down_retry; } else { } ha->login_retry_count = (uint32_t )nv->retry_count; if (ha->port_down_retry_count == (int )nv->port_down_retry_count && ha->port_down_retry_count > 3) { ha->login_retry_count = (uint32_t )ha->port_down_retry_count; } else if (ha->port_down_retry_count > (int )ha->login_retry_count) { ha->login_retry_count = (uint32_t )ha->port_down_retry_count; } else { } if (ql2xloginretrycount != 0) { ha->login_retry_count = (uint32_t )ql2xloginretrycount; } else { } icb->lun_enables = 0U; icb->command_resource_count = 0U; icb->immediate_notify_resource_count = 0U; icb->timeout = 0U; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { icb->firmware_options[0] = (unsigned int )icb->firmware_options[0] & 247U; icb->add_firmware_options[0] = (unsigned int )icb->add_firmware_options[0] & 240U; icb->add_firmware_options[0] = (uint8_t )((unsigned int )icb->add_firmware_options[0] | 4U); icb->response_accumulation_timer = 3U; icb->interrupt_delay_timer = 5U; vha->flags.process_response_queue = 1U; } else { if (*((unsigned long *)vha + 19UL) == 0UL) { ha->zio_mode = (unsigned int )((uint16_t )icb->add_firmware_options[0]) & 15U; ha->zio_timer = (unsigned int )icb->interrupt_delay_timer != 0U ? 
(uint16_t )icb->interrupt_delay_timer : 2U; } else { } icb->add_firmware_options[0] = (unsigned int )icb->add_firmware_options[0] & 240U; vha->flags.process_response_queue = 0U; if ((unsigned int )ha->zio_mode != 0U) { ha->zio_mode = 6U; ql_log(2U, vha, 104, "ZIO mode %d enabled; timer delay (%d us).\n", (int )ha->zio_mode, (int )ha->zio_timer * 100); icb->add_firmware_options[0] = (uint8_t )((int )icb->add_firmware_options[0] | (int )((unsigned char )ha->zio_mode)); icb->interrupt_delay_timer = (unsigned char )ha->zio_timer; vha->flags.process_response_queue = 1U; } else { } } if (rval != 0) { ql_log(1U, vha, 105, "NVRAM configuration failed.\n"); } else { } return (rval); } } static void qla2x00_rport_del(void *data ) { fc_port_t *fcport ; struct fc_rport *rport ; scsi_qla_host_t *vha ; unsigned long flags ; raw_spinlock_t *tmp ; { fcport = (fc_port_t *)data; vha = fcport->vha; tmp = spinlock_check(((fcport->vha)->host)->host_lock); flags = _raw_spin_lock_irqsave(tmp); rport = (unsigned long )fcport->drport != (unsigned long )((struct fc_rport *)0) ? fcport->drport : fcport->rport; fcport->drport = (struct fc_rport *)0; spin_unlock_irqrestore(((fcport->vha)->host)->host_lock, flags); if ((unsigned long )rport != (unsigned long )((struct fc_rport *)0)) { fc_remote_port_delete(rport); qlt_fc_port_deleted(vha, fcport); } else { } return; } } fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *vha , gfp_t flags ) { fc_port_t *fcport ; void *tmp ; { tmp = kzalloc(128UL, flags); fcport = (fc_port_t *)tmp; if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { return ((fc_port_t *)0); } else { } fcport->vha = vha; fcport->port_type = 0; fcport->loop_id = 4096U; qla2x00_set_fcport_state___0(fcport, 1); fcport->supported_classes = 0U; return (fcport); } } static int qla2x00_configure_loop(scsi_qla_host_t *vha ) { int rval ; unsigned long flags ; unsigned long save_flags ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; { ha = vha->hw; rval = 0; tmp = constant_test_bit(6L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp != 0) { rval = qla2x00_configure_hba(vha); if (rval != 0) { ql_dbg(268435456U, vha, 8211, "Unable to configure HBA.\n"); return (rval); } else { } } else { } flags = vha->dpc_flags; save_flags = flags; ql_dbg(268435456U, vha, 8212, "Configure loop -- dpc flags = 0x%lx.\n", flags); clear_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); clear_bit(7L, (unsigned long volatile *)(& vha->dpc_flags)); qla2x00_get_data_rate(vha); if ((unsigned int )ha->current_topology == 4U) { tmp___2 = constant_test_bit(6L, (unsigned long const volatile *)(& flags)); if (tmp___2 != 0) { set_bit(7L, (unsigned long volatile *)(& flags)); } else { goto _L___0; } } else _L___0: /* CIL Label */ if ((unsigned int )ha->current_topology == 8U) { tmp___1 = constant_test_bit(6L, (unsigned long const volatile *)(& flags)); if (tmp___1 != 0) { set_bit(7L, (unsigned long volatile *)(& flags)); clear_bit(6L, (unsigned long volatile *)(& flags)); } else { goto _L; } } else _L: /* CIL Label */ if ((unsigned int )ha->current_topology == 2U) { clear_bit(7L, (unsigned long volatile *)(& flags)); } else if (*((unsigned long *)vha + 19UL) == 0UL) { set_bit(7L, (unsigned long volatile *)(& flags)); set_bit(6L, (unsigned long volatile *)(& flags)); } else { tmp___0 = constant_test_bit(3L, (unsigned long const 
volatile *)(& flags)); if (tmp___0 != 0) { set_bit(7L, (unsigned long volatile *)(& flags)); set_bit(6L, (unsigned long volatile *)(& flags)); } else { } } tmp___4 = constant_test_bit(6L, (unsigned long const volatile *)(& flags)); if (tmp___4 != 0) { tmp___3 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___3 != 0) { ql_dbg(268435456U, vha, 8213, "Loop resync needed, failing.\n"); rval = 258; } else { rval = qla2x00_configure_local_loop(vha); } } else { } if (rval == 0) { tmp___8 = constant_test_bit(7L, (unsigned long const volatile *)(& flags)); if (tmp___8 != 0) { tmp___5 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___5 != 0) { ql_dbg(268435456U, vha, 8222, "Needs RSCN update and loop transition.\n"); rval = 258; } else { tmp___6 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___6 != 0) { ql_dbg(268435456U, vha, 8222, "Needs RSCN update and loop transition.\n"); rval = 258; } else { tmp___7 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___7 == 2) { ql_dbg(268435456U, vha, 8222, "Needs RSCN update and loop transition.\n"); rval = 258; } else { rval = qla2x00_configure_fabric(vha); } } } } else { } } else { } if (rval == 0) { tmp___9 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___9 != 0) { rval = 258; } else { tmp___10 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___10 != 0) { rval = 258; } else { atomic_set(& vha->loop_state, 5); ql_dbg(268435456U, vha, 8297, "LOOP READY.\n"); } } } else { } if (rval != 0) { ql_dbg(268435456U, vha, 8298, "%s *** FAILED ***.\n", "qla2x00_configure_loop"); } else { ql_dbg(268435456U, vha, 8299, "%s: exiting normally.\n", "qla2x00_configure_loop"); } tmp___13 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___13 != 0) { tmp___11 = constant_test_bit(6L, (unsigned long const volatile *)(& save_flags)); if (tmp___11 != 0) { set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } tmp___12 = constant_test_bit(7L, (unsigned long const volatile *)(& save_flags)); if (tmp___12 != 0) { set_bit(7L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } } else { } return (rval); } } static int qla2x00_configure_local_loop(scsi_qla_host_t *vha ) { int rval ; int rval2 ; int found_devs ; int found ; fc_port_t *fcport ; fc_port_t *new_fcport ; uint16_t index ; uint16_t entries ; char *id_iter ; uint16_t loop_id ; uint8_t domain ; uint8_t area ; uint8_t al_pa ; struct qla_hw_data *ha ; int tmp ; struct list_head const *__mptr ; int tmp___0 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int tmp___1 ; size_t __len ; void *__ret ; struct list_head const *__mptr___2 ; { ha = vha->hw; found_devs = 0; new_fcport = (fc_port_t *)0; entries = 128U; tmp = qla2x00_gid_list_size(ha); memset((void *)ha->gid_list, 0, (size_t )tmp); rval = qla2x00_get_id_list(vha, (void *)ha->gid_list, ha->gid_list_dma, & entries); if (rval != 0) { goto cleanup_allocation; } else { } ql_dbg(268435456U, vha, 8215, "Entries in ID list (%d).\n", (int )entries); ql_dump_buffer(268566528U, vha, 8309, (uint8_t *)ha->gid_list, (uint32_t )entries * 8U); new_fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )new_fcport == (unsigned long )((fc_port_t *)0)) { ql_log(1U, vha, 8216, "Memory allocation failed for fcport.\n"); rval = 259; goto cleanup_allocation; } else { } new_fcport->flags = new_fcport->flags & 4294967294U; __mptr = 
(struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_61337; ldv_61336: tmp___0 = atomic_read((atomic_t const *)(& fcport->state)); if ((tmp___0 == 4 && (unsigned int )fcport->port_type != 3U) && (fcport->flags & 1U) == 0U) { ql_dbg(268435456U, vha, 8217, "Marking port lost loop_id=0x%04x.\n", (int )fcport->loop_id); qla2x00_set_fcport_state___0(fcport, 3); } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_61337: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_61336; } else { } id_iter = (char *)ha->gid_list; index = 0U; goto ldv_61352; ldv_61351: domain = ((struct gid_list_info *)id_iter)->domain; area = ((struct gid_list_info *)id_iter)->area; al_pa = ((struct gid_list_info *)id_iter)->al_pa; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { loop_id = (unsigned short )((struct gid_list_info *)id_iter)->loop_id_2100; } else { loop_id = ((struct gid_list_info *)id_iter)->loop_id; } id_iter = id_iter + (unsigned long )ha->gid_list_info_size; if (((int )domain & 240) == 240) { goto ldv_61339; } else { } if (((unsigned int )area != 0U && (unsigned int )domain != 0U) && ((int )vha->d_id.b.area != (int )area || (int )vha->d_id.b.domain != (int )domain)) { goto ldv_61339; } else { } if ((unsigned int )loop_id > 125U) { goto ldv_61339; } else { } memset((void *)new_fcport, 0, 128UL); new_fcport->d_id.b.domain = domain; new_fcport->d_id.b.area = area; new_fcport->d_id.b.al_pa = al_pa; new_fcport->loop_id = loop_id; rval2 = qla2x00_get_port_database(vha, new_fcport, 0); if (rval2 != 0) { ql_dbg(268435456U, vha, 8218, "Failed to retrieve fcport information -- get_port_database=%x, loop_id=0x%04x.\n", rval2, (int )new_fcport->loop_id); ql_dbg(268435456U, vha, 8219, "Scheduling resync.\n"); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_61339; } else { } found = 0; fcport = (fc_port_t *)0; __mptr___1 = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr___1; goto ldv_61350; ldv_61349: tmp___1 = memcmp((void const *)(& new_fcport->port_name), (void const *)(& fcport->port_name), 8UL); if (tmp___1 != 0) { goto ldv_61344; } else { } fcport->flags = fcport->flags & 4294967294U; fcport->loop_id = new_fcport->loop_id; fcport->port_type = new_fcport->port_type; fcport->d_id.b24 = new_fcport->d_id.b24; __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& fcport->node_name), (void const *)(& new_fcport->node_name), __len); } else { __ret = __builtin_memcpy((void *)(& fcport->node_name), (void const *)(& new_fcport->node_name), __len); } found = found + 1; goto ldv_61348; ldv_61344: __mptr___2 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___2; ldv_61350: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_61349; } else { } ldv_61348: ; if (found == 0) { list_add_tail(& new_fcport->list, & vha->vp_fcports); fcport = new_fcport; new_fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )new_fcport == (unsigned long )((fc_port_t *)0)) { ql_log(1U, vha, 8220, "Failed to allocate memory for fcport.\n"); rval = 259; goto cleanup_allocation; } else { } new_fcport->flags = new_fcport->flags & 4294967294U; } else { } fcport->fp_speed = ha->link_data_rate; qla2x00_update_fcport(vha, fcport); found_devs = found_devs + 1; ldv_61339: index = (uint16_t )((int )index + 1); ldv_61352: ; if ((int )index < (int )entries) { goto ldv_61351; } else { } 
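/* Finished scanning the ID list: free the scratch fcport used for port-database lookups and report any failure. */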
cleanup_allocation: kfree((void const *)new_fcport); if (rval != 0) { ql_dbg(268435456U, vha, 8221, "Configure local loop error exit: rval=%x.\n", rval); } else { } return (rval); } } static void qla2x00_iidma_fcport(scsi_qla_host_t *vha , fc_port_t *fcport ) { int rval ; uint16_t mb[4U] ; struct qla_hw_data *ha ; int tmp ; char const *tmp___0 ; { ha = vha->hw; if ((ha->device_type & 67108864U) == 0U) { return; } else { } tmp = atomic_read((atomic_t const *)(& fcport->state)); if (tmp != 4) { return; } else { } if ((unsigned int )fcport->fp_speed == 65535U || (int )fcport->fp_speed > (int )ha->link_data_rate) { return; } else { } rval = qla2x00_set_idma_speed(vha, (int )fcport->loop_id, (int )fcport->fp_speed, (uint16_t *)(& mb)); if (rval != 0) { ql_dbg(268435456U, vha, 8196, "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n", (uint8_t *)(& fcport->port_name), rval, (int )fcport->fp_speed, (int )mb[0], (int )mb[1]); } else { tmp___0 = qla2x00_get_link_speed_str(ha, (int )fcport->fp_speed); ql_dbg(268435456U, vha, 8197, "iIDMA adjusted to %s GB/s on %8phN.\n", tmp___0, (uint8_t *)(& fcport->port_name)); } return; } } static void qla2x00_reg_remote_port(scsi_qla_host_t *vha , fc_port_t *fcport ) { struct fc_rport_identifiers rport_ids ; struct fc_rport *rport ; unsigned long flags ; raw_spinlock_t *tmp ; { qla2x00_rport_del((void *)fcport); rport_ids.node_name = wwn_to_u64((u8 *)(& fcport->node_name)); rport_ids.port_name = wwn_to_u64((u8 *)(& fcport->port_name)); rport_ids.port_id = (u32 )((((int )fcport->d_id.b.domain << 16) | ((int )fcport->d_id.b.area << 8)) | (int )fcport->d_id.b.al_pa); rport_ids.roles = 0U; rport = fc_remote_port_add(vha->host, 0, & rport_ids); fcport->rport = rport; if ((unsigned long )rport == (unsigned long )((struct fc_rport *)0)) { ql_log(1U, vha, 8198, "Unable to allocate fc remote port.\n"); return; } else { } qlt_fc_port_added(vha, fcport); tmp = spinlock_check(((fcport->vha)->host)->host_lock); flags = _raw_spin_lock_irqsave(tmp); *((fc_port_t **)rport->dd_data) = fcport; spin_unlock_irqrestore(((fcport->vha)->host)->host_lock, flags); rport->supported_classes = fcport->supported_classes; rport_ids.roles = 0U; if ((unsigned int )fcport->port_type == 4U) { rport_ids.roles = rport_ids.roles | 2U; } else { } if ((unsigned int )fcport->port_type == 5U) { rport_ids.roles = rport_ids.roles | 1U; } else { } fc_remote_port_rolechg(rport, rport_ids.roles); return; } } void qla2x00_update_fcport(scsi_qla_host_t *vha , fc_port_t *fcport ) { { fcport->vha = vha; if (((vha->hw)->device_type & 131072U) != 0U) { qla2x00_set_fcport_state___0(fcport, 4); qla2x00_reg_remote_port(vha, fcport); return; } else { } fcport->login_retry = 0; fcport->flags = fcport->flags & 4294967285U; qla2x00_set_fcport_state___0(fcport, 4); qla2x00_iidma_fcport(vha, fcport); qla24xx_update_fcport_fcp_prio(vha, fcport); qla2x00_reg_remote_port(vha, fcport); return; } } static int qla2x00_configure_fabric(scsi_qla_host_t *vha ) { int rval ; fc_port_t *fcport ; fc_port_t *fcptemp ; uint16_t next_loopid ; uint16_t mb[32U] ; uint16_t loop_id ; struct list_head new_fcports ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int tmp___6 ; int tmp___7 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; int tmp___8 ; int tmp___9 ; struct list_head const *__mptr___4 ; 
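/* Tool-generated __mptr temporaries declared below are used for the fc_port list traversals in this function. */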
struct list_head const *__mptr___5 ; struct list_head const *__mptr___6 ; int tmp___10 ; int tmp___11 ; struct list_head const *__mptr___7 ; struct list_head const *__mptr___8 ; struct list_head const *__mptr___9 ; struct list_head const *__mptr___10 ; { new_fcports.next = & new_fcports; new_fcports.prev = & new_fcports; ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; if ((ha->device_type & 134217728U) != 0U) { loop_id = 2046U; } else { loop_id = 126U; } rval = qla2x00_get_port_name(vha, (int )loop_id, (uint8_t *)(& vha->fabric_node_name), 1); if (rval != 0) { ql_dbg(268435456U, vha, 8223, "MBX_GET_PORT_NAME failed, No FL Port.\n"); vha->device_flags = vha->device_flags & 4294967294U; return (0); } else { } vha->device_flags = vha->device_flags | 1U; if (ql2xfdmienable != 0) { tmp___0 = test_and_clear_bit(12L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { qla2x00_fdmi_register(vha); } else { } } else { } if ((ha->device_type & 134217728U) != 0U) { loop_id = 2044U; } else { loop_id = 128U; } rval = (*((ha->isp_ops)->fabric_login))(vha, (int )loop_id, 255, 255, 252, (uint16_t *)(& mb), 3); if (rval != 0) { set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); return (rval); } else { } if ((unsigned int )mb[0] != 16384U) { ql_dbg(268435456U, vha, 8258, "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", (int )loop_id, (int )mb[0], (int )mb[1], (int )mb[2], (int )mb[6], (int )mb[7]); return (0); } else { } tmp___5 = test_and_clear_bit(9L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___5 != 0) { tmp___1 = qla2x00_rft_id(vha); if (tmp___1 != 0) { ql_dbg(268435456U, vha, 8261, "Register FC-4 TYPE failed.\n"); } else { } tmp___2 = qla2x00_rff_id(vha); if (tmp___2 != 0) { ql_dbg(268435456U, vha, 8265, "Register FC-4 Features failed.\n"); } else { } tmp___4 = qla2x00_rnn_id(vha); if (tmp___4 != 0) { ql_dbg(268435456U, vha, 8271, "Register Node Name failed.\n"); } else { tmp___3 = qla2x00_rsnn_nn(vha); if (tmp___3 != 0) { ql_dbg(268435456U, vha, 8275, "Register Symobilic Node Name failed.\n"); } else { } } } else { } __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_61392; ldv_61391: fcport->scan_state = 1U; __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_61392: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_61391; } else { } rval = qla2x00_find_all_fabric_devs(vha, & new_fcports); if (rval != 0) { goto ldv_61394; } else { } __mptr___1 = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr___1; goto ldv_61402; ldv_61401: tmp___6 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___6 != 0) { goto ldv_61399; } else { } if ((fcport->flags & 1U) == 0U) { goto ldv_61400; } else { } if ((unsigned int )fcport->scan_state == 1U) { tmp___7 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___7 == 4) { qla2x00_mark_device_lost(vha, fcport, ql2xplogiabsentdevice, 0); if ((((unsigned int )fcport->loop_id != 4096U && (fcport->flags & 4U) == 0U) && (unsigned int )fcport->port_type != 4U) && (unsigned int )fcport->port_type != 3U) { (*((ha->isp_ops)->fabric_logout))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); fcport->loop_id = 4096U; } else { } } else { } } else { } ldv_61400: __mptr___2 = (struct list_head const *)fcport->list.next; fcport = 
(fc_port_t *)__mptr___2; ldv_61402: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_61401; } else { } ldv_61399: next_loopid = ha->min_external_loopid; __mptr___3 = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr___3; goto ldv_61410; ldv_61409: tmp___8 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___8 != 0) { goto ldv_61407; } else { tmp___9 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___9 != 0) { goto ldv_61407; } else { } } if ((fcport->flags & 1U) == 0U || (fcport->flags & 2U) == 0U) { goto ldv_61408; } else { } if ((unsigned int )fcport->loop_id == 4096U) { fcport->loop_id = next_loopid; rval = qla2x00_find_new_loop_id(base_vha, fcport); if (rval != 0) { goto ldv_61407; } else { } } else { } qla2x00_fabric_dev_login(vha, fcport, & next_loopid); ldv_61408: __mptr___4 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___4; ldv_61410: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_61409; } else { } ldv_61407: ; if (rval != 0) { goto ldv_61394; } else { } __mptr___5 = (struct list_head const *)new_fcports.next; fcport = (fc_port_t *)__mptr___5; __mptr___6 = (struct list_head const *)fcport->list.next; fcptemp = (fc_port_t *)__mptr___6; goto ldv_61419; ldv_61418: tmp___10 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___10 != 0) { goto ldv_61417; } else { tmp___11 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___11 != 0) { goto ldv_61417; } else { } } fcport->loop_id = next_loopid; rval = qla2x00_find_new_loop_id(base_vha, fcport); if (rval != 0) { goto ldv_61417; } else { } qla2x00_fabric_dev_login(vha, fcport, & next_loopid); list_move_tail(& fcport->list, & vha->vp_fcports); fcport = fcptemp; __mptr___7 = (struct list_head const *)fcptemp->list.next; fcptemp = (fc_port_t *)__mptr___7; ldv_61419: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& new_fcports)) { goto ldv_61418; } else { } ldv_61417: ; ldv_61394: __mptr___8 = (struct list_head const *)new_fcports.next; fcport = (fc_port_t *)__mptr___8; __mptr___9 = (struct list_head const *)fcport->list.next; fcptemp = (fc_port_t *)__mptr___9; goto ldv_61427; ldv_61426: list_del(& fcport->list); kfree((void const *)fcport); fcport = fcptemp; __mptr___10 = (struct list_head const *)fcptemp->list.next; fcptemp = (fc_port_t *)__mptr___10; ldv_61427: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& new_fcports)) { goto ldv_61426; } else { } if (rval != 0) { ql_dbg(268435456U, vha, 8296, "Configure fabric error exit rval=%d.\n", rval); } else { } return (rval); } } static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha , struct list_head *new_fcports ) { int rval ; uint16_t loop_id ; fc_port_t *fcport ; fc_port_t *new_fcport ; fc_port_t *fcptemp ; int found ; sw_info_t *swl ; int swl_idx ; int first_dev ; int last_dev ; port_id_t wrap ; port_id_t nxt_d_id ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; size_t __len ; void *__ret ; size_t __len___0 ; void *__ret___0 ; size_t __len___1 ; void *__ret___1 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int tmp___9 ; struct list_head const *__mptr___2 ; int tmp___10 ; size_t __len___2 ; void *__ret___2 ; int 
tmp___11 ; struct list_head const *__mptr___3 ; { wrap.b24 = 0U; ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; rval = 0; if ((unsigned long )ha->swl == (unsigned long )((void *)0)) { ha->swl = kcalloc((size_t )ha->max_fibre_devices, 32UL, 208U); } else { } swl = (sw_info_t *)ha->swl; if ((unsigned long )swl == (unsigned long )((sw_info_t *)0)) { ql_dbg(268435456U, vha, 8276, "GID_PT allocations failed, fallback on GA_NXT.\n"); } else { memset((void *)swl, 0, (unsigned long )ha->max_fibre_devices * 32UL); tmp___3 = qla2x00_gid_pt(vha, swl); if (tmp___3 != 0) { swl = (sw_info_t *)0; } else { tmp___2 = qla2x00_gpn_id(vha, swl); if (tmp___2 != 0) { swl = (sw_info_t *)0; } else { tmp___1 = qla2x00_gnn_id(vha, swl); if (tmp___1 != 0) { swl = (sw_info_t *)0; } else if (ql2xiidmaenable != 0) { tmp___0 = qla2x00_gfpn_id(vha, swl); if (tmp___0 == 0) { qla2x00_gpsc(vha, swl); } else { } } else { } } } if ((unsigned long )swl != (unsigned long )((sw_info_t *)0)) { qla2x00_gff_id(vha, swl); } else { } } swl_idx = 0; new_fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )new_fcport == (unsigned long )((fc_port_t *)0)) { ql_log(1U, vha, 8286, "Failed to allocate memory for fcport.\n"); return (259); } else { } new_fcport->flags = new_fcport->flags | 3U; first_dev = 1; last_dev = 0; loop_id = ha->min_external_loopid; goto ldv_61479; ldv_61478: tmp___4 = qla2x00_is_reserved_id(vha, (int )loop_id); if (tmp___4 != 0) { goto ldv_61447; } else { } if ((unsigned int )ha->current_topology == 4U) { tmp___5 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___5 != 0) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_61448; } else { tmp___6 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___6 != 0) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_61448; } else { tmp___7 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___7 != 0) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_61448; } else { tmp___8 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___8 == 2) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_61448; } else { } } } } } else { } if ((unsigned long )swl != (unsigned long )((sw_info_t *)0)) { if (last_dev != 0) { wrap.b24 = new_fcport->d_id.b24; } else { new_fcport->d_id.b24 = (swl + (unsigned long )swl_idx)->d_id.b24; __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& new_fcport->node_name), (void const *)(& (swl + (unsigned long )swl_idx)->node_name), __len); } else { __ret = __builtin_memcpy((void *)(& new_fcport->node_name), (void const *)(& (swl + (unsigned long )swl_idx)->node_name), __len); } __len___0 = 8UL; if (__len___0 > 63UL) { __ret___0 = __memcpy((void *)(& new_fcport->port_name), (void const *)(& (swl + (unsigned long )swl_idx)->port_name), __len___0); } else { __ret___0 = __builtin_memcpy((void *)(& new_fcport->port_name), (void const *)(& (swl + (unsigned long )swl_idx)->port_name), __len___0); } __len___1 = 8UL; if (__len___1 > 63UL) { __ret___1 = __memcpy((void 
*)(& new_fcport->fabric_port_name), (void const *)(& (swl + (unsigned long )swl_idx)->fabric_port_name), __len___1); } else { __ret___1 = __builtin_memcpy((void *)(& new_fcport->fabric_port_name), (void const *)(& (swl + (unsigned long )swl_idx)->fabric_port_name), __len___1); } new_fcport->fp_speed = (swl + (unsigned long )swl_idx)->fp_speed; new_fcport->fc4_type = (swl + (unsigned long )swl_idx)->fc4_type; if ((unsigned int )(swl + (unsigned long )swl_idx)->d_id.b.rsvd_1 != 0U) { last_dev = 1; } else { } swl_idx = swl_idx + 1; } } else { rval = qla2x00_ga_nxt(vha, new_fcport); if (rval != 0) { ql_log(1U, vha, 8292, "SNS scan failed -- assuming zero-entry result.\n"); __mptr = (struct list_head const *)new_fcports->next; fcport = (fc_port_t *)__mptr; __mptr___0 = (struct list_head const *)fcport->list.next; fcptemp = (fc_port_t *)__mptr___0; goto ldv_61465; ldv_61464: list_del(& fcport->list); kfree((void const *)fcport); fcport = fcptemp; __mptr___1 = (struct list_head const *)fcptemp->list.next; fcptemp = (fc_port_t *)__mptr___1; ldv_61465: ; if ((unsigned long )(& fcport->list) != (unsigned long )new_fcports) { goto ldv_61464; } else { } rval = 0; goto ldv_61448; } else { } } if (first_dev != 0) { wrap.b24 = new_fcport->d_id.b24; first_dev = 0; } else if (new_fcport->d_id.b24 == wrap.b24) { ql_dbg(268435456U, vha, 8293, "Device wrap (%02x%02x%02x).\n", (int )new_fcport->d_id.b.domain, (int )new_fcport->d_id.b.area, (int )new_fcport->d_id.b.al_pa); goto ldv_61448; } else { } if (new_fcport->d_id.b24 == base_vha->d_id.b24) { goto ldv_61447; } else { } tmp___9 = qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24); if (tmp___9 != 0) { goto ldv_61447; } else { } if ((((int )new_fcport->d_id.b24 ^ (int )vha->d_id.b24) & 16776960) == 0 && (unsigned int )ha->current_topology == 4U) { goto ldv_61447; } else { } if (((int )new_fcport->d_id.b.domain & 240) == 240) { goto ldv_61447; } else { } if (ql2xgffidenable != 0 && ((unsigned int )new_fcport->fc4_type != 8U && (unsigned int )new_fcport->fc4_type != 255U)) { goto ldv_61447; } else { } found = 0; __mptr___2 = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr___2; goto ldv_61477; ldv_61476: tmp___10 = memcmp((void const *)(& new_fcport->port_name), (void const *)(& fcport->port_name), 8UL); if (tmp___10 != 0) { goto ldv_61471; } else { } fcport->scan_state = 2U; found = found + 1; __len___2 = 8UL; if (__len___2 > 63UL) { __ret___2 = __memcpy((void *)(& fcport->fabric_port_name), (void const *)(& new_fcport->fabric_port_name), __len___2); } else { __ret___2 = __builtin_memcpy((void *)(& fcport->fabric_port_name), (void const *)(& new_fcport->fabric_port_name), __len___2); } fcport->fp_speed = new_fcport->fp_speed; if (fcport->d_id.b24 == new_fcport->d_id.b24) { tmp___11 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___11 == 4) { goto ldv_61475; } else { } } else { } if ((fcport->flags & 1U) == 0U) { fcport->d_id.b24 = new_fcport->d_id.b24; qla2x00_clear_loop_id(fcport); fcport->flags = fcport->flags | 3U; goto ldv_61475; } else { } fcport->d_id.b24 = new_fcport->d_id.b24; fcport->flags = fcport->flags | 2U; if (((((unsigned int )fcport->loop_id != 4096U && (fcport->flags & 4U) == 0U) && (fcport->flags & 8U) == 0U) && (unsigned int )fcport->port_type != 4U) && (unsigned int )fcport->port_type != 3U) { (*((ha->isp_ops)->fabric_logout))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); qla2x00_clear_loop_id(fcport); } else { } goto ldv_61475; 
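/* Editorial note: the label cluster that follows is CIL's flattening of the driver's
   list_for_each_entry() walk over vha->vp_fcports inside qla2x00_find_all_fabric_devs():
   ldv_61471 advances fcport to the next list entry, ldv_61477 re-tests the loop guard,
   ldv_61476 is the loop body (the port-name comparison above), and ldv_61475 is the common
   exit point, reached either when the walk completes or via the early "goto ldv_61475"
   branches above once a matching port has been handled. */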
ldv_61471: __mptr___3 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___3; ldv_61477: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_61476; } else { } ldv_61475: ; if (found != 0) { goto ldv_61447; } else { } list_add_tail(& new_fcport->list, new_fcports); nxt_d_id.b24 = new_fcport->d_id.b24; new_fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )new_fcport == (unsigned long )((fc_port_t *)0)) { ql_log(1U, vha, 8294, "Memory allocation failed for fcport.\n"); return (259); } else { } new_fcport->flags = new_fcport->flags | 3U; new_fcport->d_id.b24 = nxt_d_id.b24; ldv_61447: loop_id = (uint16_t )((int )loop_id + 1); ldv_61479: ; if ((int )ha->max_loop_id >= (int )loop_id) { goto ldv_61478; } else { } ldv_61448: kfree((void const *)new_fcport); return (rval); } } int qla2x00_find_new_loop_id(scsi_qla_host_t *vha , fc_port_t *dev ) { int rval ; struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; unsigned long tmp___0 ; int tmp___1 ; { ha = vha->hw; flags = 0UL; rval = 0; tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = find_first_zero_bit((unsigned long const *)ha->loop_id_map, (unsigned long )ha->max_fibre_devices); dev->loop_id = (uint16_t )tmp___0; if ((int )dev->loop_id >= (int )ha->max_fibre_devices) { dev->loop_id = 4096U; rval = 258; } else { tmp___1 = qla2x00_is_reserved_id(vha, (int )dev->loop_id); if (tmp___1 != 0) { dev->loop_id = 4096U; rval = 258; } else { set_bit((long )dev->loop_id, (unsigned long volatile *)ha->loop_id_map); } } spin_unlock_irqrestore(& ha->vport_slock, flags); if (rval == 0) { ql_dbg(268435456U, dev->vha, 8326, "Assigning new loopid=%x, portid=%x.\n", (int )dev->loop_id, (int )dev->d_id.b24); } else { ql_log(1U, dev->vha, 8327, "No loop_id\'s available, portid=%x.\n", (int )dev->d_id.b24); } return (rval); } } static int qla2x00_fabric_dev_login(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t *next_loopid ) { int rval ; int retry ; uint8_t opts ; struct qla_hw_data *ha ; { ha = vha->hw; rval = 0; retry = 0; if ((((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) || (ha->device_type & 134217728U) != 0U) { if ((fcport->flags & 8U) != 0U) { return (rval); } else { } fcport->flags = fcport->flags | 8U; rval = qla2x00_post_async_login_work(vha, fcport, (uint16_t *)0U); if (rval == 0) { return (rval); } else { } } else { } fcport->flags = fcport->flags & 4294967287U; rval = qla2x00_fabric_login(vha, fcport, next_loopid); if (rval == 0) { opts = 0U; if ((fcport->flags & 4U) != 0U) { opts = (uint8_t )((unsigned int )opts | 2U); } else { } rval = qla2x00_get_port_database(vha, fcport, (int )opts); if (rval != 0) { (*((ha->isp_ops)->fabric_logout))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); qla2x00_mark_device_lost(vha, fcport, 1, 0); } else { qla2x00_update_fcport(vha, fcport); } } else { qla2x00_mark_device_lost(vha, fcport, 1, 0); } return (rval); } } int qla2x00_fabric_login(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t *next_loopid ) { int rval ; int retry ; uint16_t tmp_loopid ; uint16_t mb[32U] ; struct qla_hw_data *ha ; { ha = vha->hw; retry = 0; tmp_loopid = 0U; ldv_61510: ql_dbg(268435456U, vha, 8192, "Trying Fabric Login w/loop id 0x%04x for port %02x%02x%02x.\n", (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int 
)fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); rval = (*((ha->isp_ops)->fabric_login))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (uint16_t *)(& mb), 1); if (rval != 0) { return (rval); } else { } if ((unsigned int )mb[0] == 16391U) { retry = retry + 1; tmp_loopid = fcport->loop_id; fcport->loop_id = mb[1]; ql_dbg(268435456U, vha, 8193, "Fabric Login: port in use - next loop id=0x%04x, port id= %02x%02x%02x.\n", (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else if ((unsigned int )mb[0] == 16384U) { if (retry != 0) { *next_loopid = tmp_loopid; } else { *next_loopid = (unsigned int )fcport->loop_id + 1U; } if ((int )mb[1] & 1) { fcport->port_type = 4; } else { fcport->port_type = 5; if (((int )mb[1] & 2) != 0) { fcport->flags = fcport->flags | 4U; } else { } } if ((int )mb[10] & 1) { fcport->supported_classes = fcport->supported_classes | 4U; } else { } if (((int )mb[10] & 2) != 0) { fcport->supported_classes = fcport->supported_classes | 8U; } else { } if ((ha->device_type & 134217728U) != 0U) { if (((int )mb[10] & 128) != 0) { fcport->flags = fcport->flags | 16U; } else { } } else { } rval = 0; goto ldv_61509; } else if ((unsigned int )mb[0] == 16392U) { fcport->loop_id = (uint16_t )((int )fcport->loop_id + 1); rval = qla2x00_find_new_loop_id(vha, fcport); if (rval != 0) { goto ldv_61509; } else { } } else if ((unsigned int )mb[0] == 16389U) { *next_loopid = fcport->loop_id; (*((ha->isp_ops)->fabric_logout))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); qla2x00_mark_device_lost(vha, fcport, 1, 0); rval = 1; goto ldv_61509; } else { ql_dbg(268435456U, vha, 8194, "Failed=%x port_id=%02x%02x%02x loop_id=%x jiffies=%lx.\n", (int )mb[0], (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int )fcport->loop_id, jiffies); *next_loopid = fcport->loop_id; (*((ha->isp_ops)->fabric_logout))(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); qla2x00_clear_loop_id(fcport); fcport->login_retry = 0; rval = 3; goto ldv_61509; } goto ldv_61510; ldv_61509: ; return (rval); } } int qla2x00_local_device_login(scsi_qla_host_t *vha , fc_port_t *fcport ) { int rval ; uint16_t mb[32U] ; { memset((void *)(& mb), 0, 64UL); rval = qla2x00_login_local_device(vha, fcport, (uint16_t *)(& mb), 1); if (rval == 0) { if ((unsigned int )mb[0] == 16389U) { rval = 1; } else if ((unsigned int )mb[0] == 16390U) { rval = 3; } else { } } else { } return (rval); } } int qla2x00_loop_resync(scsi_qla_host_t *vha ) { int rval ; uint32_t wait_time ; struct req_que *req ; struct rsp_que *rsp ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { rval = 0; if (*((unsigned long *)vha->hw + 2UL) != 0UL) { req = *((vha->hw)->req_q_map); } else { req = vha->req; } rsp = req->rsp; clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); if (*((unsigned long *)vha + 19UL) != 0UL) { rval = qla2x00_fw_ready(vha); if (rval == 0) { wait_time = 256U; ldv_61524: ; if (((vha->hw)->device_type & 131072U) == 0U) { qla2x00_marker(vha, req, rsp, 0, 0, 2); vha->marker_needed = 0U; } else { } clear_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); if (((vha->hw)->device_type & 131072U) != 0U) { qlafx00_configure_devices(vha); } else { qla2x00_configure_loop(vha); } wait_time = wait_time - 1U; tmp = atomic_read((atomic_t const *)(& 
vha->loop_down_timer)); if (tmp == 0) { tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 == 0) { if (wait_time != 0U) { tmp___1 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { goto ldv_61524; } else { goto ldv_61525; } } else { goto ldv_61525; } } else { goto ldv_61525; } } else { } ldv_61525: ; } else { } } else { } tmp___2 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___2 != 0) { return (258); } else { } if (rval != 0) { ql_dbg(268435456U, vha, 8300, "%s *** FAILED ***.\n", "qla2x00_loop_resync"); } else { } return (rval); } } int qla2x00_perform_loop_resync(scsi_qla_host_t *ha ) { int32_t rval ; int tmp ; { rval = 0; tmp = test_and_set_bit(5L, (unsigned long volatile *)(& ha->dpc_flags)); if (tmp == 0) { atomic_set(& ha->loop_down_timer, 0); if ((ha->device_flags & 2U) == 0U) { atomic_set(& ha->loop_state, 3); set_bit(6L, (unsigned long volatile *)(& ha->dpc_flags)); set_bit(9L, (unsigned long volatile *)(& ha->dpc_flags)); set_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); rval = qla2x00_loop_resync(ha); } else { atomic_set(& ha->loop_state, 6); } clear_bit(5L, (unsigned long volatile *)(& ha->dpc_flags)); } else { } return (rval); } } void qla2x00_update_fcports(scsi_qla_host_t *base_vha ) { fc_port_t *fcport ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; raw_spinlock_t *tmp___0 ; int tmp___1 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; { ha = base_vha->hw; tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)(base_vha->hw)->vp_list.next; vha = (struct scsi_qla_host *)__mptr; goto ldv_61556; ldv_61555: atomic_inc(& vha->vref_count); __mptr___0 = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr___0; goto ldv_61553; ldv_61552: ; if ((unsigned long )fcport->drport != (unsigned long )((struct fc_rport *)0)) { tmp___1 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___1 != 1) { spin_unlock_irqrestore(& ha->vport_slock, flags); qla2x00_rport_del((void *)fcport); tmp___0 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } } else { } __mptr___1 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___1; ldv_61553: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_61552; } else { } atomic_dec(& vha->vref_count); __mptr___2 = (struct list_head const *)vha->list.next; vha = (struct scsi_qla_host *)__mptr___2; ldv_61556: ; if ((unsigned long )(& vha->list) != (unsigned long )(& (base_vha->hw)->vp_list)) { goto ldv_61555; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); return; } } void qla83xx_reset_ownership(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t drv_presence ; uint32_t drv_presence_mask ; uint32_t dev_part_info1 ; uint32_t dev_part_info2 ; uint32_t class_type ; uint32_t class_type_mask ; uint16_t fcoe_other_function ; uint16_t i ; int tmp ; int tmp___0 ; int tmp___1 ; { ha = vha->hw; class_type_mask = 3U; fcoe_other_function = 65535U; if ((ha->device_type & 262144U) != 0U) { tmp = qla8044_rd_direct(vha, 3U); drv_presence = (uint32_t )tmp; tmp___0 = qla8044_rd_direct(vha, 7U); dev_part_info1 = (uint32_t )tmp___0; tmp___1 = qla8044_rd_direct(vha, 14308U); dev_part_info2 = (uint32_t 
)tmp___1; } else { qla83xx_rd_reg(vha, 571483016U, & drv_presence); qla83xx_rd_reg(vha, 571483104U, & dev_part_info1); qla83xx_rd_reg(vha, 571483108U, & dev_part_info2); } i = 0U; goto ldv_61572; ldv_61571: class_type = (dev_part_info1 >> (int )i * 4) & class_type_mask; if (class_type == 2U && (int )ha->portnum != (int )i) { fcoe_other_function = i; goto ldv_61570; } else { } i = (uint16_t )((int )i + 1); ldv_61572: ; if ((unsigned int )i <= 7U) { goto ldv_61571; } else { } ldv_61570: ; if ((unsigned int )fcoe_other_function == 65535U) { i = 0U; goto ldv_61575; ldv_61574: class_type = (dev_part_info2 >> (int )i * 4) & class_type_mask; if (class_type == 2U && (int )i + 8 != (int )ha->portnum) { fcoe_other_function = (unsigned int )i + 8U; goto ldv_61573; } else { } i = (uint16_t )((int )i + 1); ldv_61575: ; if ((unsigned int )i <= 7U) { goto ldv_61574; } else { } ldv_61573: ; } else { } drv_presence_mask = (uint32_t )(~ ((1 << (int )ha->portnum) | ((unsigned int )fcoe_other_function != 65535U ? 1 << (int )fcoe_other_function : 0))); if ((drv_presence & drv_presence_mask) == 0U && (int )ha->portnum < (int )fcoe_other_function) { ql_dbg(524288U, vha, 45183, "This host is Reset owner.\n"); ha->flags.nic_core_reset_owner = 1U; } else { } return; } } static int __qla83xx_set_drv_ack(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; uint32_t drv_ack ; { rval = 0; ha = vha->hw; rval = qla83xx_rd_reg(vha, 571483020U, & drv_ack); if (rval == 0) { drv_ack = (uint32_t )(1 << (int )ha->portnum) | drv_ack; rval = qla83xx_wr_reg(vha, 571483020U, drv_ack); } else { } return (rval); } } static int __qla83xx_clear_drv_ack(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; uint32_t drv_ack ; { rval = 0; ha = vha->hw; rval = qla83xx_rd_reg(vha, 571483020U, & drv_ack); if (rval == 0) { drv_ack = (uint32_t )(~ (1 << (int )ha->portnum)) & drv_ack; rval = qla83xx_wr_reg(vha, 571483020U, drv_ack); } else { } return (rval); } } static char const *qla83xx_dev_state_to_string(uint32_t dev_state ) { { switch (dev_state) { case 1U: ; return ("COLD/RE-INIT"); case 2U: ; return ("INITIALIZING"); case 3U: ; return ("READY"); case 4U: ; return ("NEED RESET"); case 5U: ; return ("NEED QUIESCENT"); case 6U: ; return ("FAILED"); case 7U: ; return ("QUIESCENT"); default: ; return ("Unknown"); } } } void qla83xx_idc_audit(scsi_qla_host_t *vha , int audit_type ) { struct qla_hw_data *ha ; uint32_t idc_audit_reg ; uint32_t duration_secs ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; { ha = vha->hw; idc_audit_reg = 0U; duration_secs = 0U; switch (audit_type) { case 0: tmp = jiffies_to_msecs(jiffies); ha->idc_audit_ts = tmp / 1000U; idc_audit_reg = (uint32_t )ha->portnum | (ha->idc_audit_ts << 8); qla83xx_wr_reg(vha, 571483028U, idc_audit_reg); goto ldv_61607; case 1: tmp___0 = jiffies_to_msecs(jiffies); tmp___1 = jiffies_to_msecs((unsigned long const )ha->idc_audit_ts); duration_secs = (tmp___0 - tmp___1) / 1000U; idc_audit_reg = ((unsigned int )ha->portnum | 128U) | (duration_secs << 8); qla83xx_wr_reg(vha, 571483028U, idc_audit_reg); goto ldv_61607; default: ql_log(1U, vha, 45176, "Invalid audit type specified.\n"); goto ldv_61607; } ldv_61607: ; return; } } static int qla83xx_initiating_reset(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t idc_control ; uint32_t dev_state ; char const *state ; char const *tmp ; { ha = vha->hw; __qla83xx_get_idc_control(vha, & idc_control); if ((int )idc_control & 1) { ql_log(2U, vha, 45184, "NIC Core reset has been disabled. 
idc-control=0x%x\n", idc_control); return (258); } else { } qla83xx_rd_reg(vha, 571483012U, & dev_state); if (*((unsigned long *)ha + 2UL) != 0UL && dev_state == 3U) { qla83xx_wr_reg(vha, 571483012U, 4U); ql_log(2U, vha, 45142, "HW State: NEED RESET.\n"); qla83xx_idc_audit(vha, 0); } else { tmp = qla83xx_dev_state_to_string(dev_state); state = tmp; ql_log(2U, vha, 45143, "HW State: %s.\n", state); goto ldv_61618; ldv_61617: qla83xx_idc_unlock(vha, 0); msleep(200U); qla83xx_idc_lock(vha, 0); qla83xx_rd_reg(vha, 571483012U, & dev_state); ldv_61618: ; if (dev_state == 3U) { goto ldv_61617; } else { } } __qla83xx_set_drv_ack(vha); return (0); } } int __qla83xx_set_idc_control(scsi_qla_host_t *vha , uint32_t idc_control ) { int tmp ; { tmp = qla83xx_wr_reg(vha, 571483024U, idc_control); return (tmp); } } int __qla83xx_get_idc_control(scsi_qla_host_t *vha , uint32_t *idc_control ) { int tmp ; { tmp = qla83xx_rd_reg(vha, 571483024U, idc_control); return (tmp); } } static int qla83xx_check_driver_presence(scsi_qla_host_t *vha ) { uint32_t drv_presence ; struct qla_hw_data *ha ; { drv_presence = 0U; ha = vha->hw; qla83xx_rd_reg(vha, 571483016U, & drv_presence); if (((uint32_t )(1 << (int )ha->portnum) & drv_presence) != 0U) { return (0); } else { return (3); } } } int qla83xx_nic_core_reset(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; int tmp ; { rval = 0; ha = vha->hw; ql_dbg(524288U, vha, 45144, "Entered %s().\n", "qla83xx_nic_core_reset"); if ((vha->device_flags & 32U) != 0U) { ql_log(1U, vha, 45145, "Device in unrecoverable FAILED state.\n"); return (258); } else { } qla83xx_idc_lock(vha, 0); tmp = qla83xx_check_driver_presence(vha); if (tmp != 0) { ql_log(1U, vha, 45146, "Function=0x%x has been removed from IDC participation.\n", (int )ha->portnum); rval = 258; goto exit; } else { } qla83xx_reset_ownership(vha); rval = qla83xx_initiating_reset(vha); if (rval == 0) { rval = qla83xx_idc_state_handler(vha); if (rval == 0) { ha->flags.nic_core_hung = 0U; } else { } __qla83xx_clear_drv_ack(vha); } else { } exit: qla83xx_idc_unlock(vha, 0); ql_dbg(524288U, vha, 45147, "Exiting %s.\n", "qla83xx_nic_core_reset"); return (rval); } } int qla2xxx_mctp_dump(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int rval ; { ha = vha->hw; rval = 258; if ((ha->device_type & 32768U) == 0U || ((int )ha->fw_attributes_ext[0] & 1) == 0) { ql_log(2U, vha, 20589, "This board is not MCTP capable\n"); return (rval); } else { } if ((unsigned long )ha->mctp_dump == (unsigned long )((void *)0)) { ha->mctp_dump = dma_alloc_attrs(& (ha->pdev)->dev, 548964UL, & ha->mctp_dump_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )ha->mctp_dump == (unsigned long )((void *)0)) { ql_log(1U, vha, 20590, "Failed to allocate memory for mctp dump\n"); return (rval); } else { } } else { } rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma, 0U, 137241U); if (rval != 0) { ql_log(1U, vha, 20591, "Failed to capture mctp dump\n"); } else { ql_log(2U, vha, 20592, "Mctp dump capture for host (%ld/%p).\n", vha->host_no, ha->mctp_dump); ha->mctp_dumped = 1; } if (*((unsigned long *)ha + 2UL) == 0UL && (unsigned int )ha->portnum == 0U) { ha->flags.nic_core_reset_hdlr_active = 1U; rval = qla83xx_restart_nic_firmware(vha); if (rval != 0) { ql_log(1U, vha, 20593, "Failed to restart nic firmware\n"); } else { ql_dbg(524288U, vha, 45188, "Restarted NIC firmware successfully.\n"); } ha->flags.nic_core_reset_hdlr_active = 0U; } else { } return (rval); } } void qla2x00_quiesce_io(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; 
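/* Editorial note (placed among the locals of qla2x00_quiesce_io() because the generated
   line breaks fall mid-declaration): the body below forces the loop-down timer to 255 and,
   if the loop state is not already 2, sets it to 2, marks every remote port lost on this
   host and on each vport in ha->vp_list, and finally waits for outstanding commands via
   qla2x00_eh_wait_for_pending_commands(). The numeric timer/state values are kept exactly
   as the generator emitted them rather than being mapped back to driver macros. */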
struct scsi_qla_host *vp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp ; int tmp___0 ; { ha = vha->hw; ql_dbg(67108864U, vha, 16413, "Quiescing I/O - ha=%p.\n", ha); atomic_set(& ha->loop_down_timer, 255); tmp___0 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___0 != 2) { atomic_set(& vha->loop_state, 2); qla2x00_mark_all_devices_lost(vha, 0); __mptr = (struct list_head const *)ha->vp_list.next; vp = (struct scsi_qla_host *)__mptr; goto ldv_61655; ldv_61654: qla2x00_mark_all_devices_lost(vp, 0); __mptr___0 = (struct list_head const *)vp->list.next; vp = (struct scsi_qla_host *)__mptr___0; ldv_61655: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_61654; } else { } } else { tmp = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp == 0) { atomic_set(& vha->loop_down_timer, 255); } else { } } qla2x00_eh_wait_for_pending_commands(vha, 0U, 0U, 0); return; } } void qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct scsi_qla_host *vp ; unsigned long flags ; fc_port_t *fcport ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; raw_spinlock_t *tmp___0 ; struct list_head const *__mptr___0 ; int tmp___1 ; int tmp___2 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; raw_spinlock_t *tmp___3 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; struct list_head const *__mptr___5 ; raw_spinlock_t *tmp___4 ; struct list_head const *__mptr___6 ; { ha = vha->hw; if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { vha->flags.online = 0U; } else { } ha->flags.chip_reset_done = 0U; clear_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); vha->qla_stats.total_isp_aborts = vha->qla_stats.total_isp_aborts + 1U; ql_log(2U, vha, 175, "Performing ISP error recovery - ha=%p.\n", ha); if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { (*((ha->isp_ops)->reset_chip))(vha); } else { } atomic_set(& vha->loop_down_timer, 255); tmp___2 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___2 != 2) { atomic_set(& vha->loop_state, 2); qla2x00_mark_all_devices_lost(vha, 0); tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)ha->vp_list.next; vp = (struct scsi_qla_host *)__mptr; goto ldv_61675; ldv_61674: atomic_inc(& vp->vref_count); spin_unlock_irqrestore(& ha->vport_slock, flags); qla2x00_mark_all_devices_lost(vp, 0); tmp___0 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___0); atomic_dec(& vp->vref_count); __mptr___0 = (struct list_head const *)vp->list.next; vp = (struct scsi_qla_host *)__mptr___0; ldv_61675: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_61674; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); } else { tmp___1 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___1 == 0) { atomic_set(& vha->loop_down_timer, 255); } else { } } __mptr___1 = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr___1; goto ldv_61682; ldv_61681: fcport->flags = fcport->flags & 4294967285U; __mptr___2 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___2; ldv_61682: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_61681; } else { } tmp___3 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___3); __mptr___3 = (struct list_head const 
*)ha->vp_list.next; vp = (struct scsi_qla_host *)__mptr___3; goto ldv_61702; ldv_61701: atomic_inc(& vp->vref_count); spin_unlock_irqrestore(& ha->vport_slock, flags); __mptr___4 = (struct list_head const *)vp->vp_fcports.next; fcport = (fc_port_t *)__mptr___4; goto ldv_61696; ldv_61695: fcport->flags = fcport->flags & 4294967285U; __mptr___5 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___5; ldv_61696: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vp->vp_fcports)) { goto ldv_61695; } else { } tmp___4 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___4); atomic_dec(& vp->vref_count); __mptr___6 = (struct list_head const *)vp->list.next; vp = (struct scsi_qla_host *)__mptr___6; ldv_61702: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_61701; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); if (*((unsigned long *)ha + 2UL) == 0UL) { if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { qla82xx_chip_reset_cleanup(vha); ql_log(2U, vha, 180, "Done chip reset cleanup.\n"); vha->flags.online = 0U; } else { } qla2x00_abort_all_cmds(vha, 524288); } else { } return; } } int qla2x00_abort_isp(scsi_qla_host_t *vha ) { int rval ; uint8_t status ; struct qla_hw_data *ha ; struct scsi_qla_host *vp ; struct req_que *req ; unsigned long flags ; int tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; raw_spinlock_t *tmp___6 ; struct list_head const *__mptr ; raw_spinlock_t *tmp___7 ; struct list_head const *__mptr___0 ; int tmp___8 ; { status = 0U; ha = vha->hw; req = *(ha->req_q_map); if (*((unsigned long *)vha + 19UL) != 0UL) { qla2x00_abort_isp_cleanup(vha); if ((ha->device_type & 65536U) != 0U) { ql_dbg(524288U, vha, 45148, "Clearing fcoe driver presence.\n"); tmp = qla83xx_clear_drv_presence(vha); if (tmp != 0) { ql_dbg(524288U, vha, 45171, "Error while clearing DRV-Presence.\n"); } else { } } else { } tmp___0 = pci_channel_offline(ha->pdev); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 0L); if (tmp___1 != 0L) { tmp___2 = ldv__builtin_expect(*((unsigned long *)ha + 2UL) != 0UL, 0L); if (tmp___2 != 0L) { clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); status = 0U; return ((int )status); } else { } } else { } (*((ha->isp_ops)->get_flash_version))(vha, (void *)req->ring); (*((ha->isp_ops)->nvram_config))(vha); tmp___5 = qla2x00_restart_isp(vha); if (tmp___5 == 0) { clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); tmp___3 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___3 == 0) { vha->marker_needed = 1U; } else { } vha->flags.online = 1U; (*((ha->isp_ops)->enable_intrs))(ha); ha->isp_abort_cnt = 0U; clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); if ((ha->device_type & 8192U) != 0U || (ha->device_type & 65536U) != 0U) { qla2x00_get_fw_version(vha); } else { } if ((unsigned long )ha->fce != (unsigned long )((void *)0)) { ha->flags.fce_enabled = 1U; memset(ha->fce, 0, (size_t )(ha->fce_bufs * 1024U)); rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, (int )((uint16_t )ha->fce_bufs), (uint16_t *)(& ha->fce_mb), & ha->fce_bufs); if (rval != 0) { ql_log(1U, vha, 32819, "Unable to reinitialize FCE (%d).\n", rval); ha->flags.fce_enabled = 0U; } else { } } else { } if ((unsigned long )ha->eft != (unsigned long )((void *)0)) { memset(ha->eft, 0, 65536UL); rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, 4); if (rval != 0) { ql_log(1U, vha, 32820, "Unable to reinitialize EFT 
(%d).\n", rval); } else { } } else { } } else { vha->flags.online = 1U; tmp___4 = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___4 != 0) { if (ha->isp_abort_cnt == 0U) { ql_log(0U, vha, 32821, "ISP error recover failed - board disabled.\n"); (*((ha->isp_ops)->reset_adapter))(vha); vha->flags.online = 0U; clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); status = 0U; } else { ha->isp_abort_cnt = ha->isp_abort_cnt - 1U; ql_dbg(4194304U, vha, 32800, "ISP abort - retry remaining %d.\n", ha->isp_abort_cnt); status = 1U; } } else { ha->isp_abort_cnt = 5U; ql_dbg(4194304U, vha, 32801, "ISP error recovery - retrying (%d) more times.\n", ha->isp_abort_cnt); set_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); status = 1U; } } } else { } if ((unsigned int )status == 0U) { ql_dbg(4194304U, vha, 32802, "%s succeeded.\n", "qla2x00_abort_isp"); tmp___6 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___6); __mptr = (struct list_head const *)ha->vp_list.next; vp = (struct scsi_qla_host *)__mptr; goto ldv_61725; ldv_61724: ; if ((unsigned int )vp->vp_idx != 0U) { atomic_inc(& vp->vref_count); spin_unlock_irqrestore(& ha->vport_slock, flags); qla2x00_vp_abort_isp(vp); tmp___7 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___7); atomic_dec(& vp->vref_count); } else { } __mptr___0 = (struct list_head const *)vp->list.next; vp = (struct scsi_qla_host *)__mptr___0; ldv_61725: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_61724; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); if ((ha->device_type & 65536U) != 0U) { ql_dbg(524288U, vha, 45149, "Setting back fcoe driver presence.\n"); tmp___8 = qla83xx_set_drv_presence(vha); if (tmp___8 != 0) { ql_dbg(524288U, vha, 45172, "Error while setting DRV-Presence.\n"); } else { } } else { } } else { ql_log(1U, vha, 32803, "%s **** FAILED ****.\n", "qla2x00_abort_isp"); } return ((int )status); } } static int qla2x00_restart_isp(scsi_qla_host_t *vha ) { int status ; uint32_t wait_time ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; unsigned long flags ; int tmp ; raw_spinlock_t *tmp___0 ; bool tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { status = 0; ha = vha->hw; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); tmp = qla2x00_isp_firmware(vha); if (tmp != 0) { vha->flags.online = 0U; status = (*((ha->isp_ops)->chip_diag))(vha); if (status == 0) { status = qla2x00_setup_chip(vha); } else { } } else { } if (status == 0) { status = qla2x00_init_rings(vha); if (status == 0) { clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); ha->flags.chip_reset_done = 1U; qla25xx_init_queues(ha); status = qla2x00_fw_ready(vha); if (status == 0) { ql_dbg(4194304U, vha, 32817, "Start configure loop status = %d.\n", status); qla2x00_marker(vha, req, rsp, 0, 0, 2); vha->flags.online = 1U; tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); tmp___1 = qla_tgt_mode_enabled(vha); if ((int )tmp___1) { qlt_24xx_process_atio_queue(vha); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); wait_time = 256U; ldv_61739: clear_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); qla2x00_configure_loop(vha); wait_time = wait_time - 1U; tmp___2 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___2 == 0) { tmp___3 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___3 == 0) { if (wait_time != 0U) { tmp___4 = 
constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___4 != 0) { goto ldv_61739; } else { goto ldv_61740; } } else { goto ldv_61740; } } else { goto ldv_61740; } } else { } ldv_61740: ; } else { } if ((vha->device_flags & 2U) != 0U) { status = 0; } else { } ql_dbg(4194304U, vha, 32818, "Configure loop done, status = 0x%x.\n", status); } else { } } else { } return (status); } } static int qla25xx_init_queues(struct qla_hw_data *ha ) { struct rsp_que *rsp ; struct req_que *req ; struct scsi_qla_host *base_vha ; void *tmp ; int ret ; int i ; { rsp = (struct rsp_que *)0; req = (struct req_que *)0; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; ret = -1; i = 1; goto ldv_61751; ldv_61750: rsp = *(ha->rsp_q_map + (unsigned long )i); if ((unsigned long )rsp != (unsigned long )((struct rsp_que *)0)) { rsp->options = (unsigned int )rsp->options & 65534U; ret = qla25xx_init_rsp_que(base_vha, rsp); if (ret != 0) { ql_dbg(1073741824U, base_vha, 255, "%s Rsp que: %d init failed.\n", "qla25xx_init_queues", (int )rsp->id); } else { ql_dbg(1073741824U, base_vha, 256, "%s Rsp que: %d inited.\n", "qla25xx_init_queues", (int )rsp->id); } } else { } i = i + 1; ldv_61751: ; if ((int )ha->max_rsp_queues > i) { goto ldv_61750; } else { } i = 1; goto ldv_61754; ldv_61753: req = *(ha->req_q_map + (unsigned long )i); if ((unsigned long )req != (unsigned long )((struct req_que *)0)) { req->options = (unsigned int )req->options & 65534U; ret = qla25xx_init_req_que(base_vha, req); if (ret != 0) { ql_dbg(1073741824U, base_vha, 257, "%s Req que: %d init failed.\n", "qla25xx_init_queues", (int )req->id); } else { ql_dbg(1073741824U, base_vha, 258, "%s Req que: %d inited.\n", "qla25xx_init_queues", (int )req->id); } } else { } i = i + 1; ldv_61754: ; if ((int )ha->max_req_queues > i) { goto ldv_61753; } else { } return (ret); } } void qla2x00_reset_adapter(struct scsi_qla_host *vha ) { unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; { flags = 0UL; ha = vha->hw; reg = & (ha->iobase)->isp; vha->flags.online = 0U; (*((ha->isp_ops)->disable_intrs))(ha); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writew(4096, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); writew(12288, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } void qla24xx_reset_adapter(struct scsi_qla_host *vha ) { unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; raw_spinlock_t *tmp ; { flags = 0UL; ha = vha->hw; reg = & (ha->iobase)->isp24; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return; } else { } vha->flags.online = 0U; (*((ha->isp_ops)->disable_intrs))(ha); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writel(268435456U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); writel(1073741824U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((((ha->device_type & 2048U) != 0U || (ha->device_type & 8192U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) && *((unsigned long *)ha + 2UL) != 0UL) { (*((ha->isp_ops)->enable_intrs))(ha); } else { } return; } } static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha , struct nvram_24xx *nv ) { { return; } } int 
qla24xx_nvram_config(struct scsi_qla_host *vha ) { int rval ; struct init_cb_24xx *icb ; struct nvram_24xx *nv ; uint32_t *dptr ; uint8_t *dptr1 ; uint8_t *dptr2 ; uint32_t chksum ; uint16_t cnt ; struct qla_hw_data *ha ; uint32_t *tmp ; bool tmp___0 ; int tmp___1 ; uint8_t *tmp___2 ; uint8_t *tmp___3 ; uint16_t tmp___4 ; uint8_t *tmp___5 ; uint8_t *tmp___6 ; uint16_t tmp___7 ; size_t __len ; void *__ret ; size_t __len___0 ; void *__ret___0 ; size_t __len___1 ; void *__ret___1 ; size_t __len___2 ; void *__ret___2 ; size_t __len___3 ; void *__ret___3 ; size_t __len___4 ; void *__ret___4 ; { ha = vha->hw; rval = 0; icb = (struct init_cb_24xx *)ha->init_cb; nv = (struct nvram_24xx *)ha->nvram; if (*((unsigned long *)ha + 2UL) != 0UL) { ha->nvram_base = 128U; ha->vpd_base = 0U; } else { ha->nvram_base = 384U; ha->vpd_base = 256U; } ha->nvram_size = 512U; ha->vpd_size = 512U; ha->vpd = ha->nvram + 2048UL; (*((ha->isp_ops)->read_nvram))(vha, (uint8_t *)ha->vpd, (uint32_t )((int )ha->nvram_base + -128), 2048U); dptr = (uint32_t *)nv; (*((ha->isp_ops)->read_nvram))(vha, (uint8_t *)dptr, (uint32_t )ha->nvram_base, (uint32_t )ha->nvram_size); cnt = 0U; chksum = 0U; goto ldv_61791; ldv_61790: tmp = dptr; dptr = dptr + 1; chksum = *tmp + chksum; cnt = (uint16_t )((int )cnt + 1); ldv_61791: ; if ((int )ha->nvram_size >> 2 > (int )cnt) { goto ldv_61790; } else { } ql_dbg(1073872896U, vha, 106, "Contents of NVRAM\n"); ql_dump_buffer(1073872896U, vha, 269, (uint8_t *)nv, (uint32_t )ha->nvram_size); if (((((chksum != 0U || (unsigned int )nv->id[0] != 73U) || (unsigned int )nv->id[1] != 83U) || (unsigned int )nv->id[2] != 80U) || (unsigned int )nv->id[3] != 32U) || (unsigned int )nv->nvram_version == 0U) { ql_log(1U, vha, 107, "Inconsistent NVRAM detected: checksum=0x%x id=%c version=0x%x.\n", chksum, (int )nv->id[0], (int )nv->nvram_version); ql_log(1U, vha, 108, "Falling back to functioning (yet invalid -- WWPN) defaults.\n"); memset((void *)nv, 0, (size_t )ha->nvram_size); nv->nvram_version = 1U; nv->version = 1U; nv->frame_payload_size = 2048U; nv->execution_throttle = 65535U; nv->exchange_count = 0U; nv->hard_address = 124U; nv->port_name[0] = 33U; nv->port_name[1] = ha->port_no; nv->port_name[2] = 0U; nv->port_name[3] = 224U; nv->port_name[4] = 139U; nv->port_name[5] = 28U; nv->port_name[6] = 85U; nv->port_name[7] = 134U; nv->node_name[0] = 32U; nv->node_name[1] = 0U; nv->node_name[2] = 0U; nv->node_name[3] = 224U; nv->node_name[4] = 139U; nv->node_name[5] = 28U; nv->node_name[6] = 85U; nv->node_name[7] = 134U; qla24xx_nvram_wwn_from_ofw(vha, nv); nv->login_retry_count = 8U; nv->interrupt_delay_timer = 0U; nv->login_timeout = 0U; nv->firmware_options_1 = 24582U; nv->firmware_options_2 = 32U; nv->firmware_options_2 = nv->firmware_options_2 | 4096U; nv->firmware_options_3 = 16384U; nv->host_p = 3072U; nv->efi_parameters = 0U; nv->reset_delay = 5U; nv->max_luns_per_target = 128U; nv->port_down_retry_count = 30U; nv->link_down_timeout = 30U; rval = 1; } else { } tmp___0 = qla_ini_mode_enabled(vha); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { nv->firmware_options_1 = nv->firmware_options_1 & 4294959103U; nv->host_p = nv->host_p & 4294966271U; } else { } qlt_24xx_config_nvram_stage1(vha, nv); memset((void *)icb, 0, (size_t )ha->init_cb_size); dptr1 = (uint8_t *)icb; dptr2 = (uint8_t *)(& nv->version); cnt = 28U; goto ldv_61794; ldv_61793: tmp___2 = dptr1; dptr1 = dptr1 + 1; tmp___3 = dptr2; dptr2 = dptr2 + 1; *tmp___2 = *tmp___3; ldv_61794: tmp___4 = cnt; cnt = (uint16_t )((int )cnt - 
1); if ((unsigned int )tmp___4 != 0U) { goto ldv_61793; } else { } icb->login_retry_count = nv->login_retry_count; icb->link_down_on_nos = nv->link_down_on_nos; dptr1 = (uint8_t *)(& icb->interrupt_delay_timer); dptr2 = (uint8_t *)(& nv->interrupt_delay_timer); cnt = 20U; goto ldv_61797; ldv_61796: tmp___5 = dptr1; dptr1 = dptr1 + 1; tmp___6 = dptr2; dptr2 = dptr2 + 1; *tmp___5 = *tmp___6; ldv_61797: tmp___7 = cnt; cnt = (uint16_t )((int )cnt - 1); if ((unsigned int )tmp___7 != 0U) { goto ldv_61796; } else { } qla2x00_set_model_info(vha, (uint8_t *)(& nv->model_name), 16UL, (char *)"QLA2462"); qlt_24xx_config_nvram_stage2(vha, icb); if ((nv->host_p & 32768U) != 0U) { __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& icb->node_name), (void const *)(& nv->alternate_node_name), __len); } else { __ret = __builtin_memcpy((void *)(& icb->node_name), (void const *)(& nv->alternate_node_name), __len); } __len___0 = 8UL; if (__len___0 > 63UL) { __ret___0 = __memcpy((void *)(& icb->port_name), (void const *)(& nv->alternate_port_name), __len___0); } else { __ret___0 = __builtin_memcpy((void *)(& icb->port_name), (void const *)(& nv->alternate_port_name), __len___0); } } else { } if ((icb->firmware_options_1 & 16384U) == 0U) { __len___1 = 8UL; if (__len___1 > 63UL) { __ret___1 = __memcpy((void *)(& icb->node_name), (void const *)(& icb->port_name), __len___1); } else { __ret___1 = __builtin_memcpy((void *)(& icb->node_name), (void const *)(& icb->port_name), __len___1); } icb->node_name[0] = (unsigned int )icb->node_name[0] & 240U; } else { } ha->flags.disable_risc_code_load = 0U; ha->flags.enable_lip_reset = 0U; ha->flags.enable_lip_full_login = (nv->host_p & 1024U) != 0U; ha->flags.enable_target_reset = (nv->host_p & 2048U) != 0U; ha->flags.enable_led_scheme = 0U; ha->flags.disable_serdes = (nv->host_p & 32U) != 0U; ha->operating_mode = (uint8_t )((icb->firmware_options_2 & 112U) >> 4); __len___2 = 8UL; if (__len___2 > 63UL) { __ret___2 = __memcpy((void *)(& ha->fw_seriallink_options24), (void const *)(& nv->seriallink_options), __len___2); } else { __ret___2 = __builtin_memcpy((void *)(& ha->fw_seriallink_options24), (void const *)(& nv->seriallink_options), __len___2); } ha->serial0 = icb->port_name[5]; ha->serial1 = icb->port_name[6]; ha->serial2 = icb->port_name[7]; __len___3 = 8UL; if (__len___3 > 63UL) { __ret___3 = __memcpy((void *)(& vha->node_name), (void const *)(& icb->node_name), __len___3); } else { __ret___3 = __builtin_memcpy((void *)(& vha->node_name), (void const *)(& icb->node_name), __len___3); } __len___4 = 8UL; if (__len___4 > 63UL) { __ret___4 = __memcpy((void *)(& vha->port_name), (void const *)(& icb->port_name), __len___4); } else { __ret___4 = __builtin_memcpy((void *)(& vha->port_name), (void const *)(& icb->port_name), __len___4); } icb->execution_throttle = 65535U; ha->retry_count = (uint8_t )nv->login_retry_count; if ((int )nv->login_timeout < ql2xlogintimeout) { nv->login_timeout = (unsigned short )ql2xlogintimeout; } else { } if ((unsigned int )nv->login_timeout <= 3U) { nv->login_timeout = 4U; } else { } ha->login_timeout = (uint8_t )nv->login_timeout; icb->login_timeout = nv->login_timeout; ha->r_a_tov = 100U; ha->loop_reset_delay = (uint16_t )nv->reset_delay; if ((unsigned int )nv->link_down_timeout == 0U) { ha->loop_down_abort_time = 195U; } else { ha->link_down_timeout = (uint8_t )nv->link_down_timeout; ha->loop_down_abort_time = ~ ((int )ha->link_down_timeout); } ha->port_down_retry_count = (int )nv->port_down_retry_count; if (qlport_down_retry != 0) 
{ ha->port_down_retry_count = qlport_down_retry; } else { } ha->login_retry_count = (uint32_t )nv->login_retry_count; if (ha->port_down_retry_count == (int )nv->port_down_retry_count && ha->port_down_retry_count > 3) { ha->login_retry_count = (uint32_t )ha->port_down_retry_count; } else if (ha->port_down_retry_count > (int )ha->login_retry_count) { ha->login_retry_count = (uint32_t )ha->port_down_retry_count; } else { } if (ql2xloginretrycount != 0) { ha->login_retry_count = (uint32_t )ql2xloginretrycount; } else { } if (*((unsigned long *)vha + 19UL) == 0UL) { ha->zio_mode = (unsigned int )((uint16_t )icb->firmware_options_2) & 15U; ha->zio_timer = (unsigned int )icb->interrupt_delay_timer != 0U ? icb->interrupt_delay_timer : 2U; } else { } icb->firmware_options_2 = icb->firmware_options_2 & 4294967280U; vha->flags.process_response_queue = 0U; if ((unsigned int )ha->zio_mode != 0U) { ha->zio_mode = 6U; ql_log(2U, vha, 111, "ZIO mode %d enabled; timer delay (%d us).\n", (int )ha->zio_mode, (int )ha->zio_timer * 100); icb->firmware_options_2 = icb->firmware_options_2 | (uint32_t )ha->zio_mode; icb->interrupt_delay_timer = ha->zio_timer; vha->flags.process_response_queue = 1U; } else { } if (rval != 0) { ql_log(1U, vha, 112, "NVRAM configuration failed.\n"); } else { } return (rval); } } static int qla24xx_load_risc_flash(scsi_qla_host_t *vha , uint32_t *srisc_addr , uint32_t faddr ) { int rval ; int segments ; int fragment ; uint32_t *dcode ; uint32_t dlen ; uint32_t risc_addr ; uint32_t risc_size ; uint32_t i ; struct qla_hw_data *ha ; struct req_que *req ; __u32 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; { rval = 0; ha = vha->hw; req = *(ha->req_q_map); ql_dbg(1073741824U, vha, 139, "FW: Loading firmware from flash (%x).\n", faddr); rval = 0; segments = 2; dcode = (uint32_t *)req->ring; *srisc_addr = 0U; qla24xx_read_flash_data(vha, dcode, faddr + 4U, 4U); i = 0U; goto ldv_61833; ldv_61832: tmp = __fswab32(*(dcode + (unsigned long )i)); *(dcode + (unsigned long )i) = tmp; i = i + 1U; ldv_61833: ; if (i <= 3U) { goto ldv_61832; } else { } if ((((*dcode == 4294967295U && *(dcode + 1UL) == 4294967295U) && *(dcode + 2UL) == 4294967295U) && *(dcode + 3UL) == 4294967295U) || (((*dcode == 0U && *(dcode + 1UL) == 0U) && *(dcode + 2UL) == 0U) && *(dcode + 3UL) == 0U)) { ql_log(0U, vha, 140, "Unable to verify the integrity of flash firmware image.\n"); ql_log(0U, vha, 141, "Firmware data: %08x %08x %08x %08x.\n", *dcode, *(dcode + 1UL), *(dcode + 2UL), *(dcode + 3UL)); return (258); } else { } goto ldv_61842; ldv_61841: qla24xx_read_flash_data(vha, dcode, faddr, 4U); tmp___0 = __fswab32(*(dcode + 2UL)); risc_addr = tmp___0; *srisc_addr = *srisc_addr != 0U ? 
*srisc_addr : risc_addr; tmp___1 = __fswab32(*(dcode + 3UL)); risc_size = tmp___1; fragment = 0; goto ldv_61840; ldv_61839: dlen = ha->fw_transfer_size >> 2; if (dlen > risc_size) { dlen = risc_size; } else { } ql_dbg(1073741824U, vha, 142, "Loading risc segment@ risc addr %x number of dwords 0x%x offset 0x%x.\n", risc_addr, dlen, faddr); qla24xx_read_flash_data(vha, dcode, faddr, dlen); i = 0U; goto ldv_61836; ldv_61835: tmp___2 = __fswab32(*(dcode + (unsigned long )i)); *(dcode + (unsigned long )i) = tmp___2; i = i + 1U; ldv_61836: ; if (i < dlen) { goto ldv_61835; } else { } rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); if (rval != 0) { ql_log(0U, vha, 143, "Failed to load segment %d of firmware.\n", fragment); goto ldv_61838; } else { } faddr = faddr + dlen; risc_addr = risc_addr + dlen; risc_size = risc_size - dlen; fragment = fragment + 1; ldv_61840: ; if (risc_size != 0U && rval == 0) { goto ldv_61839; } else { } ldv_61838: segments = segments - 1; ldv_61842: ; if (segments != 0 && rval == 0) { goto ldv_61841; } else { } return (rval); } } int qla2x00_load_risc(struct scsi_qla_host *vha , uint32_t *srisc_addr ) { int rval ; int i ; int fragment ; uint16_t *wcode ; uint16_t *fwcode ; uint32_t risc_addr ; uint32_t risc_size ; uint32_t fwclen ; uint32_t wlen ; uint32_t *seg ; struct fw_blob *blob ; struct qla_hw_data *ha ; struct req_que *req ; __u16 tmp ; __u16 tmp___0 ; __u16 tmp___1 ; { ha = vha->hw; req = *(ha->req_q_map); blob = qla2x00_request_firmware(vha); if ((unsigned long )blob == (unsigned long )((struct fw_blob *)0)) { ql_log(2U, vha, 131, "Fimware image unavailable.\n"); ql_log(2U, vha, 132, "Firmware images can be retrieved from: http://ldriver.qlogic.com/firmware/.\n"); return (258); } else { } rval = 0; wcode = (uint16_t *)req->ring; *srisc_addr = 0U; fwcode = (uint16_t *)(blob->fw)->data; fwclen = 0U; if ((unsigned long )(blob->fw)->size <= 15UL) { ql_log(0U, vha, 133, "Unable to verify integrity of firmware image (%Zd).\n", (blob->fw)->size); goto fail_fw_integrity; } else { } i = 0; goto ldv_61863; ldv_61862: tmp = __fswab16((int )*(fwcode + ((unsigned long )i + 4UL))); *(wcode + (unsigned long )i) = tmp; i = i + 1; ldv_61863: ; if (i <= 3) { goto ldv_61862; } else { } if (((((unsigned int )*wcode == 65535U && (unsigned int )*(wcode + 1UL) == 65535U) && (unsigned int )*(wcode + 2UL) == 65535U) && (unsigned int )*(wcode + 3UL) == 65535U) || ((((unsigned int )*wcode == 0U && (unsigned int )*(wcode + 1UL) == 0U) && (unsigned int )*(wcode + 2UL) == 0U) && (unsigned int )*(wcode + 3UL) == 0U)) { ql_log(0U, vha, 134, "Unable to verify integrity of firmware image.\n"); ql_log(0U, vha, 135, "Firmware data: %04x %04x %04x %04x.\n", (int )*wcode, (int )*(wcode + 1UL), (int )*(wcode + 2UL), (int )*(wcode + 3UL)); goto fail_fw_integrity; } else { } seg = (uint32_t *)(& blob->segs); goto ldv_61872; ldv_61871: risc_addr = *seg; *srisc_addr = *srisc_addr == 0U ? 
*seg : *srisc_addr; tmp___0 = __fswab16((int )*(fwcode + 3UL)); risc_size = (uint32_t )tmp___0; fwclen = risc_size * 2U + fwclen; if ((unsigned long )(blob->fw)->size < (unsigned long )fwclen) { ql_log(0U, vha, 136, "Unable to verify integrity of firmware image (%Zd).\n", (blob->fw)->size); goto fail_fw_integrity; } else { } fragment = 0; goto ldv_61870; ldv_61869: wlen = (uint32_t )((unsigned short )(ha->fw_transfer_size >> 1)); if (wlen > risc_size) { wlen = risc_size; } else { } ql_dbg(1073741824U, vha, 137, "Loading risc segment@ risc addr %x number of words 0x%x.\n", risc_addr, wlen); i = 0; goto ldv_61866; ldv_61865: tmp___1 = __fswab16((int )*(fwcode + (unsigned long )i)); *(wcode + (unsigned long )i) = tmp___1; i = i + 1; ldv_61866: ; if ((uint32_t )i < wlen) { goto ldv_61865; } else { } rval = qla2x00_load_ram(vha, req->dma, risc_addr, wlen); if (rval != 0) { ql_log(0U, vha, 138, "Failed to load segment %d of firmware.\n", fragment); goto ldv_61868; } else { } fwcode = fwcode + (unsigned long )wlen; risc_addr = risc_addr + wlen; risc_size = risc_size - wlen; fragment = fragment + 1; ldv_61870: ; if (risc_size != 0U && rval == 0) { goto ldv_61869; } else { } ldv_61868: seg = seg + 1; ldv_61872: ; if (*seg != 0U && rval == 0) { goto ldv_61871; } else { } return (rval); fail_fw_integrity: ; return (258); } } static int qla24xx_load_risc_blob(scsi_qla_host_t *vha , uint32_t *srisc_addr ) { int rval ; int segments ; int fragment ; uint32_t *dcode ; uint32_t dlen ; uint32_t risc_addr ; uint32_t risc_size ; uint32_t i ; struct fw_blob *blob ; uint32_t *fwcode ; uint32_t fwclen ; struct qla_hw_data *ha ; struct req_que *req ; __u32 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; { ha = vha->hw; req = *(ha->req_q_map); blob = qla2x00_request_firmware(vha); if ((unsigned long )blob == (unsigned long )((struct fw_blob *)0)) { ql_log(1U, vha, 144, "Fimware image unavailable.\n"); ql_log(1U, vha, 145, "Firmware images can be retrieved from: http://ldriver.qlogic.com/firmware/.\n"); return (258); } else { } ql_dbg(1073741824U, vha, 146, "FW: Loading via request-firmware.\n"); rval = 0; segments = 2; dcode = (uint32_t *)req->ring; *srisc_addr = 0U; fwcode = (uint32_t *)(blob->fw)->data; fwclen = 0U; if ((unsigned long )(blob->fw)->size <= 31UL) { ql_log(0U, vha, 147, "Unable to verify integrity of firmware image (%Zd).\n", (blob->fw)->size); goto fail_fw_integrity; } else { } i = 0U; goto ldv_61893; ldv_61892: tmp = __fswab32(*(fwcode + (unsigned long )(i + 4U))); *(dcode + (unsigned long )i) = tmp; i = i + 1U; ldv_61893: ; if (i <= 3U) { goto ldv_61892; } else { } if ((((*dcode == 4294967295U && *(dcode + 1UL) == 4294967295U) && *(dcode + 2UL) == 4294967295U) && *(dcode + 3UL) == 4294967295U) || (((*dcode == 0U && *(dcode + 1UL) == 0U) && *(dcode + 2UL) == 0U) && *(dcode + 3UL) == 0U)) { ql_log(0U, vha, 148, "Unable to verify integrity of firmware image (%Zd).\n", (blob->fw)->size); ql_log(0U, vha, 149, "Firmware data: %08x %08x %08x %08x.\n", *dcode, *(dcode + 1UL), *(dcode + 2UL), *(dcode + 3UL)); goto fail_fw_integrity; } else { } goto ldv_61902; ldv_61901: tmp___0 = __fswab32(*(fwcode + 2UL)); risc_addr = tmp___0; *srisc_addr = *srisc_addr != 0U ? 
*srisc_addr : risc_addr; tmp___1 = __fswab32(*(fwcode + 3UL)); risc_size = tmp___1; fwclen = risc_size * 4U + fwclen; if ((unsigned long )(blob->fw)->size < (unsigned long )fwclen) { ql_log(0U, vha, 150, "Unable to verify integrity of firmware image (%Zd).\n", (blob->fw)->size); goto fail_fw_integrity; } else { } fragment = 0; goto ldv_61900; ldv_61899: dlen = ha->fw_transfer_size >> 2; if (dlen > risc_size) { dlen = risc_size; } else { } ql_dbg(1073741824U, vha, 151, "Loading risc segment@ risc addr %x number of dwords 0x%x.\n", risc_addr, dlen); i = 0U; goto ldv_61896; ldv_61895: tmp___2 = __fswab32(*(fwcode + (unsigned long )i)); *(dcode + (unsigned long )i) = tmp___2; i = i + 1U; ldv_61896: ; if (i < dlen) { goto ldv_61895; } else { } rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); if (rval != 0) { ql_log(0U, vha, 152, "Failed to load segment %d of firmware.\n", fragment); goto ldv_61898; } else { } fwcode = fwcode + (unsigned long )dlen; risc_addr = risc_addr + dlen; risc_size = risc_size - dlen; fragment = fragment + 1; ldv_61900: ; if (risc_size != 0U && rval == 0) { goto ldv_61899; } else { } ldv_61898: segments = segments - 1; ldv_61902: ; if (segments != 0 && rval == 0) { goto ldv_61901; } else { } return (rval); fail_fw_integrity: ; return (258); } } int qla24xx_load_risc(scsi_qla_host_t *vha , uint32_t *srisc_addr ) { int rval ; int tmp ; int tmp___0 ; { if (ql2xfwloadbin == 1) { tmp = qla81xx_load_risc(vha, srisc_addr); return (tmp); } else { } rval = qla24xx_load_risc_blob(vha, srisc_addr); if (rval == 0) { return (rval); } else { } tmp___0 = qla24xx_load_risc_flash(vha, srisc_addr, (vha->hw)->flt_region_fw); return (tmp___0); } } int qla81xx_load_risc(scsi_qla_host_t *vha , uint32_t *srisc_addr ) { int rval ; struct qla_hw_data *ha ; { ha = vha->hw; if (ql2xfwloadbin == 2) { goto try_blob_fw; } else { } rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); if (rval == 0) { return (rval); } else { } try_blob_fw: rval = qla24xx_load_risc_blob(vha, srisc_addr); if (rval == 0 || ha->flt_region_gold_fw == 0U) { return (rval); } else { } ql_log(2U, vha, 153, "Attempting to fallback to golden firmware.\n"); rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); if (rval != 0) { return (rval); } else { } ql_log(2U, vha, 154, "Update operational firmware.\n"); ha->flags.running_gold_fw = 1U; return (rval); } } void qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha ) { int ret ; int retries ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { ha = vha->hw; if (*((unsigned long *)ha + 2UL) != 0UL) { return; } else { } if ((ha->device_type & 134217728U) == 0U) { return; } else { } if ((unsigned int )ha->fw_major_version == 0U) { return; } else { } ret = qla2x00_stop_firmware(vha); retries = 5; goto ldv_61924; ldv_61923: (*((ha->isp_ops)->reset_chip))(vha); tmp = (*((ha->isp_ops)->chip_diag))(vha); if (tmp != 0) { goto ldv_61922; } else { } tmp___0 = qla2x00_setup_chip(vha); if (tmp___0 != 0) { goto ldv_61922; } else { } ql_log(2U, vha, 32789, "Attempting retry of stop-firmware command.\n"); ret = qla2x00_stop_firmware(vha); ldv_61922: retries = retries - 1; ldv_61924: ; if (((ret != 0 && ret != 256) && ret != 1) && retries != 0) { goto ldv_61923; } else { } return; } } int qla24xx_configure_vhba(scsi_qla_host_t *vha ) { int rval ; int rval2 ; uint16_t mb[32U] ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp ; struct req_que *req ; struct rsp_que *rsp ; { rval = 0; ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = 
(struct scsi_qla_host *)tmp; if ((unsigned int )vha->vp_idx == 0U) { return (-22); } else { } rval = qla2x00_fw_ready(base_vha); if (*((unsigned long *)ha + 2UL) != 0UL) { req = *(ha->req_q_map); } else { req = vha->req; } rsp = req->rsp; if (rval == 0) { clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); qla2x00_marker(vha, req, rsp, 0, 0, 2); } else { } vha->flags.management_server_logged_in = 0U; rval2 = (*((ha->isp_ops)->fabric_login))(vha, 2044, 255, 255, 252, (uint16_t *)(& mb), 2); if (rval2 != 0 || (unsigned int )mb[0] != 16384U) { if (rval2 == 259) { ql_dbg(1073741824U, vha, 288, "Failed SNS login: loop_id=%x, rval2=%d\n", 2044, rval2); } else { ql_dbg(1073741824U, vha, 259, "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", 2044, (int )mb[0], (int )mb[1], (int )mb[2], (int )mb[6], (int )mb[7]); } return (258); } else { } atomic_set(& vha->loop_down_timer, 0); atomic_set(& vha->loop_state, 3); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); rval = qla2x00_loop_resync(base_vha); return (rval); } } static struct list_head qla_cs84xx_list = {& qla_cs84xx_list, & qla_cs84xx_list}; static struct mutex qla_cs84xx_mutex = {{1}, {{{{{0U}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "qla_cs84xx_mutex.wait_lock", 0, 0UL}}}}, {& qla_cs84xx_mutex.wait_list, & qla_cs84xx_mutex.wait_list}, 0, 0, (void *)(& qla_cs84xx_mutex), {0, {0, 0}, "qla_cs84xx_mutex", 0, 0UL}}; static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *vha ) { struct qla_chip_state_84xx *cs84xx ; struct qla_hw_data *ha ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; void *tmp ; struct lock_class_key __key ; struct lock_class_key __key___0 ; { ha = vha->hw; mutex_lock_nested(& qla_cs84xx_mutex, 0U); __mptr = (struct list_head const *)qla_cs84xx_list.next; cs84xx = (struct qla_chip_state_84xx *)__mptr; goto ldv_61950; ldv_61949: ; if ((unsigned long )cs84xx->bus == (unsigned long )((void *)(ha->pdev)->bus)) { kref_get(& cs84xx->kref); goto done; } else { } __mptr___0 = (struct list_head const *)cs84xx->list.next; cs84xx = (struct qla_chip_state_84xx *)__mptr___0; ldv_61950: ; if ((unsigned long )(& cs84xx->list) != (unsigned long )(& qla_cs84xx_list)) { goto ldv_61949; } else { } tmp = kzalloc(296UL, 208U); cs84xx = (struct qla_chip_state_84xx *)tmp; if ((unsigned long )cs84xx == (unsigned long )((struct qla_chip_state_84xx *)0)) { goto done; } else { } kref_init(& cs84xx->kref); spinlock_check(& cs84xx->access_lock); __raw_spin_lock_init(& cs84xx->access_lock.ldv_6105.rlock, "&(&cs84xx->access_lock)->rlock", & __key); __mutex_init(& cs84xx->fw_update_mutex, "&cs84xx->fw_update_mutex", & __key___0); cs84xx->bus = (void *)(ha->pdev)->bus; list_add_tail(& cs84xx->list, & qla_cs84xx_list); done: mutex_unlock(& qla_cs84xx_mutex); return (cs84xx); } } static void __qla84xx_chip_release(struct kref *kref ) { struct qla_chip_state_84xx *cs84xx ; struct kref const *__mptr ; { __mptr = (struct kref const *)kref; cs84xx = (struct qla_chip_state_84xx *)__mptr + 0xfffffffffffffff0UL; mutex_lock_nested(& qla_cs84xx_mutex, 0U); list_del(& cs84xx->list); mutex_unlock(& qla_cs84xx_mutex); kfree((void const *)cs84xx); return; } } void qla84xx_put_chip(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((unsigned long )ha->cs84xx != (unsigned long )((struct qla_chip_state_84xx *)0)) { kref_put(& (ha->cs84xx)->kref, & __qla84xx_chip_release); } else { } return; } } 
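/*
 * The three routines above implement a shared-state pattern for CS84xx
 * chips: one struct qla_chip_state_84xx is kept per PCI bus on the global
 * qla_cs84xx_list under qla_cs84xx_mutex and is reference-counted with a
 * kref.  qla84xx_get_chip() either takes a reference on an existing entry
 * for the same pdev->bus or allocates and links a new one, and
 * qla84xx_put_chip() drops the reference, with __qla84xx_chip_release()
 * unlinking and freeing the entry once the count reaches zero.  A minimal
 * usage sketch (illustrative only, simplified from the surrounding code):
 *
 *     ha->cs84xx = qla84xx_get_chip(vha);   // share or allocate per-bus state
 *     if (ha->cs84xx) {
 *             mutex_lock(&ha->cs84xx->fw_update_mutex);
 *             // ... verify/update firmware, as in qla84xx_init_chip() ...
 *             mutex_unlock(&ha->cs84xx->fw_update_mutex);
 *     }
 *     qla84xx_put_chip(vha);                // kref_put -> __qla84xx_chip_release
 */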
static int qla84xx_init_chip(scsi_qla_host_t *vha ) { int rval ; uint16_t status[2U] ; struct qla_hw_data *ha ; { ha = vha->hw; mutex_lock_nested(& (ha->cs84xx)->fw_update_mutex, 0U); rval = qla84xx_verify_chip(vha, (uint16_t *)(& status)); mutex_unlock(& (ha->cs84xx)->fw_update_mutex); return (rval != 0 || (unsigned int )status[0] != 0U ? 258 : 0); } } int qla81xx_nvram_config(struct scsi_qla_host *vha ) { int rval ; struct init_cb_81xx *icb ; struct nvram_81xx *nv ; uint32_t *dptr ; uint8_t *dptr1 ; uint8_t *dptr2 ; uint32_t chksum ; uint16_t cnt ; struct qla_hw_data *ha ; uint32_t *tmp ; uint8_t *tmp___0 ; uint8_t *tmp___1 ; uint16_t tmp___2 ; uint8_t *tmp___3 ; uint8_t *tmp___4 ; uint16_t tmp___5 ; size_t __len ; void *__ret ; int tmp___6 ; size_t __len___0 ; void *__ret___0 ; size_t __len___1 ; void *__ret___1 ; size_t __len___2 ; void *__ret___2 ; size_t __len___3 ; void *__ret___3 ; size_t __len___4 ; void *__ret___4 ; size_t __len___5 ; void *__ret___5 ; { ha = vha->hw; rval = 0; icb = (struct init_cb_81xx *)ha->init_cb; nv = (struct nvram_81xx *)ha->nvram; ha->nvram_size = 512U; ha->vpd_size = 512U; if (((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) || (ha->device_type & 65536U) != 0U) { ha->vpd_size = 1024U; } else { } ha->vpd = ha->nvram + 2048UL; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->vpd, ha->flt_region_vpd << 2, (uint32_t )ha->vpd_size); (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->nvram, ha->flt_region_nvram << 2, (uint32_t )ha->nvram_size); dptr = (uint32_t *)nv; cnt = 0U; chksum = 0U; goto ldv_61983; ldv_61982: tmp = dptr; dptr = dptr + 1; chksum = *tmp + chksum; cnt = (uint16_t )((int )cnt + 1); ldv_61983: ; if ((int )ha->nvram_size >> 2 > (int )cnt) { goto ldv_61982; } else { } ql_dbg(1073872896U, vha, 273, "Contents of NVRAM:\n"); ql_dump_buffer(1073872896U, vha, 274, (uint8_t *)nv, (uint32_t )ha->nvram_size); if (((((chksum != 0U || (unsigned int )nv->id[0] != 73U) || (unsigned int )nv->id[1] != 83U) || (unsigned int )nv->id[2] != 80U) || (unsigned int )nv->id[3] != 32U) || (unsigned int )nv->nvram_version == 0U) { ql_log(2U, vha, 115, "Inconsistent NVRAM detected: checksum=0x%x id=%c version=0x%x.\n", chksum, (int )nv->id[0], (int )nv->nvram_version); ql_log(2U, vha, 116, "Falling back to functioning (yet invalid -- WWPN) defaults.\n"); memset((void *)nv, 0, (size_t )ha->nvram_size); nv->nvram_version = 1U; nv->version = 1U; nv->frame_payload_size = 2048U; nv->execution_throttle = 65535U; nv->exchange_count = 0U; nv->port_name[0] = 33U; nv->port_name[1] = ha->port_no; nv->port_name[2] = 0U; nv->port_name[3] = 224U; nv->port_name[4] = 139U; nv->port_name[5] = 28U; nv->port_name[6] = 85U; nv->port_name[7] = 134U; nv->node_name[0] = 32U; nv->node_name[1] = 0U; nv->node_name[2] = 0U; nv->node_name[3] = 224U; nv->node_name[4] = 139U; nv->node_name[5] = 28U; nv->node_name[6] = 85U; nv->node_name[7] = 134U; nv->login_retry_count = 8U; nv->interrupt_delay_timer = 0U; nv->login_timeout = 0U; nv->firmware_options_1 = 24582U; nv->firmware_options_2 = 32U; nv->firmware_options_2 = nv->firmware_options_2 | 4096U; nv->firmware_options_3 = 16384U; nv->host_p = 3072U; nv->efi_parameters = 0U; nv->reset_delay = 5U; nv->max_luns_per_target = 128U; nv->port_down_retry_count = 30U; nv->link_down_timeout = 180U; nv->enode_mac[0] = 0U; nv->enode_mac[1] = 192U; nv->enode_mac[2] = 221U; nv->enode_mac[3] = 4U; nv->enode_mac[4] = 5U; nv->enode_mac[5] = (unsigned int )ha->port_no + 6U; rval = 1; } else { } if ((ha->device_type & 33554432U) != 0U) { 
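/* 65528U is 0xFFF8, so for the adapter family selected by this device_type
 * bit the NVRAM frame payload size is rounded down to a multiple of 8 bytes
 * before the init control block is built from the NVRAM image below. */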
nv->frame_payload_size = (unsigned int )nv->frame_payload_size & 65528U; } else { } qlt_81xx_config_nvram_stage1(vha, nv); memset((void *)icb, 0, (size_t )ha->init_cb_size); dptr1 = (uint8_t *)icb; dptr2 = (uint8_t *)(& nv->version); cnt = 28U; goto ldv_61986; ldv_61985: tmp___0 = dptr1; dptr1 = dptr1 + 1; tmp___1 = dptr2; dptr2 = dptr2 + 1; *tmp___0 = *tmp___1; ldv_61986: tmp___2 = cnt; cnt = (uint16_t )((int )cnt - 1); if ((unsigned int )tmp___2 != 0U) { goto ldv_61985; } else { } icb->login_retry_count = nv->login_retry_count; dptr1 = (uint8_t *)(& icb->interrupt_delay_timer); dptr2 = (uint8_t *)(& nv->interrupt_delay_timer); cnt = 16U; goto ldv_61989; ldv_61988: tmp___3 = dptr1; dptr1 = dptr1 + 1; tmp___4 = dptr2; dptr2 = dptr2 + 1; *tmp___3 = *tmp___4; ldv_61989: tmp___5 = cnt; cnt = (uint16_t )((int )cnt - 1); if ((unsigned int )tmp___5 != 0U) { goto ldv_61988; } else { } __len = 6UL; if (__len > 63UL) { __ret = __memcpy((void *)(& icb->enode_mac), (void const *)(& nv->enode_mac), __len); } else { __ret = __builtin_memcpy((void *)(& icb->enode_mac), (void const *)(& nv->enode_mac), __len); } tmp___6 = memcmp((void const *)(& icb->enode_mac), (void const *)"", 6UL); if (tmp___6 == 0) { icb->enode_mac[0] = 0U; icb->enode_mac[1] = 192U; icb->enode_mac[2] = 221U; icb->enode_mac[3] = 4U; icb->enode_mac[4] = 5U; icb->enode_mac[5] = (unsigned int )ha->port_no + 6U; } else { } __len___0 = 64UL; if (__len___0 > 63UL) { __ret___0 = __memcpy((void *)ha->ex_init_cb, (void const *)(& nv->ex_version), __len___0); } else { __ret___0 = __builtin_memcpy((void *)ha->ex_init_cb, (void const *)(& nv->ex_version), __len___0); } qla2x00_set_model_info(vha, (uint8_t *)(& nv->model_name), 16UL, (char *)"QLE8XXX"); qlt_81xx_config_nvram_stage2(vha, icb); if ((nv->host_p & 32768U) != 0U) { __len___1 = 8UL; if (__len___1 > 63UL) { __ret___1 = __memcpy((void *)(& icb->node_name), (void const *)(& nv->alternate_node_name), __len___1); } else { __ret___1 = __builtin_memcpy((void *)(& icb->node_name), (void const *)(& nv->alternate_node_name), __len___1); } __len___2 = 8UL; if (__len___2 > 63UL) { __ret___2 = __memcpy((void *)(& icb->port_name), (void const *)(& nv->alternate_port_name), __len___2); } else { __ret___2 = __builtin_memcpy((void *)(& icb->port_name), (void const *)(& nv->alternate_port_name), __len___2); } } else { } if ((icb->firmware_options_1 & 16384U) == 0U) { __len___3 = 8UL; if (__len___3 > 63UL) { __ret___3 = __memcpy((void *)(& icb->node_name), (void const *)(& icb->port_name), __len___3); } else { __ret___3 = __builtin_memcpy((void *)(& icb->node_name), (void const *)(& icb->port_name), __len___3); } icb->node_name[0] = (unsigned int )icb->node_name[0] & 240U; } else { } ha->flags.disable_risc_code_load = 0U; ha->flags.enable_lip_reset = 0U; ha->flags.enable_lip_full_login = (nv->host_p & 1024U) != 0U; ha->flags.enable_target_reset = (nv->host_p & 2048U) != 0U; ha->flags.enable_led_scheme = 0U; ha->flags.disable_serdes = (nv->host_p & 32U) != 0U; ha->operating_mode = (uint8_t )((icb->firmware_options_2 & 112U) >> 4); ha->serial0 = icb->port_name[5]; ha->serial1 = icb->port_name[6]; ha->serial2 = icb->port_name[7]; __len___4 = 8UL; if (__len___4 > 63UL) { __ret___4 = __memcpy((void *)(& vha->node_name), (void const *)(& icb->node_name), __len___4); } else { __ret___4 = __builtin_memcpy((void *)(& vha->node_name), (void const *)(& icb->node_name), __len___4); } __len___5 = 8UL; if (__len___5 > 63UL) { __ret___5 = __memcpy((void *)(& vha->port_name), (void const *)(& icb->port_name), 
__len___5); } else { __ret___5 = __builtin_memcpy((void *)(& vha->port_name), (void const *)(& icb->port_name), __len___5); } icb->execution_throttle = 65535U; ha->retry_count = (uint8_t )nv->login_retry_count; if ((int )nv->login_timeout < ql2xlogintimeout) { nv->login_timeout = (unsigned short )ql2xlogintimeout; } else { } if ((unsigned int )nv->login_timeout <= 3U) { nv->login_timeout = 4U; } else { } ha->login_timeout = (uint8_t )nv->login_timeout; icb->login_timeout = nv->login_timeout; ha->r_a_tov = 100U; ha->loop_reset_delay = (uint16_t )nv->reset_delay; if ((unsigned int )nv->link_down_timeout == 0U) { ha->loop_down_abort_time = 195U; } else { ha->link_down_timeout = (uint8_t )nv->link_down_timeout; ha->loop_down_abort_time = ~ ((int )ha->link_down_timeout); } ha->port_down_retry_count = (int )nv->port_down_retry_count; if (qlport_down_retry != 0) { ha->port_down_retry_count = qlport_down_retry; } else { } ha->login_retry_count = (uint32_t )nv->login_retry_count; if (ha->port_down_retry_count == (int )nv->port_down_retry_count && ha->port_down_retry_count > 3) { ha->login_retry_count = (uint32_t )ha->port_down_retry_count; } else if (ha->port_down_retry_count > (int )ha->login_retry_count) { ha->login_retry_count = (uint32_t )ha->port_down_retry_count; } else { } if (ql2xloginretrycount != 0) { ha->login_retry_count = (uint32_t )ql2xloginretrycount; } else { } if (*((unsigned long *)vha->hw + 2UL) == 0UL && ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { icb->firmware_options_2 = icb->firmware_options_2 | 4194304U; } else { } if (*((unsigned long *)vha + 19UL) == 0UL) { ha->zio_mode = (unsigned int )((uint16_t )icb->firmware_options_2) & 15U; ha->zio_timer = (unsigned int )icb->interrupt_delay_timer != 0U ? icb->interrupt_delay_timer : 2U; } else { } icb->firmware_options_2 = icb->firmware_options_2 & 4294967280U; vha->flags.process_response_queue = 0U; if ((unsigned int )ha->zio_mode != 0U) { ha->zio_mode = 6U; ql_log(2U, vha, 117, "ZIO mode %d enabled; timer delay (%d us).\n", (int )ha->zio_mode, (int )ha->zio_timer * 100); icb->firmware_options_2 = icb->firmware_options_2 | (uint32_t )ha->zio_mode; icb->interrupt_delay_timer = ha->zio_timer; vha->flags.process_response_queue = 1U; } else { } if (rval != 0) { ql_log(1U, vha, 118, "NVRAM configuration failed.\n"); } else { } return (rval); } } int qla82xx_restart_isp(scsi_qla_host_t *vha ) { int status ; int rval ; uint32_t wait_time ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; struct scsi_qla_host *vp ; unsigned long flags ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; raw_spinlock_t *tmp___3 ; struct list_head const *__mptr ; raw_spinlock_t *tmp___4 ; struct list_head const *__mptr___0 ; { ha = vha->hw; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); status = qla2x00_init_rings(vha); if (status == 0) { clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); ha->flags.chip_reset_done = 1U; status = qla2x00_fw_ready(vha); if (status == 0) { ql_log(2U, vha, 32828, "Start configure loop, status =%d.\n", status); qla2x00_marker(vha, req, rsp, 0, 0, 2); vha->flags.online = 1U; wait_time = 256U; ldv_62023: clear_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); qla2x00_configure_loop(vha); wait_time = wait_time - 1U; tmp = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp == 0) { tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 == 0) { if (wait_time != 0U) { tmp___1 = constant_test_bit(4L, (unsigned long 
const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { goto ldv_62023; } else { goto ldv_62024; } } else { goto ldv_62024; } } else { goto ldv_62024; } } else { } ldv_62024: ; } else { } if ((vha->device_flags & 2U) != 0U) { status = 0; } else { } ql_log(2U, vha, 32768, "Configure loop done, status = 0x%x.\n", status); } else { } if (status == 0) { clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); tmp___2 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___2 == 0) { vha->marker_needed = 1U; } else { } vha->flags.online = 1U; (*((ha->isp_ops)->enable_intrs))(ha); ha->isp_abort_cnt = 0U; clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); status = qla82xx_check_md_needed(vha); if ((unsigned long )ha->fce != (unsigned long )((void *)0)) { ha->flags.fce_enabled = 1U; memset(ha->fce, 0, (size_t )(ha->fce_bufs * 1024U)); rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, (int )((uint16_t )ha->fce_bufs), (uint16_t *)(& ha->fce_mb), & ha->fce_bufs); if (rval != 0) { ql_log(1U, vha, 32769, "Unable to reinitialize FCE (%d).\n", rval); ha->flags.fce_enabled = 0U; } else { } } else { } if ((unsigned long )ha->eft != (unsigned long )((void *)0)) { memset(ha->eft, 0, 65536UL); rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, 4); if (rval != 0) { ql_log(1U, vha, 32784, "Unable to reinitialize EFT (%d).\n", rval); } else { } } else { } } else { } if (status == 0) { ql_dbg(4194304U, vha, 32785, "qla82xx_restart_isp succeeded.\n"); tmp___3 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___3); __mptr = (struct list_head const *)ha->vp_list.next; vp = (struct scsi_qla_host *)__mptr; goto ldv_62036; ldv_62035: ; if ((unsigned int )vp->vp_idx != 0U) { atomic_inc(& vp->vref_count); spin_unlock_irqrestore(& ha->vport_slock, flags); qla2x00_vp_abort_isp(vp); tmp___4 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___4); atomic_dec(& vp->vref_count); } else { } __mptr___0 = (struct list_head const *)vp->list.next; vp = (struct scsi_qla_host *)__mptr___0; ldv_62036: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_62035; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); } else { ql_log(1U, vha, 32790, "qla82xx_restart_isp **** FAILED ****.\n"); } return (status); } } void qla81xx_update_fw_options(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; if (ql2xetsenable == 0) { return; } else { } memset((void *)(& ha->fw_options), 0, 32UL); ha->fw_options[2] = (uint16_t )((unsigned int )ha->fw_options[2] | 512U); qla2x00_set_fw_options(vha, (uint16_t *)(& ha->fw_options)); return; } } static int qla24xx_get_fcp_prio(scsi_qla_host_t *vha , fc_port_t *fcport ) { int i ; int entries ; uint8_t pid_match ; uint8_t wwn_match ; int priority ; uint32_t pid1 ; uint32_t pid2 ; uint64_t wwn1 ; uint64_t wwn2 ; struct qla_fcp_prio_entry *pri_entry ; struct qla_hw_data *ha ; { ha = vha->hw; if ((unsigned long )ha->fcp_prio_cfg == (unsigned long )((struct qla_fcp_prio_cfg *)0) || *((unsigned long *)ha + 2UL) == 0UL) { return (-1); } else { } priority = -1; entries = (int )(ha->fcp_prio_cfg)->num_entries; pri_entry = (struct qla_fcp_prio_entry *)(& (ha->fcp_prio_cfg)->entry); i = 0; goto ldv_62060; ldv_62059: wwn_match = 0U; pid_match = wwn_match; if (((int )pri_entry->flags & 1) == 0) { pri_entry = pri_entry + 1; goto ldv_62057; } else { } if (((int )pri_entry->flags & 4) != 0) { pid1 = pri_entry->src_pid & 16777215U; pid2 = vha->d_id.b24; if (pid1 == 16777215U) { pid_match = (uint8_t )((int 
)pid_match + 1); } else if (pid1 == pid2) { pid_match = (uint8_t )((int )pid_match + 1); } else { } } else { } if (((int )pri_entry->flags & 8) != 0) { pid1 = pri_entry->dst_pid & 16777215U; pid2 = fcport->d_id.b24; if (pid1 == 16777215U) { pid_match = (uint8_t )((int )pid_match + 1); } else if (pid1 == pid2) { pid_match = (uint8_t )((int )pid_match + 1); } else { } } else { } if (((int )pri_entry->flags & 64) != 0) { wwn1 = wwn_to_u64((u8 *)(& vha->port_name)); wwn2 = wwn_to_u64((u8 *)(& pri_entry->src_wwpn)); if (wwn2 == 0xffffffffffffffffULL) { wwn_match = (uint8_t )((int )wwn_match + 1); } else if (wwn1 == wwn2) { wwn_match = (uint8_t )((int )wwn_match + 1); } else { } } else { } if (((int )pri_entry->flags & 128) != 0) { wwn1 = wwn_to_u64((u8 *)(& fcport->port_name)); wwn2 = wwn_to_u64((u8 *)(& pri_entry->dst_wwpn)); if (wwn2 == 0xffffffffffffffffULL) { wwn_match = (uint8_t )((int )wwn_match + 1); } else if (wwn1 == wwn2) { wwn_match = (uint8_t )((int )wwn_match + 1); } else { } } else { } if ((unsigned int )pid_match == 2U || (unsigned int )wwn_match == 2U) { if (((int )pri_entry->flags & 2) != 0) { priority = (int )pri_entry->tag; } else { } goto ldv_62058; } else { } pri_entry = pri_entry + 1; ldv_62057: i = i + 1; ldv_62060: ; if (i < entries) { goto ldv_62059; } else { } ldv_62058: ; return (priority); } } int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha , fc_port_t *fcport ) { int ret ; int priority ; uint16_t mb[5U] ; { if ((unsigned int )fcport->port_type != 5U || (unsigned int )fcport->loop_id == 4096U) { return (258); } else { } priority = qla24xx_get_fcp_prio(vha, fcport); if (priority < 0) { return (258); } else { } if (((vha->hw)->device_type & 16384U) != 0U || ((vha->hw)->device_type & 262144U) != 0U) { fcport->fcp_prio = (unsigned int )((uint8_t )priority) & 15U; return (0); } else { } ret = qla24xx_set_fcp_prio(vha, (int )fcport->loop_id, (int )((uint16_t )priority), (uint16_t *)(& mb)); if (ret == 0) { if ((int )fcport->fcp_prio != priority) { ql_dbg(8388608U, vha, 28830, "Updated FCP_CMND priority - value=%d loop_id=%d port_id=%02x%02x%02x.\n", priority, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { } fcport->fcp_prio = (unsigned int )((uint8_t )priority) & 15U; } else { ql_dbg(8388608U, vha, 28751, "Unable to update FCP_CMND priority - ret=0x%x for loop_id=%d port_id=%02x%02x%02x.\n", ret, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } return (ret); } } int qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha ) { int ret ; fc_port_t *fcport ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { ret = 258; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_62078; ldv_62077: ret = qla24xx_update_fcport_fcp_prio(vha, fcport); __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_62078: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_62077; } else { } return (ret); } } int reg_timer_3(struct timer_list *timer ) { { ldv_timer_list_3 = timer; ldv_timer_state_3 = 1; return (0); } } void activate_pending_timer_3(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_3 == (unsigned long )timer) { if (ldv_timer_state_3 == 2 || pending_flag != 0) { ldv_timer_list_3 = timer; ldv_timer_list_3->data = data; ldv_timer_state_3 = 1; } else { } return; } 
else { } reg_timer_3(timer); ldv_timer_list_3->data = data; return; } } void disable_suitable_timer_3(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_3) { ldv_timer_state_3 = 0; return; } else { } return; } } void choose_timer_3(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_3 = 2; return; } } int ldv_del_timer_17(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_18(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } int ldv_del_timer_19(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type___1 ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } __inline static __le16 __cpu_to_le16p(__u16 const *p ) { { return ((__le16 )*p); } } extern size_t strlen(char const * ) ; __inline static void arch_local_irq_restore(unsigned long f ) { unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.restore_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-3.12-rc1.tar.xz/linux-3.12-rc1/arch/x86/include/asm/paravirt.h"), "i" (809), "i" (12UL)); ldv_4645: ; goto ldv_4645; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (45UL), [paravirt_opptr] "i" (& pv_irq_ops.restore_fl.func), [paravirt_clobber] "i" (1), "D" (f): "memory", "cc"); return; } } __inline static void arch_local_irq_disable(void) { unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.irq_disable.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-3.12-rc1.tar.xz/linux-3.12-rc1/arch/x86/include/asm/paravirt.h"), "i" (814), "i" (12UL)); ldv_4654: ; goto ldv_4654; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (46UL), [paravirt_opptr] "i" (& pv_irq_ops.irq_disable.func), [paravirt_clobber] "i" (1): "memory", "cc"); return; } } __inline static unsigned long arch_local_irq_save(void) { unsigned long f ; { f = arch_local_save_flags(); 
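/* Save-then-disable sequence: the flags captured above are handed back to
 * the caller (see qla2x00_poll() further down), which later passes them to
 * arch_local_irq_restore() so the previous interrupt-enable state is
 * reinstated instead of interrupts being re-enabled unconditionally. */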
arch_local_irq_disable(); return (f); } } extern void trace_hardirqs_on(void) ; extern void trace_hardirqs_off(void) ; extern unsigned long wait_for_completion_timeout(struct completion * , unsigned long ) ; int ldv_del_timer_23(struct timer_list *ldv_func_arg1 ) ; void activate_pending_timer_4(struct timer_list *timer , unsigned long data , int pending_flag ) ; void disable_suitable_timer_4(struct timer_list *timer ) ; void choose_timer_4(struct timer_list *timer ) ; int reg_timer_4(struct timer_list *timer ) ; int ldv_scsi_add_host_with_dma_24(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; extern void int_to_scsilun(unsigned int , struct scsi_lun * ) ; __inline static void fc_vport_set_state(struct fc_vport *vport , enum fc_vport_state new_state ) { { if ((unsigned int )new_state != 0U && (unsigned int )new_state != 4U) { vport->vport_last_state = vport->vport_state; } else { } vport->vport_state = new_state; return; } } int qla2x00_get_thermal_temp(scsi_qla_host_t *vha , uint16_t *temp ) ; int qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha ) ; int qla24xx_control_vp(scsi_qla_host_t *vha , int cmd ) ; int qla24xx_modify_vp_config(scsi_qla_host_t *vha ) ; void qla24xx_report_id_acquisition(scsi_qla_host_t *vha , struct vp_rpt_id_entry_24xx *rptid_entry ) ; int qla2x00_dump_ram(scsi_qla_host_t *vha , dma_addr_t req_dma , uint32_t addr , uint32_t size ) ; int qla2x00_issue_iocb(scsi_qla_host_t *vha , void *buffer , dma_addr_t phys_addr , size_t size ) ; int qla2x00_get_node_name_list(scsi_qla_host_t *vha , void **out_data , int *out_len ) ; int qla2x00_send_sns(scsi_qla_host_t *vha , dma_addr_t sns_phys_address , uint16_t cmd_size , size_t buf_size ) ; int qla2x00_get_fcal_position_map(scsi_qla_host_t *vha , char *pos_map ) ; int qla2x00_get_link_status(scsi_qla_host_t *vha , uint16_t loop_id , struct link_statistics *stats , dma_addr_t stats_dma ) ; int qla24xx_get_isp_stats(scsi_qla_host_t *vha , struct link_statistics *stats , dma_addr_t stats_dma ) ; int qla2x00_system_error(scsi_qla_host_t *vha ) ; int qla2x00_read_sfp(scsi_qla_host_t *vha , dma_addr_t sfp_dma , uint8_t *sfp , uint16_t dev , uint16_t off , uint16_t len , uint16_t opt ) ; int qla2x00_write_sfp(scsi_qla_host_t *vha , dma_addr_t sfp_dma , uint8_t *sfp , uint16_t dev , uint16_t off , uint16_t len , uint16_t opt ) ; int qla81xx_fac_do_write_enable(scsi_qla_host_t *vha , int enable ) ; int qla81xx_fac_erase_sector(scsi_qla_host_t *vha , uint32_t start , uint32_t finish ) ; int qla2x00_get_xgmac_stats(scsi_qla_host_t *vha , dma_addr_t stats_dma , uint16_t size_in_bytes , uint16_t *actual_size ) ; int qla2x00_get_dcbx_params(scsi_qla_host_t *vha , dma_addr_t tlv_dma , uint16_t size ) ; int qla81xx_set_port_config(scsi_qla_host_t *vha , uint16_t *mb ) ; int qla2x00_port_logout(scsi_qla_host_t *vha , struct fc_port *fcport ) ; void ql_dump_regs(uint32_t level , scsi_qla_host_t *vha , int32_t id ) ; int qla2x00_loopback_test(scsi_qla_host_t *vha , struct msg_echo_lb *mreq , uint16_t *mresp ) ; int qla2x00_echo_test(scsi_qla_host_t *vha , struct msg_echo_lb *mreq , uint16_t *mresp ) ; void qla82xx_poll(int irq , void *dev_id ) ; int qla82xx_mbx_intr_enable(scsi_qla_host_t *vha ) ; int qla82xx_mbx_intr_disable(scsi_qla_host_t *vha ) ; int qla81xx_set_led_config(scsi_qla_host_t *vha , uint16_t *led_cfg ) ; int qla81xx_get_led_config(scsi_qla_host_t *vha , uint16_t *led_cfg ) ; int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha , int enable ) ; int qla82xx_read_temperature(scsi_qla_host_t *vha ) ; int 
qla8044_read_temperature(scsi_qla_host_t *vha ) ; int qla84xx_reset_chip(scsi_qla_host_t *vha , uint16_t enable_diagnostic ) ; int qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha , void *buffer , dma_addr_t phys_addr , size_t size , uint32_t tov ) ; int qla2x00_get_idma_speed(scsi_qla_host_t *vha , uint16_t loop_id , uint16_t *port_speed , uint16_t *mb ) ; int qla82xx_md_get_template_size(scsi_qla_host_t *vha ) ; int qla82xx_md_get_template(scsi_qla_host_t *vha ) ; int qla8044_md_get_template(scsi_qla_host_t *vha ) ; __inline static void qla2x00_poll(struct rsp_que *rsp ) { unsigned long flags ; struct qla_hw_data *ha ; int tmp ; { ha = rsp->hw; flags = arch_local_irq_save(); trace_hardirqs_off(); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { qla82xx_poll(0, (void *)rsp); } else { (*((ha->isp_ops)->intr_handler))(0, (void *)rsp); } tmp = arch_irqs_disabled_flags(flags); if (tmp != 0) { arch_local_irq_restore(flags); trace_hardirqs_off(); } else { trace_hardirqs_on(); arch_local_irq_restore(flags); } return; } } __inline static uint8_t *host_to_fcp_swap(uint8_t *fcp , uint32_t bsize ) { uint32_t *ifcp ; uint32_t *ofcp ; uint32_t iter ; uint32_t *tmp ; uint32_t *tmp___0 ; __u32 tmp___1 ; { ifcp = (uint32_t *)fcp; ofcp = (uint32_t *)fcp; iter = bsize >> 2; goto ldv_43322; ldv_43321: tmp = ofcp; ofcp = ofcp + 1; tmp___0 = ifcp; ifcp = ifcp + 1; tmp___1 = __fswab32(*tmp___0); *tmp = tmp___1; iter = iter - 1U; ldv_43322: ; if (iter != 0U) { goto ldv_43321; } else { } return (fcp); } } void qlt_modify_vp_config(struct scsi_qla_host *vha , struct vp_config_entry_24xx *vpmod ) ; static int qla2x00_mailbox_command(scsi_qla_host_t *vha , mbx_cmd_t *mcp ) { int rval ; unsigned long flags ; device_reg_t *reg ; uint8_t abort_active ; uint8_t io_lock_on ; uint16_t command ; uint16_t *iptr ; uint16_t *optr ; uint32_t cnt ; uint32_t mboxes ; unsigned long wait_time ; struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; unsigned long tmp___1 ; raw_spinlock_t *tmp___2 ; unsigned int tmp___3 ; raw_spinlock_t *tmp___4 ; unsigned long tmp___5 ; unsigned int tmp___6 ; uint16_t *iptr2 ; uint16_t mb0 ; uint32_t ictrl ; unsigned short tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; { flags = 0UL; command = 0U; ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; ql_dbg(536870912U, vha, 4096, "Entered %s.\n", "qla2x00_mailbox_command"); if ((ha->pdev)->error_state > 2U) { ql_log(1U, vha, 4097, "error_state is greater than pci_channel_io_frozen, exiting.\n"); return (256); } else { } if ((vha->device_flags & 32U) != 0U) { ql_log(1U, vha, 4098, "Device in failed state, exiting.\n"); return (256); } else { } reg = ha->iobase; io_lock_on = (uint8_t )base_vha->flags.init_done; rval = 0; tmp___0 = constant_test_bit(3L, (unsigned long const volatile *)(& base_vha->dpc_flags)); abort_active = (uint8_t )tmp___0; if (*((unsigned long *)ha + 2UL) != 0UL) { ql_log(1U, vha, 4099, "Perm failure on EEH timeout MBX, exiting.\n"); return (256); } else { } if (((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) && *((unsigned long *)ha + 2UL) != 0UL) { mcp->mb[0] = 16395U; ql_log(1U, vha, 4100, "FW hung = %d.\n", (int )ha->flags.isp82xx_fw_hung); return (256); } else { } tmp___1 = wait_for_completion_timeout(& ha->mbx_cmd_comp, (unsigned long )(mcp->tov * 250U)); if (tmp___1 == 0UL) { ql_log(1U, vha, 4101, "Cmd access timeout, cmd=0x%x, Exiting.\n", (int 
)mcp->mb[0]); return (256); } else { } ha->flags.mbox_busy = 1U; ha->mcp = mcp; ql_dbg(536870912U, vha, 4102, "Prepare to issue mbox cmd=0x%x.\n", (int )mcp->mb[0]); tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { optr = (uint16_t *)(& reg->isp82.mailbox_in); } else if ((ha->device_type & 134217728U) != 0U && ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U)) { optr = & reg->isp24.mailbox0; } else { optr = (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->isp.u.isp2100.mailbox0 : & reg->isp.u.isp2300.mailbox0; } iptr = (uint16_t *)(& mcp->mb); command = mcp->mb[0]; mboxes = mcp->out_mb; ql_dbg(537001984U, vha, 4369, "Mailbox registers (OUT):\n"); cnt = 0U; goto ldv_60740; ldv_60739: ; if ((ha->device_type & 2U) != 0U && cnt == 8U) { optr = (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->isp.u_end.isp2200.mailbox8 : & reg->isp.u.isp2300.mailbox0 + 8UL; } else { } if ((int )mboxes & 1) { ql_dbg(536870912U, vha, 4370, "mbox[%d]<-0x%04x\n", cnt, (int )*iptr); writew((int )*iptr, (void volatile *)optr); } else { } mboxes = mboxes >> 1; optr = optr + 1; iptr = iptr + 1; cnt = cnt + 1U; ldv_60740: ; if ((uint32_t )ha->mbx_count > cnt) { goto ldv_60739; } else { } ql_dbg(537001984U, vha, 4375, "I/O Address = %p.\n", optr); ha->flags.mbox_int = 0U; clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); ql_dbg(536870912U, vha, 4111, "Going to unlock irq & waiting for interrupts. jiffies=%lx.\n", jiffies); if (((unsigned int )abort_active == 0U && (unsigned int )io_lock_on != 0U) || ((((ha->device_type & 2048U) != 0U || (ha->device_type & 8192U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) && *((unsigned long *)ha + 2UL) != 0UL)) { set_bit(2L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { tmp___3 = readl((void const volatile *)(& reg->isp82.hint)); if ((int )tmp___3 & 1) { spin_unlock_irqrestore(& ha->hardware_lock, flags); ha->flags.mbox_busy = 0U; ql_dbg(536870912U, vha, 4112, "Pending mailbox timeout, exiting.\n"); rval = 256; goto premature_exit; } else { } writel(1U, (void volatile *)(& reg->isp82.hint)); } else if ((ha->device_type & 134217728U) != 0U) { writel(1342177280U, (void volatile *)(& reg->isp24.hccr)); } else { writew(20480, (void volatile *)(& reg->isp.hccr)); } spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___5 = wait_for_completion_timeout(& ha->mbx_intr_comp, (unsigned long )(mcp->tov * 250U)); if (tmp___5 == 0UL) { ql_dbg(536870912U, vha, 4474, "cmd=%x Timeout.\n", (int )command); tmp___4 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___4); clear_bit(2L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } } else { ql_dbg(536870912U, vha, 4113, "Cmd=%x Polling Mode.\n", (int )command); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { tmp___6 = readl((void const volatile *)(& reg->isp82.hint)); if ((int )tmp___6 & 1) { spin_unlock_irqrestore(& ha->hardware_lock, flags); ha->flags.mbox_busy = 0U; ql_dbg(536870912U, vha, 4114, "Pending mailbox timeout, exiting.\n"); rval = 256; goto premature_exit; } else { } writel(1U, (void volatile *)(& reg->isp82.hint)); } else if ((ha->device_type & 134217728U) != 0U) { writel(1342177280U, (void volatile *)(& 
reg->isp24.hccr)); } else { writew(20480, (void volatile *)(& reg->isp.hccr)); } spin_unlock_irqrestore(& ha->hardware_lock, flags); wait_time = (unsigned long )(mcp->tov * 250U) + (unsigned long )jiffies; goto ldv_60754; ldv_60753: ; if ((long )(wait_time - (unsigned long )jiffies) < 0L) { goto ldv_60752; } else { } qla2x00_poll(*(ha->rsp_q_map)); if (*((unsigned long *)ha + 2UL) == 0UL && ((ha->device_type & 2U) == 0U || (unsigned int )command != 11U)) { msleep(10U); } else { } ldv_60754: ; if (*((unsigned long *)ha + 2UL) == 0UL) { goto ldv_60753; } else { } ldv_60752: ql_dbg(536870912U, vha, 4115, "Waited %d sec.\n", (unsigned int )((((unsigned long )(mcp->tov * 250U) - wait_time) + (unsigned long )jiffies) / 250UL)); } if (*((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(536870912U, vha, 4116, "Cmd=%x completed.\n", (int )command); ha->flags.mbox_int = 0U; clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) && *((unsigned long *)ha + 2UL) != 0UL) { ha->flags.mbox_busy = 0U; mcp->mb[0] = 16395U; ha->mcp = (mbx_cmd_t *)0; rval = 258; ql_log(1U, vha, 4117, "FW hung = %d.\n", (int )ha->flags.isp82xx_fw_hung); goto premature_exit; } else { } if ((unsigned int )ha->mailbox_out[0] != 16384U) { rval = 258; } else { } iptr2 = (uint16_t *)(& mcp->mb); iptr = (uint16_t *)(& ha->mailbox_out); mboxes = mcp->in_mb; ql_dbg(536870912U, vha, 4371, "Mailbox registers (IN):\n"); cnt = 0U; goto ldv_60757; ldv_60756: ; if ((int )mboxes & 1) { *iptr2 = *iptr; ql_dbg(536870912U, vha, 4372, "mbox[%d]->0x%04x\n", cnt, (int )*iptr2); } else { } mboxes = mboxes >> 1; iptr2 = iptr2 + 1; iptr = iptr + 1; cnt = cnt + 1U; ldv_60757: ; if ((uint32_t )ha->mbx_count > cnt) { goto ldv_60756; } else { } } else { if ((ha->device_type & 134217728U) != 0U) { mb0 = readw((void const volatile *)(& reg->isp24.mailbox0)); ictrl = readl((void const volatile *)(& reg->isp24.ictrl)); } else { mb0 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->isp.u.isp2100.mailbox0 : & reg->isp.u.isp2300.mailbox0)); tmp___7 = readw((void const volatile *)(& reg->isp.ictrl)); ictrl = (uint32_t )tmp___7; } ql_dbg(537001984U, vha, 4377, "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx mb[0]=0x%x\n", (int )command, ictrl, jiffies, (int )mb0); ql_dump_regs(537001984U, vha, 4121); if ((unsigned int )mcp->mb[0] != 42U) { (*((ha->isp_ops)->fw_dump))(vha, 0); } else { } rval = 256; } ha->flags.mbox_busy = 0U; ha->mcp = (mbx_cmd_t *)0; if (((unsigned int )abort_active != 0U || (unsigned int )io_lock_on == 0U) && ((((ha->device_type & 2048U) == 0U && (ha->device_type & 8192U) == 0U) && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) || *((unsigned long *)ha + 2UL) == 0UL)) { ql_dbg(536870912U, vha, 4122, "Checking for additional resp interrupt.\n"); qla2x00_poll(*(ha->rsp_q_map)); } else { } if (rval == 256 && (unsigned int )mcp->mb[0] != 42U) { if (((unsigned int )io_lock_on == 0U || ((int )mcp->flags & 4) != 0) || *((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(536870912U, vha, 4123, "Timeout, schedule isp_abort_needed.\n"); tmp___8 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___8 == 0) { tmp___9 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___9 == 0) { tmp___10 = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___10 == 0) { if ((ha->device_type & 16384U) != 0U) { ql_dbg(536870912U, vha, 4394, "disabling pause transmit on port 0 & 1.\n"); qla82xx_wr_32(ha, 106954904UL, 9U); } else { } ql_log(2U, base_vha, 4124, "Mailbox cmd timeout occurred, cmd=0x%x, mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP abort.\n", (int )command, (int )mcp->mb[0], (int )ha->flags.eeh_busy); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else { } } else { } } else { } } else if ((unsigned int )abort_active == 0U) { ql_dbg(536870912U, vha, 4125, "Timeout, calling abort_isp.\n"); tmp___12 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___12 == 0) { tmp___13 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___13 == 0) { tmp___14 = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___14 == 0) { if ((ha->device_type & 16384U) != 0U) { ql_dbg(536870912U, vha, 4395, "disabling pause transmit on port 0 & 1.\n"); qla82xx_wr_32(ha, 106954904UL, 9U); } else { } ql_log(2U, base_vha, 4126, "Mailbox cmd timeout occurred, cmd=0x%x, mb[0]=0x%x. 
Scheduling ISP abort ", (int )command, (int )mcp->mb[0]); set_bit(3L, (unsigned long volatile *)(& vha->dpc_flags)); clear_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); complete(& ha->mbx_cmd_comp); tmp___11 = (*((ha->isp_ops)->abort_isp))(vha); if (tmp___11 != 0) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } clear_bit(3L, (unsigned long volatile *)(& vha->dpc_flags)); ql_dbg(536870912U, vha, 4127, "Finished abort_isp.\n"); goto mbx_done; } else { } } else { } } else { } } else { } } else { } premature_exit: complete(& ha->mbx_cmd_comp); mbx_done: ; if (rval != 0) { ql_log(1U, base_vha, 4128, "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n", (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2], (int )mcp->mb[3], (int )command); } else { ql_dbg(536870912U, base_vha, 4129, "Done %s.\n", "qla2x00_mailbox_command"); } return (rval); } } int qla2x00_load_ram(scsi_qla_host_t *vha , dma_addr_t req_dma , uint32_t risc_addr , uint32_t risc_code_size ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; ql_dbg(536903680U, vha, 4130, "Entered %s.\n", "qla2x00_load_ram"); if ((unsigned int )((unsigned short )(risc_addr >> 16)) != 0U || (ha->device_type & 134217728U) != 0U) { mcp->mb[0] = 11U; mcp->mb[8] = (unsigned short )(risc_addr >> 16); mcp->out_mb = 257U; } else { mcp->mb[0] = 9U; mcp->out_mb = 1U; } mcp->mb[1] = (unsigned short )risc_addr; mcp->mb[2] = (unsigned short )((unsigned int )req_dma >> 16); mcp->mb[3] = (unsigned short )req_dma; mcp->mb[6] = (unsigned short )((unsigned int )(req_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(req_dma >> 32ULL); mcp->out_mb = mcp->out_mb | 206U; if ((ha->device_type & 134217728U) != 0U) { mcp->mb[4] = (unsigned short )(risc_code_size >> 16); mcp->mb[5] = (unsigned short )risc_code_size; mcp->out_mb = mcp->out_mb | 48U; } else { mcp->mb[4] = (unsigned short )risc_code_size; mcp->out_mb = mcp->out_mb | 16U; } mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4131, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4132, "Done %s.\n", "qla2x00_load_ram"); } return (rval); } } int qla2x00_execute_fw(scsi_qla_host_t *vha , uint32_t risc_addr ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct nvram_81xx *nv ; { ha = vha->hw; mcp = & mc; ql_dbg(536903680U, vha, 4133, "Entered %s.\n", "qla2x00_execute_fw"); mcp->mb[0] = 2U; mcp->out_mb = 1U; mcp->in_mb = 1U; if ((ha->device_type & 134217728U) != 0U) { mcp->mb[1] = (unsigned short )(risc_addr >> 16); mcp->mb[2] = (unsigned short )risc_addr; mcp->mb[3] = 0U; if ((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { nv = (struct nvram_81xx *)ha->nvram; mcp->mb[4] = (unsigned int )((uint16_t )nv->enhanced_features) & 1U; } else { mcp->mb[4] = 0U; } mcp->out_mb = mcp->out_mb | 30U; mcp->in_mb = mcp->in_mb | 2U; } else { mcp->mb[1] = (unsigned short )risc_addr; mcp->out_mb = mcp->out_mb | 2U; if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { mcp->mb[2] = 0U; mcp->out_mb = mcp->out_mb | 4U; } else { } } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4134, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else if ((ha->device_type & 134217728U) != 0U) { ql_dbg(536903680U, vha, 4135, "Done exchanges=%x.\n", (int )mcp->mb[1]); } 
else { ql_dbg(536903680U, vha, 4136, "Done %s.\n", "qla2x00_execute_fw"); } return (rval); } } int qla2x00_get_fw_version(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4137, "Entered %s.\n", "qla2x00_get_fw_version"); mcp->mb[0] = 8U; mcp->out_mb = 1U; mcp->in_mb = 127U; if ((((vha->hw)->device_type & 8192U) != 0U || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { mcp->in_mb = mcp->in_mb | 16128U; } else { } if ((ha->device_type & 134217728U) != 0U) { mcp->in_mb = mcp->in_mb | 229376U; } else { } mcp->flags = 0U; mcp->tov = 30U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { goto failed; } else { } ha->fw_major_version = mcp->mb[1]; ha->fw_minor_version = mcp->mb[2]; ha->fw_subminor_version = mcp->mb[3]; ha->fw_attributes = mcp->mb[6]; if ((int )(vha->hw)->device_type & 1 || ((vha->hw)->device_type & 2U) != 0U) { ha->fw_memory_size = 131071U; } else { ha->fw_memory_size = (uint32_t )(((int )mcp->mb[5] << 16) | (int )mcp->mb[4]); } if ((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { ha->mpi_version[0] = (uint8_t )mcp->mb[10]; ha->mpi_version[1] = (uint8_t )((int )mcp->mb[11] >> 8); ha->mpi_version[2] = (uint8_t )mcp->mb[11]; ha->mpi_capabilities = (uint32_t )(((int )mcp->mb[12] << 16) | (int )mcp->mb[13]); ha->phy_version[0] = (uint8_t )mcp->mb[8]; ha->phy_version[1] = (uint8_t )((int )mcp->mb[9] >> 8); ha->phy_version[2] = (uint8_t )mcp->mb[9]; } else { } if ((ha->device_type & 134217728U) != 0U) { ha->fw_attributes_h = mcp->mb[15]; ha->fw_attributes_ext[0] = mcp->mb[16]; ha->fw_attributes_ext[1] = mcp->mb[17]; ql_dbg(536903680U, vha, 4409, "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", "qla2x00_get_fw_version", (int )mcp->mb[15], (int )mcp->mb[6]); ql_dbg(536903680U, vha, 4399, "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", "qla2x00_get_fw_version", (int )mcp->mb[17], (int )mcp->mb[16]); } else { } failed: ; if (rval != 0) { ql_dbg(536870912U, vha, 4138, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4139, "Done %s.\n", "qla2x00_get_fw_version"); } return (rval); } } int qla2x00_get_fw_options(scsi_qla_host_t *vha , uint16_t *fwopts ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4140, "Entered %s.\n", "qla2x00_get_fw_options"); mcp->mb[0] = 40U; mcp->out_mb = 1U; mcp->in_mb = 15U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4141, "Failed=%x.\n", rval); } else { *fwopts = mcp->mb[0]; *(fwopts + 1UL) = mcp->mb[1]; *(fwopts + 2UL) = mcp->mb[2]; *(fwopts + 3UL) = mcp->mb[3]; ql_dbg(536903680U, vha, 4142, "Done %s.\n", "qla2x00_get_fw_options"); } return (rval); } } int qla2x00_set_fw_options(scsi_qla_host_t *vha , uint16_t *fwopts ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4143, "Entered %s.\n", "qla2x00_set_fw_options"); mcp->mb[0] = 56U; mcp->mb[1] = *(fwopts + 1UL); mcp->mb[2] = *(fwopts + 2UL); mcp->mb[3] = *(fwopts + 3UL); mcp->out_mb = 15U; mcp->in_mb = 1U; if (((vha->hw)->device_type & 134217728U) != 0U) { mcp->in_mb = mcp->in_mb | 2U; } else { mcp->mb[10] = *(fwopts + 10UL); mcp->mb[11] = *(fwopts + 11UL); mcp->mb[12] = 0U; mcp->out_mb = mcp->out_mb | 7168U; } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); *fwopts = mcp->mb[0]; if (rval != 0) { ql_dbg(536870912U, vha, 4144, 
"Failed=%x (%x/%x).\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4145, "Done %s.\n", "qla2x00_set_fw_options"); } return (rval); } } int qla2x00_mbx_reg_test(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4146, "Entered %s.\n", "qla2x00_mbx_reg_test"); mcp->mb[0] = 6U; mcp->mb[1] = 43690U; mcp->mb[2] = 21845U; mcp->mb[3] = 43605U; mcp->mb[4] = 21930U; mcp->mb[5] = 42405U; mcp->mb[6] = 23130U; mcp->mb[7] = 9509U; mcp->out_mb = 255U; mcp->in_mb = 255U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval == 0) { if ((((unsigned int )mcp->mb[1] != 43690U || (unsigned int )mcp->mb[2] != 21845U) || (unsigned int )mcp->mb[3] != 43605U) || (unsigned int )mcp->mb[4] != 21930U) { rval = 258; } else { } if (((unsigned int )mcp->mb[5] != 42405U || (unsigned int )mcp->mb[6] != 23130U) || (unsigned int )mcp->mb[7] != 9509U) { rval = 258; } else { } } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4147, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4148, "Done %s.\n", "qla2x00_mbx_reg_test"); } return (rval); } } int qla2x00_verify_checksum(scsi_qla_host_t *vha , uint32_t risc_addr ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4149, "Entered %s.\n", "qla2x00_verify_checksum"); mcp->mb[0] = 7U; mcp->out_mb = 1U; mcp->in_mb = 1U; if (((vha->hw)->device_type & 134217728U) != 0U) { mcp->mb[1] = (unsigned short )(risc_addr >> 16); mcp->mb[2] = (unsigned short )risc_addr; mcp->out_mb = mcp->out_mb | 6U; mcp->in_mb = mcp->in_mb | 6U; } else { mcp->mb[1] = (unsigned short )risc_addr; mcp->out_mb = mcp->out_mb | 2U; mcp->in_mb = mcp->in_mb | 2U; } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4150, "Failed=%x chm sum=%x.\n", rval, ((vha->hw)->device_type & 134217728U) != 0U ? ((int )mcp->mb[2] << 16) | (int )mcp->mb[1] : (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4151, "Done %s.\n", "qla2x00_verify_checksum"); } return (rval); } } int qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha , void *buffer , dma_addr_t phys_addr , size_t size , uint32_t tov ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; sts_entry_t *sts_entry ; { mcp = & mc; ql_dbg(536903680U, vha, 4152, "Entered %s.\n", "qla2x00_issue_iocb_timeout"); mcp->mb[0] = 84U; mcp->mb[1] = 0U; mcp->mb[2] = (unsigned short )((unsigned int )phys_addr >> 16); mcp->mb[3] = (unsigned short )phys_addr; mcp->mb[6] = (unsigned short )((unsigned int )(phys_addr >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(phys_addr >> 32ULL); mcp->out_mb = 207U; mcp->in_mb = 5U; mcp->tov = tov; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4153, "Failed=%x.\n", rval); } else { sts_entry = (sts_entry_t *)buffer; sts_entry->entry_status = (uint8_t )((int )((signed char )sts_entry->entry_status) & (((vha->hw)->device_type & 134217728U) != 0U ? 
60 : 126)); ql_dbg(536903680U, vha, 4154, "Done %s.\n", "qla2x00_issue_iocb_timeout"); } return (rval); } } int qla2x00_issue_iocb(scsi_qla_host_t *vha , void *buffer , dma_addr_t phys_addr , size_t size ) { int tmp ; { tmp = qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size, 30U); return (tmp); } } int qla2x00_abort_command(srb_t *sp ) { unsigned long flags ; int rval ; uint32_t handle ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; fc_port_t *fcport ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct req_que *req ; struct scsi_cmnd *cmd ; raw_spinlock_t *tmp ; { flags = 0UL; handle = 0U; mcp = & mc; fcport = sp->fcport; vha = fcport->vha; ha = vha->hw; req = vha->req; cmd = sp->u.scmd.cmd; ql_dbg(536903680U, vha, 4155, "Entered %s.\n", "qla2x00_abort_command"); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); handle = 1U; goto ldv_60860; ldv_60859: ; if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )sp) { goto ldv_60858; } else { } handle = handle + 1U; ldv_60860: ; if ((uint32_t )req->num_outstanding_cmds > handle) { goto ldv_60859; } else { } ldv_60858: spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((uint32_t )req->num_outstanding_cmds == handle) { return (258); } else { } mcp->mb[0] = 21U; if ((int )ha->device_type < 0) { mcp->mb[1] = fcport->loop_id; } else { mcp->mb[1] = (int )fcport->loop_id << 8U; } mcp->mb[2] = (unsigned short )handle; mcp->mb[3] = (unsigned short )(handle >> 16); mcp->mb[6] = (unsigned short )(cmd->device)->lun; mcp->out_mb = 79U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4156, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4157, "Done %s.\n", "qla2x00_abort_command"); } return (rval); } } int qla2x00_abort_target(struct fc_port *fcport , unsigned int l , int tag ) { int rval ; int rval2 ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; scsi_qla_host_t *vha ; struct req_que *req ; struct rsp_que *rsp ; { mcp = & mc; l = l; vha = fcport->vha; ql_dbg(536903680U, vha, 4158, "Entered %s.\n", "qla2x00_abort_target"); req = *((vha->hw)->req_q_map); rsp = req->rsp; mcp->mb[0] = 23U; mcp->out_mb = 519U; if ((int )(vha->hw)->device_type < 0) { mcp->mb[1] = fcport->loop_id; mcp->mb[10] = 0U; mcp->out_mb = mcp->out_mb | 1024U; } else { mcp->mb[1] = (int )fcport->loop_id << 8U; } mcp->mb[2] = (vha->hw)->loop_reset_delay; mcp->mb[9] = vha->vp_idx; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536903680U, vha, 4159, "Failed=%x.\n", rval); } else { } rval2 = qla2x00_marker(vha, req, rsp, (int )fcport->loop_id, 0, 1); if (rval2 != 0) { ql_dbg(536870912U, vha, 4160, "Failed to issue marker IOCB (%x).\n", rval2); } else { ql_dbg(536903680U, vha, 4161, "Done %s.\n", "qla2x00_abort_target"); } return (rval); } } int qla2x00_lun_reset(struct fc_port *fcport , unsigned int l , int tag ) { int rval ; int rval2 ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; scsi_qla_host_t *vha ; struct req_que *req ; struct rsp_que *rsp ; { mcp = & mc; vha = fcport->vha; ql_dbg(536903680U, vha, 4162, "Entered %s.\n", "qla2x00_lun_reset"); req = *((vha->hw)->req_q_map); rsp = req->rsp; mcp->mb[0] = 126U; mcp->out_mb = 527U; if ((int )(vha->hw)->device_type < 0) { mcp->mb[1] = fcport->loop_id; } else { mcp->mb[1] = (int )fcport->loop_id << 8U; } mcp->mb[2] = (uint16_t )l; mcp->mb[3] = 0U; mcp->mb[9] = vha->vp_idx; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = 
qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4163, "Failed=%x.\n", rval); } else { } rval2 = qla2x00_marker(vha, req, rsp, (int )fcport->loop_id, (int )((uint16_t )l), 0); if (rval2 != 0) { ql_dbg(536870912U, vha, 4164, "Failed to issue marker IOCB (%x).\n", rval2); } else { ql_dbg(536903680U, vha, 4165, "Done %s.\n", "qla2x00_lun_reset"); } return (rval); } } int qla2x00_get_adapter_id(scsi_qla_host_t *vha , uint16_t *id , uint8_t *al_pa , uint8_t *area , uint8_t *domain , uint16_t *top , uint16_t *sw_cap ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4166, "Entered %s.\n", "qla2x00_get_adapter_id"); mcp->mb[0] = 32U; mcp->mb[9] = vha->vp_idx; mcp->out_mb = 513U; mcp->in_mb = 719U; if (((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 16384U) != 0U) || ((vha->hw)->device_type & 65536U) != 0U) || ((vha->hw)->device_type & 262144U) != 0U) { mcp->in_mb = mcp->in_mb | 15360U; } else { } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((unsigned int )mcp->mb[0] == 16389U) { rval = 5; } else if ((unsigned int )mcp->mb[0] == 16385U) { rval = 1; } else { } *id = mcp->mb[1]; *al_pa = (unsigned char )mcp->mb[2]; *area = (unsigned char )((int )mcp->mb[2] >> 8); *domain = (unsigned char )mcp->mb[3]; *top = mcp->mb[6]; *sw_cap = mcp->mb[7]; if (rval != 0) { ql_dbg(536870912U, vha, 4167, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4168, "Done %s.\n", "qla2x00_get_adapter_id"); if (((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 16384U) != 0U) || ((vha->hw)->device_type & 65536U) != 0U) || ((vha->hw)->device_type & 262144U) != 0U) { vha->fcoe_vlan_id = (unsigned int )mcp->mb[9] & 4095U; vha->fcoe_fcf_idx = mcp->mb[10]; vha->fcoe_vn_port_mac[5] = (uint8_t )((int )mcp->mb[11] >> 8); vha->fcoe_vn_port_mac[4] = (uint8_t )mcp->mb[11]; vha->fcoe_vn_port_mac[3] = (uint8_t )((int )mcp->mb[12] >> 8); vha->fcoe_vn_port_mac[2] = (uint8_t )mcp->mb[12]; vha->fcoe_vn_port_mac[1] = (uint8_t )((int )mcp->mb[13] >> 8); vha->fcoe_vn_port_mac[0] = (uint8_t )mcp->mb[13]; } else { } } return (rval); } } int qla2x00_get_retry_cnt(scsi_qla_host_t *vha , uint8_t *retry_cnt , uint8_t *tov , uint16_t *r_a_tov ) { int rval ; uint16_t ratov ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4169, "Entered %s.\n", "qla2x00_get_retry_cnt"); mcp->mb[0] = 34U; mcp->out_mb = 1U; mcp->in_mb = 15U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4170, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { *r_a_tov = (uint16_t )((unsigned int )mcp->mb[3] / 2U); ratov = (uint16_t )((unsigned int )mcp->mb[3] / 20U); if ((int )mcp->mb[1] * (int )ratov > (int )*retry_cnt * (int )*tov) { *retry_cnt = (unsigned char )mcp->mb[1]; *tov = (uint8_t )ratov; } else { } ql_dbg(536903680U, vha, 4171, "Done %s mb3=%d ratov=%d.\n", "qla2x00_get_retry_cnt", (int )mcp->mb[3], (int )ratov); } return (rval); } } int qla2x00_init_firmware(scsi_qla_host_t *vha , uint16_t size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4172, "Entered %s.\n", "qla2x00_init_firmware"); if (((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) && ql2xdbwr != 0) { qla82xx_wr_32(ha, ha->nxdb_wr_ptr, (u32 )(((int )ha->portnum << 5) | 4)); } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { mcp->mb[0] = 72U; } else { mcp->mb[0] = 96U; } 
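/*
 * The statements below scatter the 64-bit DMA address of the init control
 * block across four 16-bit mailbox registers: low dword in mb[3]/mb[2],
 * high dword in mb[7]/mb[6].  Equivalent packing as an illustrative sketch
 * (hypothetical helper, not defined in this file):
 *
 *     static void pack_dma64(mbx_cmd_t *m, dma_addr_t a)
 *     {
 *             m->mb[3] = (uint16_t)a;          // bits 15..0
 *             m->mb[2] = (uint16_t)(a >> 16);  // bits 31..16
 *             m->mb[7] = (uint16_t)(a >> 32);  // bits 47..32
 *             m->mb[6] = (uint16_t)(a >> 48);  // bits 63..48
 *     }
 */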
mcp->mb[1] = 0U; mcp->mb[2] = (unsigned short )((unsigned int )ha->init_cb_dma >> 16); mcp->mb[3] = (unsigned short )ha->init_cb_dma; mcp->mb[6] = (unsigned short )((unsigned int )(ha->init_cb_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(ha->init_cb_dma >> 32ULL); mcp->out_mb = 207U; if (((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) && (unsigned int )(ha->ex_init_cb)->ex_version != 0U) { mcp->mb[1] = 1U; mcp->mb[10] = (unsigned short )((unsigned int )ha->ex_init_cb_dma >> 16); mcp->mb[11] = (unsigned short )ha->ex_init_cb_dma; mcp->mb[12] = (unsigned short )((unsigned int )(ha->ex_init_cb_dma >> 32ULL) >> 16); mcp->mb[13] = (unsigned short )(ha->ex_init_cb_dma >> 32ULL); mcp->mb[14] = 64U; mcp->out_mb = mcp->out_mb | 31744U; } else { } mcp->in_mb = 7U; if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { mcp->in_mb = mcp->in_mb | 8U; } else { } mcp->buf_size = (long )size; mcp->flags = 2U; mcp->tov = 30U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4173, "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2], (int )mcp->mb[3]); } else { ql_dbg(536903680U, vha, 4174, "Done %s.\n", "qla2x00_init_firmware"); } return (rval); } } int qla2x00_get_node_name_list(scsi_qla_host_t *vha , void **out_data , int *out_len ) { struct qla_hw_data *ha ; struct qla_port_24xx_data *list ; void *pmap ; mbx_cmd_t mc ; dma_addr_t pmap_dma ; ulong dma_size ; int rval ; int left ; void *tmp ; size_t __len ; void *__ret ; { ha = vha->hw; list = (struct qla_port_24xx_data *)0; left = 1; goto ldv_60941; ldv_60940: dma_size = (unsigned long )left * 12UL; pmap = dma_alloc_attrs(& (ha->pdev)->dev, dma_size, & pmap_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )pmap == (unsigned long )((void *)0)) { ql_log(1U, vha, 4415, "%s(%ld): DMA Alloc failed of %ld\n", "qla2x00_get_node_name_list", vha->host_no, dma_size); rval = 259; goto out; } else { } mc.mb[0] = 117U; mc.mb[1] = 10U; mc.mb[2] = (unsigned short )((unsigned int )pmap_dma >> 16); mc.mb[3] = (unsigned short )pmap_dma; mc.mb[6] = (unsigned short )((unsigned int )(pmap_dma >> 32ULL) >> 16); mc.mb[7] = (unsigned short )(pmap_dma >> 32ULL); mc.mb[8] = (uint16_t )dma_size; mc.out_mb = 463U; mc.in_mb = 3U; mc.tov = 30U; mc.flags = 1U; rval = qla2x00_mailbox_command(vha, & mc); if (rval != 0) { if ((unsigned int )mc.mb[0] == 16389U && (unsigned int )mc.mb[1] == 10U) { left = (int )((unsigned int )mc.mb[2] / 12U + (unsigned int )left); goto restart; } else { } goto out_free; } else { } left = 0; tmp = kzalloc(dma_size, 208U); list = (struct qla_port_24xx_data *)tmp; if ((unsigned long )list == (unsigned long )((struct qla_port_24xx_data *)0)) { ql_log(1U, vha, 4416, "%s(%ld): failed to allocate node names list structure.\n", "qla2x00_get_node_name_list", vha->host_no); rval = 259; goto out_free; } else { } __len = dma_size; __ret = __builtin_memcpy((void *)list, (void const *)pmap, __len); restart: dma_free_attrs(& (ha->pdev)->dev, dma_size, pmap, pmap_dma, (struct dma_attrs *)0); ldv_60941: ; if (left > 0) { goto ldv_60940; } else { } *out_data = (void *)list; *out_len = (int )dma_size; out: ; return (rval); out_free: dma_free_attrs(& (ha->pdev)->dev, dma_size, pmap, pmap_dma, (struct dma_attrs *)0); return (rval); } } int qla2x00_get_port_database(scsi_qla_host_t *vha , fc_port_t *fcport , uint8_t opt ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; port_database_t *pd ; 
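/* qla2x00_get_port_database(): read the firmware port database for 'fcport'
 * into a DMA-pool buffer and copy the node name, port name and port ID back
 * into the fc_port. Two layouts are handled, selected by device type: the
 * ISP24xx-style struct port_database_24xx (pd24) and the legacy
 * port_database_t (pd). */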
struct port_database_24xx *pd24 ; dma_addr_t pd_dma ; struct qla_hw_data *ha ; void *tmp ; int _max1 ; int _max2 ; uint64_t zero ; int tmp___0 ; int tmp___1 ; size_t __len ; void *__ret ; size_t __len___0 ; void *__ret___0 ; uint64_t zero___0 ; int tmp___2 ; int tmp___3 ; size_t __len___1 ; void *__ret___1 ; size_t __len___2 ; void *__ret___2 ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4175, "Entered %s.\n", "qla2x00_get_port_database"); pd24 = (struct port_database_24xx *)0; tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & pd_dma); pd = (port_database_t *)tmp; if ((unsigned long )pd == (unsigned long )((port_database_t *)0)) { ql_log(1U, vha, 4176, "Failed to allocate port database structure.\n"); return (259); } else { } _max1 = 128; _max2 = 64; memset((void *)pd, 0, (size_t )(_max1 > _max2 ? _max1 : _max2)); mcp->mb[0] = 100U; if ((unsigned int )opt != 0U && (ha->device_type & 134217728U) == 0U) { mcp->mb[0] = 71U; } else { } mcp->mb[2] = (unsigned short )((unsigned int )pd_dma >> 16); mcp->mb[3] = (unsigned short )pd_dma; mcp->mb[6] = (unsigned short )((unsigned int )(pd_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(pd_dma >> 32ULL); mcp->mb[9] = vha->vp_idx; mcp->out_mb = 717U; mcp->in_mb = 1U; if ((ha->device_type & 134217728U) != 0U) { mcp->mb[1] = fcport->loop_id; mcp->mb[10] = (uint16_t )opt; mcp->out_mb = mcp->out_mb | 1026U; mcp->in_mb = mcp->in_mb | 2U; } else if ((int )ha->device_type < 0) { mcp->mb[1] = fcport->loop_id; mcp->mb[10] = (uint16_t )opt; mcp->out_mb = mcp->out_mb | 1026U; } else { mcp->mb[1] = (uint16_t )((int )((short )((int )fcport->loop_id << 8)) | (int )((short )opt)); mcp->out_mb = mcp->out_mb | 2U; } mcp->buf_size = (ha->device_type & 134217728U) != 0U ? 64L : 128L; mcp->flags = 1U; mcp->tov = (uint32_t )((int )ha->login_timeout * 2 + (int )((unsigned int )ha->login_timeout / 2U)); rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { goto gpd_error_out; } else { } if ((ha->device_type & 134217728U) != 0U) { zero = 0ULL; pd24 = (struct port_database_24xx *)pd; if ((unsigned int )pd24->current_login_state != 6U && (unsigned int )pd24->last_login_state != 6U) { ql_dbg(536870912U, vha, 4177, "Unable to verify login-state (%x/%x) for loop_id %x.\n", (int )pd24->current_login_state, (int )pd24->last_login_state, (int )fcport->loop_id); rval = 258; goto gpd_error_out; } else { } if ((unsigned int )fcport->loop_id == 4096U) { rval = 10; goto gpd_error_out; } else { tmp___0 = memcmp((void const *)(& fcport->port_name), (void const *)(& zero), 8UL); if (tmp___0 != 0) { tmp___1 = memcmp((void const *)(& fcport->port_name), (void const *)(& pd24->port_name), 8UL); if (tmp___1 != 0) { rval = 10; goto gpd_error_out; } else { } } else { } } __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& fcport->node_name), (void const *)(& pd24->node_name), __len); } else { __ret = __builtin_memcpy((void *)(& fcport->node_name), (void const *)(& pd24->node_name), __len); } __len___0 = 8UL; if (__len___0 > 63UL) { __ret___0 = __memcpy((void *)(& fcport->port_name), (void const *)(& pd24->port_name), __len___0); } else { __ret___0 = __builtin_memcpy((void *)(& fcport->port_name), (void const *)(& pd24->port_name), __len___0); } fcport->d_id.b.domain = pd24->port_id[0]; fcport->d_id.b.area = pd24->port_id[1]; fcport->d_id.b.al_pa = pd24->port_id[2]; fcport->d_id.b.rsvd_1 = 0U; if (((int )pd24->prli_svc_param_word_3[0] & 16) == 0) { fcport->port_type = 4; } else { fcport->port_type = 5; } fcport->supported_classes = ((int )pd24->flags & 16) != 0 ? 
4U : 8U; if ((int )((signed char )pd24->prli_svc_param_word_3[0]) < 0) { fcport->flags = fcport->flags | 16U; } else { } } else { zero___0 = 0ULL; if ((unsigned int )pd->master_state != 6U && (unsigned int )pd->slave_state != 6U) { ql_dbg(536870912U, vha, 4106, "Unable to verify login-state (%x/%x) - portid=%02x%02x%02x.\n", (int )pd->master_state, (int )pd->slave_state, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); rval = 258; goto gpd_error_out; } else { } if ((unsigned int )fcport->loop_id == 4096U) { rval = 10; goto gpd_error_out; } else { tmp___2 = memcmp((void const *)(& fcport->port_name), (void const *)(& zero___0), 8UL); if (tmp___2 != 0) { tmp___3 = memcmp((void const *)(& fcport->port_name), (void const *)(& pd->port_name), 8UL); if (tmp___3 != 0) { rval = 10; goto gpd_error_out; } else { } } else { } } __len___1 = 8UL; if (__len___1 > 63UL) { __ret___1 = __memcpy((void *)(& fcport->node_name), (void const *)(& pd->node_name), __len___1); } else { __ret___1 = __builtin_memcpy((void *)(& fcport->node_name), (void const *)(& pd->node_name), __len___1); } __len___2 = 8UL; if (__len___2 > 63UL) { __ret___2 = __memcpy((void *)(& fcport->port_name), (void const *)(& pd->port_name), __len___2); } else { __ret___2 = __builtin_memcpy((void *)(& fcport->port_name), (void const *)(& pd->port_name), __len___2); } fcport->d_id.b.domain = pd->port_id[0]; fcport->d_id.b.area = pd->port_id[3]; fcport->d_id.b.al_pa = pd->port_id[2]; fcport->d_id.b.rsvd_1 = 0U; if (((int )pd->prli_svc_param_word_3[0] & 16) == 0) { fcport->port_type = 4; } else { fcport->port_type = 5; } fcport->supported_classes = ((int )pd->options & 16) != 0 ? 4U : 8U; } gpd_error_out: dma_pool_free(ha->s_dma_pool, (void *)pd, pd_dma); if (rval != 0) { ql_dbg(536870912U, vha, 4178, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4179, "Done %s.\n", "qla2x00_get_port_database"); } return (rval); } } int qla2x00_get_firmware_state(scsi_qla_host_t *vha , uint16_t *states ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4180, "Entered %s.\n", "qla2x00_get_firmware_state"); mcp->mb[0] = 105U; mcp->out_mb = 1U; if (((vha->hw)->device_type & 134217728U) != 0U) { mcp->in_mb = 63U; } else { mcp->in_mb = 3U; } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); *states = mcp->mb[1]; if (((vha->hw)->device_type & 134217728U) != 0U) { *(states + 1UL) = mcp->mb[2]; *(states + 2UL) = mcp->mb[3]; *(states + 3UL) = mcp->mb[4]; *(states + 4UL) = mcp->mb[5]; } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4181, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4182, "Done %s.\n", "qla2x00_get_firmware_state"); } return (rval); } } int qla2x00_get_port_name(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t *name , uint8_t opt ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4183, "Entered %s.\n", "qla2x00_get_port_name"); mcp->mb[0] = 106U; mcp->mb[9] = vha->vp_idx; mcp->out_mb = 515U; if ((int )(vha->hw)->device_type < 0) { mcp->mb[1] = loop_id; mcp->mb[10] = (uint16_t )opt; mcp->out_mb = mcp->out_mb | 1024U; } else { mcp->mb[1] = (uint16_t )((int )((short )((int )loop_id << 8)) | (int )((short )opt)); } mcp->in_mb = 207U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4184, "Failed=%x.\n", rval); } else { if ((unsigned long )name != (unsigned long )((uint8_t *)0U)) { *name = 
(unsigned char )((int )mcp->mb[2] >> 8); *(name + 1UL) = (unsigned char )mcp->mb[2]; *(name + 2UL) = (unsigned char )((int )mcp->mb[3] >> 8); *(name + 3UL) = (unsigned char )mcp->mb[3]; *(name + 4UL) = (unsigned char )((int )mcp->mb[6] >> 8); *(name + 5UL) = (unsigned char )mcp->mb[6]; *(name + 6UL) = (unsigned char )((int )mcp->mb[7] >> 8); *(name + 7UL) = (unsigned char )mcp->mb[7]; } else { } ql_dbg(536903680U, vha, 4185, "Done %s.\n", "qla2x00_get_port_name"); } return (rval); } } int qla24xx_link_initialize(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4434, "Entered %s.\n", "qla24xx_link_initialize"); if (((vha->hw)->device_type & 134217728U) == 0U || (((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 16384U) != 0U) || ((vha->hw)->device_type & 65536U) != 0U) || ((vha->hw)->device_type & 262144U) != 0U)) { return (258); } else { } mcp->mb[0] = 114U; mcp->mb[1] = 16U; if ((unsigned int )(vha->hw)->operating_mode == 0U) { mcp->mb[1] = (uint16_t )((unsigned int )mcp->mb[1] | 64U); } else { mcp->mb[1] = (uint16_t )((unsigned int )mcp->mb[1] | 32U); } mcp->mb[2] = 0U; mcp->mb[3] = 0U; mcp->out_mb = 15U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4435, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4436, "Done %s.\n", "qla24xx_link_initialize"); } return (rval); } } int qla2x00_lip_reset(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4186, "Entered %s.\n", "qla2x00_lip_reset"); if (((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 16384U) != 0U) || ((vha->hw)->device_type & 65536U) != 0U) || ((vha->hw)->device_type & 262144U) != 0U) { mcp->mb[0] = 114U; mcp->mb[1] = 2U; mcp->mb[2] = 0U; mcp->out_mb = 7U; } else if (((vha->hw)->device_type & 134217728U) != 0U) { mcp->mb[0] = 114U; mcp->mb[1] = 64U; mcp->mb[2] = 0U; mcp->mb[3] = (vha->hw)->loop_reset_delay; mcp->out_mb = 15U; } else { mcp->mb[0] = 108U; mcp->out_mb = 15U; if ((int )(vha->hw)->device_type < 0) { mcp->mb[1] = 255U; mcp->mb[10] = 0U; mcp->out_mb = mcp->out_mb | 1024U; } else { mcp->mb[1] = 65280U; } mcp->mb[2] = (vha->hw)->loop_reset_delay; mcp->mb[3] = 0U; } mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4187, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4188, "Done %s.\n", "qla2x00_lip_reset"); } return (rval); } } int qla2x00_send_sns(scsi_qla_host_t *vha , dma_addr_t sns_phys_address , uint16_t cmd_size , size_t buf_size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4189, "Entered %s.\n", "qla2x00_send_sns"); ql_dbg(536903680U, vha, 4190, "Retry cnt=%d ratov=%d total tov=%d.\n", (int )(vha->hw)->retry_count, (int )(vha->hw)->login_timeout, mcp->tov); mcp->mb[0] = 110U; mcp->mb[1] = cmd_size; mcp->mb[2] = (unsigned short )((unsigned int )sns_phys_address >> 16); mcp->mb[3] = (unsigned short )sns_phys_address; mcp->mb[6] = (unsigned short )((unsigned int )(sns_phys_address >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(sns_phys_address >> 32ULL); mcp->out_mb = 207U; mcp->in_mb = 3U; mcp->buf_size = (long )buf_size; mcp->flags = 3U; mcp->tov = (uint32_t )((int )(vha->hw)->login_timeout * 2 + (int )((unsigned int )(vha->hw)->login_timeout / 2U)); rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4191, 
"Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4192, "Done %s.\n", "qla2x00_send_sns"); } return (rval); } } int qla24xx_login_fabric(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa , uint16_t *mb , uint8_t opt ) { int rval ; struct logio_entry_24xx *lg ; dma_addr_t lg_dma ; uint32_t iop[2U] ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; void *tmp ; { ha = vha->hw; ql_dbg(536903680U, vha, 4193, "Entered %s.\n", "qla24xx_login_fabric"); if (*((unsigned long *)ha + 2UL) != 0UL) { req = *(ha->req_q_map); } else { req = vha->req; } rsp = req->rsp; tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & lg_dma); lg = (struct logio_entry_24xx *)tmp; if ((unsigned long )lg == (unsigned long )((struct logio_entry_24xx *)0)) { ql_log(1U, vha, 4194, "Failed to allocate login IOCB.\n"); return (259); } else { } memset((void *)lg, 0, 64UL); lg->entry_type = 82U; lg->entry_count = 1U; lg->handle = ((unsigned int )req->id << 16) | lg->handle; lg->nport_handle = loop_id; lg->control_flags = 0U; if ((int )opt & 1) { lg->control_flags = (uint16_t )((unsigned int )lg->control_flags | 16U); } else { } if (((int )opt & 2) != 0) { lg->control_flags = (uint16_t )((unsigned int )lg->control_flags | 32U); } else { } lg->port_id[0] = al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; lg->vp_index = (uint8_t )vha->vp_idx; rval = qla2x00_issue_iocb_timeout(vha, (void *)lg, lg_dma, 0UL, (uint32_t )(((int )((unsigned int )ha->r_a_tov / 10U) + 1) * 2)); if (rval != 0) { ql_dbg(536870912U, vha, 4195, "Failed to issue login IOCB (%x).\n", rval); } else if ((unsigned int )lg->entry_status != 0U) { ql_dbg(536870912U, vha, 4196, "Failed to complete IOCB -- error status (%x).\n", (int )lg->entry_status); rval = 258; } else if ((unsigned int )lg->comp_status != 0U) { iop[0] = lg->io_parameter[0]; iop[1] = lg->io_parameter[1]; ql_dbg(536870912U, vha, 4197, "Failed to complete IOCB -- completion status (%x) ioparam=%x/%x.\n", (int )lg->comp_status, iop[0], iop[1]); switch (iop[0]) { case 26U: *mb = 16391U; *(mb + 1UL) = (unsigned short )iop[1]; goto ldv_61034; case 27U: *mb = 16392U; goto ldv_61034; case 1U: ; case 2U: ; case 3U: ; case 4U: ; case 5U: ; case 7U: ; case 9U: ; case 10U: ; case 24U: ; case 25U: ; case 28U: ; case 29U: ; case 31U: ; default: *mb = 16389U; goto ldv_61034; } ldv_61034: ; } else { ql_dbg(536903680U, vha, 4198, "Done %s.\n", "qla24xx_login_fabric"); iop[0] = lg->io_parameter[0]; *mb = 16384U; *(mb + 1UL) = 0U; if ((iop[0] & 16U) != 0U) { if ((iop[0] & 256U) != 0U) { *(mb + 1UL) = (uint16_t )((unsigned int )*(mb + 1UL) | 2U); } else { } } else { *(mb + 1UL) = 1U; } *(mb + 10UL) = 0U; if (lg->io_parameter[7] != 0U || lg->io_parameter[8] != 0U) { *(mb + 10UL) = (uint16_t )((unsigned int )*(mb + 10UL) | 1U); } else { } if (lg->io_parameter[9] != 0U || lg->io_parameter[10] != 0U) { *(mb + 10UL) = (uint16_t )((unsigned int )*(mb + 10UL) | 2U); } else { } if ((lg->io_parameter[0] & 128U) != 0U) { *(mb + 10UL) = (uint16_t )((unsigned int )*(mb + 10UL) | 128U); } else { } } dma_pool_free(ha->s_dma_pool, (void *)lg, lg_dma); return (rval); } } int qla2x00_login_fabric(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa , uint16_t *mb , uint8_t opt ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4199, "Entered %s.\n", "qla2x00_login_fabric"); mcp->mb[0] = 111U; mcp->out_mb = 15U; if 
((int )ha->device_type < 0) { mcp->mb[1] = loop_id; mcp->mb[10] = (uint16_t )opt; mcp->out_mb = mcp->out_mb | 1024U; } else { mcp->mb[1] = (uint16_t )((int )((short )((int )loop_id << 8)) | (int )((short )opt)); } mcp->mb[2] = (uint16_t )domain; mcp->mb[3] = (uint16_t )((int )((short )((int )area << 8)) | (int )((short )al_pa)); mcp->in_mb = 199U; mcp->tov = (uint32_t )((int )ha->login_timeout * 2 + (int )((unsigned int )ha->login_timeout / 2U)); mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((unsigned long )mb != (unsigned long )((uint16_t *)0U)) { *mb = mcp->mb[0]; *(mb + 1UL) = mcp->mb[1]; *(mb + 2UL) = mcp->mb[2]; *(mb + 6UL) = mcp->mb[6]; *(mb + 7UL) = mcp->mb[7]; *(mb + 10UL) = 0U; } else { } if (rval != 0) { if (((((unsigned int )mcp->mb[0] == 16385U || (unsigned int )mcp->mb[0] == 16386U) || (unsigned int )mcp->mb[0] == 16387U) || (unsigned int )mcp->mb[0] == 16389U) || (unsigned int )mcp->mb[0] == 16390U) { rval = 0; } else { } ql_dbg(536870912U, vha, 4200, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2]); } else { ql_dbg(536903680U, vha, 4201, "Done %s.\n", "qla2x00_login_fabric"); } return (rval); } } int qla2x00_login_local_device(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t *mb_ret , uint8_t opt ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; int tmp ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4202, "Entered %s.\n", "qla2x00_login_local_device"); if ((ha->device_type & 134217728U) != 0U) { tmp = qla24xx_login_fabric(vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, mb_ret, (int )opt); return (tmp); } else { } mcp->mb[0] = 116U; if ((int )ha->device_type < 0) { mcp->mb[1] = fcport->loop_id; } else { mcp->mb[1] = (int )fcport->loop_id << 8U; } mcp->mb[2] = (uint16_t )opt; mcp->out_mb = 7U; mcp->in_mb = 195U; mcp->tov = (uint32_t )((int )ha->login_timeout * 2 + (int )((unsigned int )ha->login_timeout / 2U)); mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((unsigned long )mb_ret != (unsigned long )((uint16_t *)0U)) { *mb_ret = mcp->mb[0]; *(mb_ret + 1UL) = mcp->mb[1]; *(mb_ret + 6UL) = mcp->mb[6]; *(mb_ret + 7UL) = mcp->mb[7]; } else { } if (rval != 0) { if ((unsigned int )mcp->mb[0] == 16389U || (unsigned int )mcp->mb[0] == 16390U) { rval = 0; } else { } ql_dbg(536870912U, vha, 4203, "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[6], (int )mcp->mb[7]); } else { ql_dbg(536903680U, vha, 4204, "Done %s.\n", "qla2x00_login_local_device"); } return (rval); } } int qla24xx_fabric_logout(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa ) { int rval ; struct logio_entry_24xx *lg ; dma_addr_t lg_dma ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; void *tmp ; { ha = vha->hw; ql_dbg(536903680U, vha, 4205, "Entered %s.\n", "qla24xx_fabric_logout"); tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & lg_dma); lg = (struct logio_entry_24xx *)tmp; if ((unsigned long )lg == (unsigned long )((struct logio_entry_24xx *)0)) { ql_log(1U, vha, 4206, "Failed to allocate logout IOCB.\n"); return (259); } else { } memset((void *)lg, 0, 64UL); if (ql2xmaxqueues > 1) { req = *(ha->req_q_map); } else { req = vha->req; } rsp = req->rsp; lg->entry_type = 82U; lg->entry_count = 1U; lg->handle = ((unsigned int )req->id << 16) | lg->handle; lg->nport_handle = loop_id; lg->control_flags = 152U; lg->port_id[0] = 
al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; lg->vp_index = (uint8_t )vha->vp_idx; rval = qla2x00_issue_iocb_timeout(vha, (void *)lg, lg_dma, 0UL, (uint32_t )(((int )((unsigned int )ha->r_a_tov / 10U) + 1) * 2)); if (rval != 0) { ql_dbg(536870912U, vha, 4207, "Failed to issue logout IOCB (%x).\n", rval); } else if ((unsigned int )lg->entry_status != 0U) { ql_dbg(536870912U, vha, 4208, "Failed to complete IOCB -- error status (%x).\n", (int )lg->entry_status); rval = 258; } else if ((unsigned int )lg->comp_status != 0U) { ql_dbg(536870912U, vha, 4209, "Failed to complete IOCB -- completion status (%x) ioparam=%x/%x.\n", (int )lg->comp_status, lg->io_parameter[0], lg->io_parameter[1]); } else { ql_dbg(536903680U, vha, 4210, "Done %s.\n", "qla24xx_fabric_logout"); } dma_pool_free(ha->s_dma_pool, (void *)lg, lg_dma); return (rval); } } int qla2x00_fabric_logout(scsi_qla_host_t *vha , uint16_t loop_id , uint8_t domain , uint8_t area , uint8_t al_pa ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4211, "Entered %s.\n", "qla2x00_fabric_logout"); mcp->mb[0] = 113U; mcp->out_mb = 3U; if ((int )(vha->hw)->device_type < 0) { mcp->mb[1] = loop_id; mcp->mb[10] = 0U; mcp->out_mb = mcp->out_mb | 1024U; } else { mcp->mb[1] = (int )loop_id << 8U; } mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4212, "Failed=%x mb[1]=%x.\n", rval, (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4213, "Done %s.\n", "qla2x00_fabric_logout"); } return (rval); } } int qla2x00_full_login_lip(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4214, "Entered %s.\n", "qla2x00_full_login_lip"); mcp->mb[0] = 114U; mcp->mb[1] = ((vha->hw)->device_type & 134217728U) != 0U ? 
8U : 0U; mcp->mb[2] = 0U; mcp->mb[3] = 0U; mcp->out_mb = 15U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4215, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4216, "Done %s.\n", "qla2x00_full_login_lip"); } return (rval); } } int qla2x00_get_id_list(scsi_qla_host_t *vha , void *id_list , dma_addr_t id_list_dma , uint16_t *entries ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4217, "Entered %s.\n", "qla2x00_get_id_list"); if ((unsigned long )id_list == (unsigned long )((void *)0)) { return (258); } else { } mcp->mb[0] = 124U; mcp->out_mb = 1U; if (((vha->hw)->device_type & 134217728U) != 0U) { mcp->mb[2] = (unsigned short )((unsigned int )id_list_dma >> 16); mcp->mb[3] = (unsigned short )id_list_dma; mcp->mb[6] = (unsigned short )((unsigned int )(id_list_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(id_list_dma >> 32ULL); mcp->mb[8] = 0U; mcp->mb[9] = vha->vp_idx; mcp->out_mb = mcp->out_mb | 972U; } else { mcp->mb[1] = (unsigned short )((unsigned int )id_list_dma >> 16); mcp->mb[2] = (unsigned short )id_list_dma; mcp->mb[3] = (unsigned short )((unsigned int )(id_list_dma >> 32ULL) >> 16); mcp->mb[6] = (unsigned short )(id_list_dma >> 32ULL); mcp->out_mb = mcp->out_mb | 78U; } mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4218, "Failed=%x.\n", rval); } else { *entries = mcp->mb[1]; ql_dbg(536903680U, vha, 4219, "Done %s.\n", "qla2x00_get_id_list"); } return (rval); } } int qla2x00_get_resource_cnts(scsi_qla_host_t *vha , uint16_t *cur_xchg_cnt , uint16_t *orig_xchg_cnt , uint16_t *cur_iocb_cnt , uint16_t *orig_iocb_cnt , uint16_t *max_npiv_vports , uint16_t *max_fcfs ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4220, "Entered %s.\n", "qla2x00_get_resource_cnts"); mcp->mb[0] = 66U; mcp->out_mb = 1U; mcp->in_mb = 3279U; if (((vha->hw)->device_type & 8192U) != 0U || (((vha->hw)->device_type & 32768U) != 0U || ((vha->hw)->device_type & 65536U) != 0U)) { mcp->in_mb = mcp->in_mb | 4096U; } else { } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4221, "Failed mb[0]=%x.\n", (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4222, "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x mb11=%x mb12=%x.\n", "qla2x00_get_resource_cnts", (int )mcp->mb[1], (int )mcp->mb[2], (int )mcp->mb[3], (int )mcp->mb[6], (int )mcp->mb[7], (int )mcp->mb[10], (int )mcp->mb[11], (int )mcp->mb[12]); if ((unsigned long )cur_xchg_cnt != (unsigned long )((uint16_t *)0U)) { *cur_xchg_cnt = mcp->mb[3]; } else { } if ((unsigned long )orig_xchg_cnt != (unsigned long )((uint16_t *)0U)) { *orig_xchg_cnt = mcp->mb[6]; } else { } if ((unsigned long )cur_iocb_cnt != (unsigned long )((uint16_t *)0U)) { *cur_iocb_cnt = mcp->mb[7]; } else { } if ((unsigned long )orig_iocb_cnt != (unsigned long )((uint16_t *)0U)) { *orig_iocb_cnt = mcp->mb[10]; } else { } if (*((unsigned long *)vha->hw + 2UL) != 0UL && (unsigned long )max_npiv_vports != (unsigned long )((uint16_t *)0U)) { *max_npiv_vports = mcp->mb[11]; } else { } if ((((vha->hw)->device_type & 8192U) != 0U || (((vha->hw)->device_type & 32768U) != 0U || ((vha->hw)->device_type & 65536U) != 0U)) && (unsigned long )max_fcfs != (unsigned long )((uint16_t *)0U)) { *max_fcfs = mcp->mb[12]; } else { } } return (rval); } } int 
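/* qla2x00_get_fcal_position_map(): issue a mailbox command that writes the
 * FC-AL position map into a DMA-pool buffer and, when 'pos_map' is non-NULL,
 * copy the map back to the caller. */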
qla2x00_get_fcal_position_map(scsi_qla_host_t *vha , char *pos_map ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; char *pmap ; dma_addr_t pmap_dma ; struct qla_hw_data *ha ; void *tmp ; size_t __len ; void *__ret ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4223, "Entered %s.\n", "qla2x00_get_fcal_position_map"); tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & pmap_dma); pmap = (char *)tmp; if ((unsigned long )pmap == (unsigned long )((char *)0)) { ql_log(1U, vha, 4224, "Memory alloc failed.\n"); return (259); } else { } memset((void *)pmap, 0, 128UL); mcp->mb[0] = 99U; mcp->mb[2] = (unsigned short )((unsigned int )pmap_dma >> 16); mcp->mb[3] = (unsigned short )pmap_dma; mcp->mb[6] = (unsigned short )((unsigned int )(pmap_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(pmap_dma >> 32ULL); mcp->out_mb = 205U; mcp->in_mb = 3U; mcp->buf_size = 128L; mcp->flags = 1U; mcp->tov = (uint32_t )((int )ha->login_timeout * 2 + (int )((unsigned int )ha->login_timeout / 2U)); rval = qla2x00_mailbox_command(vha, mcp); if (rval == 0) { ql_dbg(537001984U, vha, 4225, "mb0/mb1=%x/%X FC/AL position map size (%x).\n", (int )mcp->mb[0], (int )mcp->mb[1], (unsigned int )*pmap); ql_dump_buffer(537001984U, vha, 4381, (uint8_t *)pmap, (uint32_t )((int )*pmap + 1)); if ((unsigned long )pos_map != (unsigned long )((char *)0)) { __len = 128UL; if (__len > 63UL) { __ret = __memcpy((void *)pos_map, (void const *)pmap, __len); } else { __ret = __builtin_memcpy((void *)pos_map, (void const *)pmap, __len); } } else { } } else { } dma_pool_free(ha->s_dma_pool, (void *)pmap, pmap_dma); if (rval != 0) { ql_dbg(536870912U, vha, 4226, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4227, "Done %s.\n", "qla2x00_get_fcal_position_map"); } return (rval); } } int qla2x00_get_link_status(scsi_qla_host_t *vha , uint16_t loop_id , struct link_statistics *stats , dma_addr_t stats_dma ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; uint32_t *siter ; uint32_t *diter ; uint32_t dwords ; struct qla_hw_data *ha ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t tmp___1 ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4228, "Entered %s.\n", "qla2x00_get_link_status"); mcp->mb[0] = 107U; mcp->mb[2] = (unsigned short )((unsigned int )stats_dma >> 16); mcp->mb[3] = (unsigned short )stats_dma; mcp->mb[6] = (unsigned short )((unsigned int )(stats_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(stats_dma >> 32ULL); mcp->out_mb = 205U; mcp->in_mb = 1U; if ((ha->device_type & 134217728U) != 0U) { mcp->mb[1] = loop_id; mcp->mb[4] = 0U; mcp->mb[10] = 0U; mcp->out_mb = mcp->out_mb | 1042U; mcp->in_mb = mcp->in_mb | 2U; } else if ((int )ha->device_type < 0) { mcp->mb[1] = loop_id; mcp->mb[10] = 0U; mcp->out_mb = mcp->out_mb | 1026U; } else { mcp->mb[1] = (int )loop_id << 8U; mcp->out_mb = mcp->out_mb | 2U; } mcp->tov = 30U; mcp->flags = 4U; rval = qla2x00_mailbox_command(vha, mcp); if (rval == 0) { if ((unsigned int )mcp->mb[0] != 16384U) { ql_dbg(536870912U, vha, 4229, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); rval = 258; } else { ql_dbg(536903680U, vha, 4230, "Done %s.\n", "qla2x00_get_link_status"); dwords = 7U; diter = & stats->link_fail_cnt; siter = diter; goto ldv_61159; ldv_61158: tmp = diter; diter = diter + 1; tmp___0 = siter; siter = siter + 1; *tmp = *tmp___0; ldv_61159: tmp___1 = dwords; dwords = dwords - 1U; if (tmp___1 != 0U) { goto ldv_61158; } else { } } } else { ql_dbg(536870912U, vha, 4231, "Failed=%x.\n", rval); } return (rval); } } int qla24xx_get_isp_stats(scsi_qla_host_t *vha , struct link_statistics 
*stats , dma_addr_t stats_dma ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; uint32_t *siter ; uint32_t *diter ; uint32_t dwords ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t tmp___1 ; { mcp = & mc; ql_dbg(536903680U, vha, 4232, "Entered %s.\n", "qla24xx_get_isp_stats"); mcp->mb[0] = 109U; mcp->mb[2] = (unsigned short )((unsigned int )stats_dma >> 16); mcp->mb[3] = (unsigned short )stats_dma; mcp->mb[6] = (unsigned short )((unsigned int )(stats_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(stats_dma >> 32ULL); mcp->mb[8] = 39U; mcp->mb[9] = vha->vp_idx; mcp->mb[10] = 0U; mcp->out_mb = 1997U; mcp->in_mb = 7U; mcp->tov = 30U; mcp->flags = 4U; rval = qla2x00_mailbox_command(vha, mcp); if (rval == 0) { if ((unsigned int )mcp->mb[0] != 16384U) { ql_dbg(536870912U, vha, 4233, "Failed mb[0]=%x.\n", (int )mcp->mb[0]); rval = 258; } else { ql_dbg(536903680U, vha, 4234, "Done %s.\n", "qla24xx_get_isp_stats"); dwords = 39U; diter = & stats->link_fail_cnt; siter = diter; goto ldv_61174; ldv_61173: tmp = diter; diter = diter + 1; tmp___0 = siter; siter = siter + 1; *tmp = *tmp___0; ldv_61174: tmp___1 = dwords; dwords = dwords - 1U; if (tmp___1 != 0U) { goto ldv_61173; } else { } } } else { ql_dbg(536870912U, vha, 4235, "Failed=%x.\n", rval); } return (rval); } } int qla24xx_abort_command(srb_t *sp ) { int rval ; unsigned long flags ; struct abort_entry_24xx *abt ; dma_addr_t abt_dma ; uint32_t handle ; fc_port_t *fcport ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct req_que *req ; raw_spinlock_t *tmp ; void *tmp___0 ; { flags = 0UL; fcport = sp->fcport; vha = fcport->vha; ha = vha->hw; req = vha->req; ql_dbg(536903680U, vha, 4236, "Entered %s.\n", "qla24xx_abort_command"); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); handle = 1U; goto ldv_61194; ldv_61193: ; if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )sp) { goto ldv_61192; } else { } handle = handle + 1U; ldv_61194: ; if ((uint32_t )req->num_outstanding_cmds > handle) { goto ldv_61193; } else { } ldv_61192: spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((uint32_t )req->num_outstanding_cmds == handle) { return (258); } else { } tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & abt_dma); abt = (struct abort_entry_24xx *)tmp___0; if ((unsigned long )abt == (unsigned long )((struct abort_entry_24xx *)0)) { ql_log(1U, vha, 4237, "Failed to allocate abort IOCB.\n"); return (259); } else { } memset((void *)abt, 0, 64UL); abt->entry_type = 51U; abt->entry_count = 1U; abt->handle = ((unsigned int )req->id << 16) | abt->handle; abt->nport_handle = fcport->loop_id; abt->handle_to_abort = ((unsigned int )req->id << 16) | handle; abt->port_id[0] = fcport->d_id.b.al_pa; abt->port_id[1] = fcport->d_id.b.area; abt->port_id[2] = fcport->d_id.b.domain; abt->vp_index = (uint8_t )(fcport->vha)->vp_idx; abt->req_que_no = req->id; rval = qla2x00_issue_iocb(vha, (void *)abt, abt_dma, 0UL); if (rval != 0) { ql_dbg(536870912U, vha, 4238, "Failed to issue IOCB (%x).\n", rval); } else if ((unsigned int )abt->entry_status != 0U) { ql_dbg(536870912U, vha, 4239, "Failed to complete IOCB -- error status (%x).\n", (int )abt->entry_status); rval = 258; } else if ((unsigned int )abt->nport_handle != 0U) { ql_dbg(536870912U, vha, 4240, "Failed to complete IOCB -- completion status (%x).\n", (int )abt->nport_handle); rval = 258; } else { ql_dbg(536903680U, vha, 4241, "Done %s.\n", "qla24xx_abort_command"); } dma_pool_free(ha->s_dma_pool, (void *)abt, abt_dma); return (rval); } } 
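/* __qla24xx_issue_tmf(): shared helper for the ISP24xx task-management
 * entry points. It builds a task-management IOCB ('type' selects the
 * operation; 16U is the LUN-reset path, which also packs the LUN), issues it
 * via qla2x00_issue_iocb(), checks the entry/completion/SCSI status, and then
 * sends a marker IOCB. qla24xx_abort_target() and qla24xx_lun_reset() below
 * are thin wrappers around it. */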
static int __qla24xx_issue_tmf(char *name , uint32_t type , struct fc_port *fcport , unsigned int l , int tag ) { int rval ; int rval2 ; struct tsk_mgmt_cmd *tsk ; struct sts_entry_24xx *sts ; dma_addr_t tsk_dma ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; void *tmp ; { vha = fcport->vha; ha = vha->hw; req = vha->req; ql_dbg(536903680U, vha, 4242, "Entered %s.\n", "__qla24xx_issue_tmf"); if (*((unsigned long *)ha + 2UL) != 0UL) { rsp = *(ha->rsp_q_map + ((unsigned long )tag + 1UL)); } else { rsp = req->rsp; } tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & tsk_dma); tsk = (struct tsk_mgmt_cmd *)tmp; if ((unsigned long )tsk == (unsigned long )((struct tsk_mgmt_cmd *)0)) { ql_log(1U, vha, 4243, "Failed to allocate task management IOCB.\n"); return (259); } else { } memset((void *)tsk, 0, 64UL); tsk->p.tsk.entry_type = 20U; tsk->p.tsk.entry_count = 1U; tsk->p.tsk.handle = ((unsigned int )req->id << 16) | tsk->p.tsk.handle; tsk->p.tsk.nport_handle = fcport->loop_id; tsk->p.tsk.timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; tsk->p.tsk.control_flags = type; tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; tsk->p.tsk.port_id[1] = fcport->d_id.b.area; tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; tsk->p.tsk.vp_index = (uint8_t )(fcport->vha)->vp_idx; if (type == 16U) { int_to_scsilun(l, & tsk->p.tsk.lun); host_to_fcp_swap((uint8_t *)(& tsk->p.tsk.lun), 8U); } else { } sts = & tsk->p.sts; rval = qla2x00_issue_iocb(vha, (void *)tsk, tsk_dma, 0UL); if (rval != 0) { ql_dbg(536870912U, vha, 4244, "Failed to issue %s reset IOCB (%x).\n", name, rval); } else if ((unsigned int )sts->entry_status != 0U) { ql_dbg(536870912U, vha, 4245, "Failed to complete IOCB -- error status (%x).\n", (int )sts->entry_status); rval = 258; } else if ((unsigned int )sts->comp_status != 0U) { ql_dbg(536870912U, vha, 4246, "Failed to complete IOCB -- completion status (%x).\n", (int )sts->comp_status); rval = 258; } else if (((int )sts->scsi_status & 256) != 0) { if (sts->rsp_data_len <= 3U) { ql_dbg(536903680U, vha, 4247, "Ignoring inconsistent data length -- not enough response info (%d).\n", sts->rsp_data_len); } else if ((unsigned int )sts->data[3] != 0U) { ql_dbg(536870912U, vha, 4248, "Failed to complete IOCB -- response (%x).\n", (int )sts->data[3]); rval = 258; } else { } } else { } rval2 = qla2x00_marker(vha, req, rsp, (int )fcport->loop_id, (int )((uint16_t )l), type != 16U); if (rval2 != 0) { ql_dbg(536870912U, vha, 4249, "Failed to issue marker IOCB (%x).\n", rval2); } else { ql_dbg(536903680U, vha, 4250, "Done %s.\n", "__qla24xx_issue_tmf"); } dma_pool_free(ha->s_dma_pool, (void *)tsk, tsk_dma); return (rval); } } int qla24xx_abort_target(struct fc_port *fcport , unsigned int l , int tag ) { struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { ha = (fcport->vha)->hw; if (ql2xasynctmfenable != 0 && (ha->device_type & 134217728U) != 0U) { tmp = qla2x00_async_tm_cmd(fcport, 2U, l, (uint32_t )tag); return (tmp); } else { } tmp___0 = __qla24xx_issue_tmf((char *)"Target", 2U, fcport, l, tag); return (tmp___0); } } int qla24xx_lun_reset(struct fc_port *fcport , unsigned int l , int tag ) { struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { ha = (fcport->vha)->hw; if (ql2xasynctmfenable != 0 && (ha->device_type & 134217728U) != 0U) { tmp = qla2x00_async_tm_cmd(fcport, 16U, l, (uint32_t )tag); return (tmp); } else { } tmp___0 = __qla24xx_issue_tmf((char *)"Lun", 16U, fcport, l, tag); return (tmp___0); } } int qla2x00_system_error(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc 
; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; if ((((((ha->device_type & 4U) == 0U && (ha->device_type & 8U) == 0U) && (ha->device_type & 16U) == 0U) && (ha->device_type & 32U) == 0U) && (ha->device_type & 64U) == 0U) && (ha->device_type & 134217728U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4251, "Entered %s.\n", "qla2x00_system_error"); mcp->mb[0] = 42U; mcp->out_mb = 1U; mcp->in_mb = 1U; mcp->tov = 5U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4252, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4253, "Done %s.\n", "qla2x00_system_error"); } return (rval); } } int qla2x00_set_serdes_params(scsi_qla_host_t *vha , uint16_t sw_em_1g , uint16_t sw_em_2g , uint16_t sw_em_4g ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4254, "Entered %s.\n", "qla2x00_set_serdes_params"); mcp->mb[0] = 16U; mcp->mb[1] = 1U; mcp->mb[2] = (uint16_t )((unsigned int )sw_em_1g | 32768U); mcp->mb[3] = (uint16_t )((unsigned int )sw_em_2g | 32768U); mcp->mb[4] = (uint16_t )((unsigned int )sw_em_4g | 32768U); mcp->out_mb = 31U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4255, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4256, "Done %s.\n", "qla2x00_set_serdes_params"); } return (rval); } } int qla2x00_stop_firmware(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4257, "Entered %s.\n", "qla2x00_stop_firmware"); mcp->mb[0] = 20U; mcp->mb[1] = 0U; mcp->out_mb = 3U; mcp->in_mb = 1U; mcp->tov = 5U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4258, "Failed=%x.\n", rval); if ((unsigned int )mcp->mb[0] == 16385U) { rval = 1; } else { } } else { ql_dbg(536903680U, vha, 4259, "Done %s.\n", "qla2x00_stop_firmware"); } return (rval); } } int qla2x00_enable_eft_trace(scsi_qla_host_t *vha , dma_addr_t eft_dma , uint16_t buffers ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int tmp ; long tmp___0 ; { mcp = & mc; ql_dbg(536903680U, vha, 4260, "Entered %s.\n", "qla2x00_enable_eft_trace"); if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } tmp = pci_channel_offline((vha->hw)->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (258); } else { } mcp->mb[0] = 39U; mcp->mb[1] = 4U; mcp->mb[2] = (unsigned short )eft_dma; mcp->mb[3] = (unsigned short )((unsigned int )eft_dma >> 16); mcp->mb[4] = (unsigned short )(eft_dma >> 32ULL); mcp->mb[5] = (unsigned short )((unsigned int )(eft_dma >> 32ULL) >> 16); mcp->mb[6] = buffers; mcp->mb[7] = 0U; mcp->out_mb = 255U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4261, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4262, "Done %s.\n", "qla2x00_enable_eft_trace"); } return (rval); } } int qla2x00_disable_eft_trace(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int tmp ; long tmp___0 ; { mcp = & mc; ql_dbg(536903680U, vha, 4263, "Entered %s.\n", "qla2x00_disable_eft_trace"); if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } tmp = pci_channel_offline((vha->hw)->pdev); tmp___0 = 
ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (258); } else { } mcp->mb[0] = 39U; mcp->mb[1] = 5U; mcp->out_mb = 3U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4264, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4265, "Done %s.\n", "qla2x00_disable_eft_trace"); } return (rval); } } int qla2x00_enable_fce_trace(scsi_qla_host_t *vha , dma_addr_t fce_dma , uint16_t buffers , uint16_t *mb , uint32_t *dwords ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int tmp ; long tmp___0 ; size_t __len ; void *__ret ; { mcp = & mc; ql_dbg(536903680U, vha, 4266, "Entered %s.\n", "qla2x00_enable_fce_trace"); if ((((vha->hw)->device_type & 2048U) == 0U && ((vha->hw)->device_type & 8192U) == 0U) && (((vha->hw)->device_type & 32768U) == 0U && ((vha->hw)->device_type & 65536U) == 0U)) { return (258); } else { } tmp = pci_channel_offline((vha->hw)->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (258); } else { } mcp->mb[0] = 39U; mcp->mb[1] = 8U; mcp->mb[2] = (unsigned short )fce_dma; mcp->mb[3] = (unsigned short )((unsigned int )fce_dma >> 16); mcp->mb[4] = (unsigned short )(fce_dma >> 32ULL); mcp->mb[5] = (unsigned short )((unsigned int )(fce_dma >> 32ULL) >> 16); mcp->mb[6] = buffers; mcp->mb[7] = 0U; mcp->mb[8] = 0U; mcp->mb[9] = 2112U; mcp->mb[10] = 2112U; mcp->out_mb = 2047U; mcp->in_mb = 127U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4267, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4268, "Done %s.\n", "qla2x00_enable_fce_trace"); if ((unsigned long )mb != (unsigned long )((uint16_t *)0U)) { __len = 16UL; if (__len > 63UL) { __ret = __memcpy((void *)mb, (void const *)(& mcp->mb), __len); } else { __ret = __builtin_memcpy((void *)mb, (void const *)(& mcp->mb), __len); } } else { } if ((unsigned long )dwords != (unsigned long )((uint32_t *)0U)) { *dwords = (uint32_t )buffers; } else { } } return (rval); } } int qla2x00_disable_fce_trace(scsi_qla_host_t *vha , uint64_t *wr , uint64_t *rd ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int tmp ; long tmp___0 ; { mcp = & mc; ql_dbg(536903680U, vha, 4269, "Entered %s.\n", "qla2x00_disable_fce_trace"); if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } tmp = pci_channel_offline((vha->hw)->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (258); } else { } mcp->mb[0] = 39U; mcp->mb[1] = 9U; mcp->mb[2] = 1U; mcp->out_mb = 7U; mcp->in_mb = 1023U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4270, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4271, "Done %s.\n", "qla2x00_disable_fce_trace"); if ((unsigned long )wr != (unsigned long )((uint64_t *)0ULL)) { *wr = ((((unsigned long long )mcp->mb[5] << 48) | ((unsigned long long )mcp->mb[4] << 32)) | ((unsigned long long )mcp->mb[3] << 16)) | (unsigned long long )mcp->mb[2]; } else { } if ((unsigned long )rd != (unsigned long )((uint64_t *)0ULL)) { *rd = ((((unsigned long long )mcp->mb[9] << 48) | ((unsigned long long )mcp->mb[8] << 32)) | ((unsigned long long )mcp->mb[7] << 16)) | (unsigned long long )mcp->mb[6]; } else { } } return (rval); } } int qla2x00_get_idma_speed(scsi_qla_host_t *vha , 
uint16_t loop_id , uint16_t *port_speed , uint16_t *mb ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; uint16_t tmp ; { mcp = & mc; ql_dbg(536903680U, vha, 4272, "Entered %s.\n", "qla2x00_get_idma_speed"); if (((vha->hw)->device_type & 67108864U) == 0U) { return (258); } else { } mcp->mb[0] = 26U; mcp->mb[1] = loop_id; tmp = 0U; mcp->mb[3] = tmp; mcp->mb[2] = tmp; mcp->mb[9] = vha->vp_idx; mcp->out_mb = 527U; mcp->in_mb = 11U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((unsigned long )mb != (unsigned long )((uint16_t *)0U)) { *mb = mcp->mb[0]; *(mb + 1UL) = mcp->mb[1]; *(mb + 3UL) = mcp->mb[3]; } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4273, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4274, "Done %s.\n", "qla2x00_get_idma_speed"); if ((unsigned long )port_speed != (unsigned long )((uint16_t *)0U)) { *port_speed = mcp->mb[3]; } else { } } return (rval); } } int qla2x00_set_idma_speed(scsi_qla_host_t *vha , uint16_t loop_id , uint16_t port_speed , uint16_t *mb ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4275, "Entered %s.\n", "qla2x00_set_idma_speed"); if (((vha->hw)->device_type & 67108864U) == 0U) { return (258); } else { } mcp->mb[0] = 26U; mcp->mb[1] = loop_id; mcp->mb[2] = 1U; if (((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 16384U) != 0U) || ((vha->hw)->device_type & 65536U) != 0U) || ((vha->hw)->device_type & 262144U) != 0U) { mcp->mb[3] = (unsigned int )port_speed & 63U; } else { mcp->mb[3] = (unsigned int )port_speed & 7U; } mcp->mb[9] = vha->vp_idx; mcp->out_mb = 527U; mcp->in_mb = 11U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((unsigned long )mb != (unsigned long )((uint16_t *)0U)) { *mb = mcp->mb[0]; *(mb + 1UL) = mcp->mb[1]; *(mb + 3UL) = mcp->mb[3]; } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4276, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4277, "Done %s.\n", "qla2x00_set_idma_speed"); } return (rval); } } void qla24xx_report_id_acquisition(scsi_qla_host_t *vha , struct vp_rpt_id_entry_24xx *rptid_entry ) { uint8_t vp_idx ; uint16_t stat ; struct qla_hw_data *ha ; scsi_qla_host_t *vp ; unsigned long flags ; int found ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { stat = rptid_entry->vp_idx; ha = vha->hw; ql_dbg(536903680U, vha, 4278, "Entered %s.\n", "qla24xx_report_id_acquisition"); if ((unsigned int )rptid_entry->entry_status != 0U) { return; } else { } if ((unsigned int )rptid_entry->format == 0U) { ql_dbg(536903680U, vha, 4279, "Format 0 : Number of VPs setup %d, number of VPs acquired %d.\n", (int )((unsigned char )((int )rptid_entry->vp_count >> 8)), (int )((unsigned char )rptid_entry->vp_count)); ql_dbg(536903680U, vha, 4280, "Primary port id %02x%02x%02x.\n", (int )rptid_entry->port_id[2], (int )rptid_entry->port_id[1], (int )rptid_entry->port_id[0]); } else if ((unsigned int )rptid_entry->format == 1U) { vp_idx = (unsigned char )stat; ql_dbg(536903680U, vha, 4281, "Format 1: VP[%d] enabled - status %d - with port id %02x%02x%02x.\n", (int )vp_idx, (int )((unsigned char )((int )stat >> 8)), (int )rptid_entry->port_id[2], (int )rptid_entry->port_id[1], (int )rptid_entry->port_id[0]); vp = vha; if ((unsigned int )vp_idx == 0U && (unsigned int )((unsigned char )((int )stat >> 8)) != 1U) { goto reg_needed; } else { } if ((unsigned int )((unsigned char )((int )stat >> 8)) != 0U && (unsigned int )((unsigned char )((int )stat >> 8)) != 2U) { 
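/* The format-1 VP status (high byte of rptid_entry->vp_idx, cached in 'stat')
 * is neither 0 nor 2 here; the firmware apparently did not assign an ID to
 * this virtual port, so log it and bail out. */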
ql_dbg(536870912U, vha, 4282, "Could not acquire ID for VP[%d].\n", (int )vp_idx); return; } else { } found = 0; tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)ha->vp_list.next; vp = (scsi_qla_host_t *)__mptr; goto ldv_61334; ldv_61333: ; if ((int )((unsigned short )vp_idx) == (int )vp->vp_idx) { found = 1; goto ldv_61332; } else { } __mptr___0 = (struct list_head const *)vp->list.next; vp = (scsi_qla_host_t *)__mptr___0; ldv_61334: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_61333; } else { } ldv_61332: spin_unlock_irqrestore(& ha->vport_slock, flags); if (found == 0) { return; } else { } vp->d_id.b.domain = rptid_entry->port_id[2]; vp->d_id.b.area = rptid_entry->port_id[1]; vp->d_id.b.al_pa = rptid_entry->port_id[0]; set_bit(0L, (unsigned long volatile *)(& vp->vp_flags)); reg_needed: set_bit(9L, (unsigned long volatile *)(& vp->dpc_flags)); set_bit(12L, (unsigned long volatile *)(& vp->dpc_flags)); set_bit(14L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else { } return; } } int qla24xx_modify_vp_config(scsi_qla_host_t *vha ) { int rval ; struct vp_config_entry_24xx *vpmod ; dma_addr_t vpmod_dma ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp ; void *tmp___0 ; size_t __len ; void *__ret ; size_t __len___0 ; void *__ret___0 ; { ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; ql_dbg(536903680U, vha, 4283, "Entered %s.\n", "qla24xx_modify_vp_config"); tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & vpmod_dma); vpmod = (struct vp_config_entry_24xx *)tmp___0; if ((unsigned long )vpmod == (unsigned long )((struct vp_config_entry_24xx *)0)) { ql_log(1U, vha, 4284, "Failed to allocate modify VP IOCB.\n"); return (259); } else { } memset((void *)vpmod, 0, 64UL); vpmod->entry_type = 49U; vpmod->entry_count = 1U; vpmod->command = 1U; vpmod->vp_count = 1U; vpmod->vp_index1 = (uint8_t )vha->vp_idx; vpmod->options_idx1 = 56U; qlt_modify_vp_config(vha, vpmod); __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& vpmod->node_name_idx1), (void const *)(& vha->node_name), __len); } else { __ret = __builtin_memcpy((void *)(& vpmod->node_name_idx1), (void const *)(& vha->node_name), __len); } __len___0 = 8UL; if (__len___0 > 63UL) { __ret___0 = __memcpy((void *)(& vpmod->port_name_idx1), (void const *)(& vha->port_name), __len___0); } else { __ret___0 = __builtin_memcpy((void *)(& vpmod->port_name_idx1), (void const *)(& vha->port_name), __len___0); } vpmod->entry_count = 1U; rval = qla2x00_issue_iocb(base_vha, (void *)vpmod, vpmod_dma, 0UL); if (rval != 0) { ql_dbg(536870912U, vha, 4285, "Failed to issue VP config IOCB (%x).\n", rval); } else if ((unsigned int )vpmod->comp_status != 0U) { ql_dbg(536870912U, vha, 4286, "Failed to complete IOCB -- error status (%x).\n", (int )vpmod->comp_status); rval = 258; } else if ((unsigned int )vpmod->comp_status != 0U) { ql_dbg(536870912U, vha, 4287, "Failed to complete IOCB -- completion status (%x).\n", (int )vpmod->comp_status); rval = 258; } else { ql_dbg(536903680U, vha, 4288, "Done %s.\n", "qla24xx_modify_vp_config"); fc_vport_set_state(vha->fc_vport, 4); } dma_pool_free(ha->s_dma_pool, (void *)vpmod, vpmod_dma); return (rval); } } int qla24xx_control_vp(scsi_qla_host_t *vha , int cmd ) { int rval ; int map ; int pos ; struct vp_ctrl_entry_24xx *vce ; dma_addr_t vce_dma ; struct qla_hw_data *ha ; int vp_index ; struct scsi_qla_host *base_vha ; void *tmp ; void 
*tmp___0 ; { ha = vha->hw; vp_index = (int )vha->vp_idx; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; ql_dbg(536903680U, vha, 4289, "Entered %s enabling index %d.\n", "qla24xx_control_vp", vp_index); if (vp_index == 0 || (int )ha->max_npiv_vports <= vp_index) { return (6); } else { } tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & vce_dma); vce = (struct vp_ctrl_entry_24xx *)tmp___0; if ((unsigned long )vce == (unsigned long )((struct vp_ctrl_entry_24xx *)0)) { ql_log(1U, vha, 4290, "Failed to allocate VP control IOCB.\n"); return (259); } else { } memset((void *)vce, 0, 64UL); vce->entry_type = 48U; vce->entry_count = 1U; vce->command = (unsigned short )cmd; vce->vp_count = 1U; map = (vp_index + -1) / 8; pos = (vp_index + -1) & 7; mutex_lock_nested(& ha->vport_lock, 0U); vce->vp_idx_map[map] = (uint8_t )((int )((signed char )vce->vp_idx_map[map]) | (int )((signed char )(1 << pos))); mutex_unlock(& ha->vport_lock); rval = qla2x00_issue_iocb(base_vha, (void *)vce, vce_dma, 0UL); if (rval != 0) { ql_dbg(536870912U, vha, 4291, "Failed to issue VP control IOCB (%x).\n", rval); } else if ((unsigned int )vce->entry_status != 0U) { ql_dbg(536870912U, vha, 4292, "Failed to complete IOCB -- error status (%x).\n", (int )vce->entry_status); rval = 258; } else if ((unsigned int )vce->comp_status != 0U) { ql_dbg(536870912U, vha, 4293, "Failed to complet IOCB -- completion status (%x).\n", (int )vce->comp_status); rval = 258; } else { ql_dbg(536903680U, vha, 4294, "Done %s.\n", "qla24xx_control_vp"); } dma_pool_free(ha->s_dma_pool, (void *)vce, vce_dma); return (rval); } } int qla2x00_send_change_request(scsi_qla_host_t *vha , uint16_t format , uint16_t vp_idx ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4295, "Entered %s.\n", "qla2x00_send_change_request"); mcp->mb[0] = 112U; mcp->mb[1] = format; mcp->mb[9] = vp_idx; mcp->out_mb = 515U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval == 0) { if ((unsigned int )mcp->mb[0] != 16384U) { rval = 2; } else { } } else { rval = 2; } return (rval); } } int qla2x00_dump_ram(scsi_qla_host_t *vha , dma_addr_t req_dma , uint32_t addr , uint32_t size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4105, "Entered %s.\n", "qla2x00_dump_ram"); if ((unsigned int )((unsigned short )(addr >> 16)) != 0U || ((vha->hw)->device_type & 134217728U) != 0U) { mcp->mb[0] = 12U; mcp->mb[8] = (unsigned short )(addr >> 16); mcp->out_mb = 257U; } else { mcp->mb[0] = 10U; mcp->out_mb = 1U; } mcp->mb[1] = (unsigned short )addr; mcp->mb[2] = (unsigned short )((unsigned int )req_dma >> 16); mcp->mb[3] = (unsigned short )req_dma; mcp->mb[6] = (unsigned short )((unsigned int )(req_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(req_dma >> 32ULL); mcp->out_mb = mcp->out_mb | 206U; if (((vha->hw)->device_type & 134217728U) != 0U) { mcp->mb[4] = (unsigned short )(size >> 16); mcp->mb[5] = (unsigned short )size; mcp->out_mb = mcp->out_mb | 48U; } else { mcp->mb[4] = (unsigned short )size; mcp->out_mb = mcp->out_mb | 16U; } mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4104, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4103, "Done %s.\n", "qla2x00_dump_ram"); } return (rval); } } int qla84xx_verify_chip(struct scsi_qla_host *vha , uint16_t *status ) { int rval ; int retry ; struct cs84xx_mgmt_cmd *mn ; 
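/* qla84xx_verify_chip(): issue a CS84xx verify-chip IOCB, optionally
 * requesting a firmware update when cs84xx->fw_update is set. On a non-zero
 * completion status the command is retried once with the update option
 * cleared; on success the reported firmware version is cached in
 * cs84xx->op_fw_version under access_lock. comp_status and failure_code are
 * returned through the 'status' array. */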
dma_addr_t mn_dma ; uint16_t options ; unsigned long flags ; struct qla_hw_data *ha ; void *tmp ; raw_spinlock_t *tmp___0 ; { ha = vha->hw; ql_dbg(536903680U, vha, 4296, "Entered %s.\n", "qla84xx_verify_chip"); tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & mn_dma); mn = (struct cs84xx_mgmt_cmd *)tmp; if ((unsigned long )mn == (unsigned long )((struct cs84xx_mgmt_cmd *)0)) { return (259); } else { } options = (ha->cs84xx)->fw_update != 0U ? 2U : 0U; options = (uint16_t )((unsigned int )options | 16384U); ldv_61403: retry = 0; memset((void *)mn, 0, 64UL); mn->p.req.entry_type = 27U; mn->p.req.entry_count = 1U; mn->p.req.options = options; ql_dbg(537001984U, vha, 4380, "Dump of Verify Request.\n"); ql_dump_buffer(537001984U, vha, 4382, (uint8_t *)mn, 64U); rval = qla2x00_issue_iocb_timeout(vha, (void *)mn, mn_dma, 0UL, 120U); if (rval != 0) { ql_dbg(536870912U, vha, 4299, "Failed to issue verify IOCB (%x).\n", rval); goto verify_done; } else { } ql_dbg(537001984U, vha, 4368, "Dump of Verify Response.\n"); ql_dump_buffer(537001984U, vha, 4376, (uint8_t *)mn, 64U); *status = mn->p.rsp.comp_status; *(status + 1UL) = (unsigned int )*status == 3U ? mn->p.rsp.failure_code : 0U; ql_dbg(536903680U, vha, 4302, "cs=%x fc=%x.\n", (int )*status, (int )*(status + 1UL)); if ((unsigned int )*status != 0U) { rval = 258; if (((int )options & 1) == 0) { ql_dbg(536870912U, vha, 4303, "Firmware update failed. Retrying without update firmware.\n"); options = (uint16_t )((unsigned int )options | 1U); options = (unsigned int )options & 65533U; retry = 1; } else { } } else { ql_dbg(536903680U, vha, 4304, "Firmware updated to %x.\n", mn->p.rsp.fw_ver); tmp___0 = spinlock_check(& (ha->cs84xx)->access_lock); flags = _raw_spin_lock_irqsave(tmp___0); (ha->cs84xx)->op_fw_version = mn->p.rsp.fw_ver; spin_unlock_irqrestore(& (ha->cs84xx)->access_lock, flags); } if (retry != 0) { goto ldv_61403; } else { } verify_done: dma_pool_free(ha->s_dma_pool, (void *)mn, mn_dma); if (rval != 0) { ql_dbg(536870912U, vha, 4305, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4306, "Done %s.\n", "qla84xx_verify_chip"); } return (rval); } } int qla25xx_init_req_que(struct scsi_qla_host *vha , struct req_que *req ) { int rval ; unsigned long flags ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; raw_spinlock_t *tmp ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4307, "Entered %s.\n", "qla25xx_init_req_que"); mcp->mb[0] = 31U; mcp->mb[1] = req->options; mcp->mb[2] = (unsigned short )((unsigned int )req->dma >> 16); mcp->mb[3] = (unsigned short )req->dma; mcp->mb[6] = (unsigned short )((unsigned int )(req->dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(req->dma >> 32ULL); mcp->mb[5] = req->length; if ((unsigned long )req->rsp != (unsigned long )((struct rsp_que *)0)) { mcp->mb[10] = (req->rsp)->id; } else { } mcp->mb[12] = req->qos; mcp->mb[11] = req->vp_idx; mcp->mb[13] = req->rid; if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { mcp->mb[15] = 0U; } else { } mcp->mb[4] = req->id; mcp->mb[8] = 0U; mcp->mb[9] = 0U; mcp->out_mb = 32767U; mcp->in_mb = 1U; mcp->flags = 2U; mcp->tov = 60U; if ((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { mcp->in_mb = mcp->in_mb | 2U; } else { } if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { mcp->out_mb = mcp->out_mb | 32768U; mcp->in_mb = mcp->in_mb | 896U; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if (((int 
)req->options & 1) == 0) { writel(0U, (void volatile *)req->req_q_in); if ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) { writel(0U, (void volatile *)req->req_q_out); } else { } } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4308, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4309, "Done %s.\n", "qla25xx_init_req_que"); } return (rval); } } int qla25xx_init_rsp_que(struct scsi_qla_host *vha , struct rsp_que *rsp ) { int rval ; unsigned long flags ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; raw_spinlock_t *tmp ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4310, "Entered %s.\n", "qla25xx_init_rsp_que"); mcp->mb[0] = 31U; mcp->mb[1] = rsp->options; mcp->mb[2] = (unsigned short )((unsigned int )rsp->dma >> 16); mcp->mb[3] = (unsigned short )rsp->dma; mcp->mb[6] = (unsigned short )((unsigned int )(rsp->dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(rsp->dma >> 32ULL); mcp->mb[5] = rsp->length; mcp->mb[14] = (rsp->msix)->entry; mcp->mb[13] = rsp->rid; if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { mcp->mb[15] = 0U; } else { } mcp->mb[4] = rsp->id; mcp->mb[8] = 0U; mcp->mb[9] = 0U; mcp->out_mb = 25599U; mcp->in_mb = 1U; mcp->flags = 2U; mcp->tov = 60U; if ((ha->device_type & 8192U) != 0U) { mcp->out_mb = mcp->out_mb | 7168U; mcp->in_mb = mcp->in_mb | 2U; } else if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { mcp->out_mb = mcp->out_mb | 39936U; mcp->in_mb = mcp->in_mb | 2U; mcp->in_mb = mcp->in_mb | 896U; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if (((int )rsp->options & 1) == 0) { writel(0U, (void volatile *)rsp->rsp_q_out); if ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) { writel(0U, (void volatile *)rsp->rsp_q_in); } else { } } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4311, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4312, "Done %s.\n", "qla25xx_init_rsp_que"); } return (rval); } } int qla81xx_idc_ack(scsi_qla_host_t *vha , uint16_t *mb ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; size_t __len ; void *__ret ; { mcp = & mc; ql_dbg(536903680U, vha, 4313, "Entered %s.\n", "qla81xx_idc_ack"); mcp->mb[0] = 257U; __len = 14UL; if (__len > 63UL) { __ret = __memcpy((void *)(& mcp->mb) + 1U, (void const *)mb, __len); } else { __ret = __builtin_memcpy((void *)(& mcp->mb) + 1U, (void const *)mb, __len); } mcp->out_mb = 255U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4314, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4315, "Done %s.\n", "qla81xx_idc_ack"); } return (rval); } } int qla81xx_fac_get_sector_size(scsi_qla_host_t *vha , uint32_t *sector_size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4316, "Entered %s.\n", "qla81xx_fac_get_sector_size"); if (((vha->hw)->device_type & 8192U) == 0U && (((vha->hw)->device_type & 32768U) == 0U && ((vha->hw)->device_type & 65536U) == 0U)) { return (258); } else { } mcp->mb[0] = 62U; mcp->mb[1] = 5U; mcp->out_mb = 3U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { 
ql_dbg(536870912U, vha, 4317, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4318, "Done %s.\n", "qla81xx_fac_get_sector_size"); *sector_size = (uint32_t )mcp->mb[1]; } return (rval); } } int qla81xx_fac_do_write_enable(scsi_qla_host_t *vha , int enable ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; if (((vha->hw)->device_type & 8192U) == 0U && (((vha->hw)->device_type & 32768U) == 0U && ((vha->hw)->device_type & 65536U) == 0U)) { return (258); } else { } ql_dbg(536903680U, vha, 4319, "Entered %s.\n", "qla81xx_fac_do_write_enable"); mcp->mb[0] = 62U; mcp->mb[1] = enable != 0; mcp->out_mb = 3U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4320, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4321, "Done %s.\n", "qla81xx_fac_do_write_enable"); } return (rval); } } int qla81xx_fac_erase_sector(scsi_qla_host_t *vha , uint32_t start , uint32_t finish ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; if (((vha->hw)->device_type & 8192U) == 0U && (((vha->hw)->device_type & 32768U) == 0U && ((vha->hw)->device_type & 65536U) == 0U)) { return (258); } else { } ql_dbg(536903680U, vha, 4322, "Entered %s.\n", "qla81xx_fac_erase_sector"); mcp->mb[0] = 62U; mcp->mb[1] = 2U; mcp->mb[2] = (unsigned short )start; mcp->mb[3] = (unsigned short )(start >> 16); mcp->mb[4] = (unsigned short )finish; mcp->mb[5] = (unsigned short )(finish >> 16); mcp->out_mb = 63U; mcp->in_mb = 7U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4323, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2]); } else { ql_dbg(536903680U, vha, 4324, "Done %s.\n", "qla81xx_fac_erase_sector"); } return (rval); } } int qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { rval = 0; mcp = & mc; ql_dbg(536903680U, vha, 4325, "Entered %s.\n", "qla81xx_restart_mpi_firmware"); mcp->mb[0] = 61U; mcp->out_mb = 1U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4326, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4327, "Done %s.\n", "qla81xx_restart_mpi_firmware"); } return (rval); } } int qla82xx_set_driver_version(scsi_qla_host_t *vha , char *version ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int i ; int len ; uint16_t *str ; struct qla_hw_data *ha ; size_t tmp ; { mcp = & mc; ha = vha->hw; if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4475, "Entered %s.\n", "qla82xx_set_driver_version"); str = (uint16_t *)version; tmp = strlen((char const *)version); len = (int )tmp; mcp->mb[0] = 89U; mcp->mb[1] = 2304U; mcp->out_mb = 3U; i = 4; goto ldv_61487; ldv_61486: mcp->mb[i] = __cpu_to_le16p((__u16 const *)str); mcp->out_mb = mcp->out_mb | (uint32_t )(1 << i); i = i + 1; str = str + 1; len = len + -2; ldv_61487: ; if (i <= 15 && len != 0) { goto ldv_61486; } else { } goto ldv_61490; ldv_61489: mcp->mb[i] = 0U; mcp->out_mb = mcp->out_mb | (uint32_t )(1 << i); i = i + 1; ldv_61490: ; if (i <= 15) { goto ldv_61489; } else { } mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { 
ql_dbg(536870912U, vha, 4476, "Failed=%x mb[0]=%x,%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4477, "Done %s.\n", "qla82xx_set_driver_version"); } return (rval); } } int qla25xx_set_driver_version(scsi_qla_host_t *vha , char *version ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int len ; uint16_t dwlen ; uint8_t *str ; dma_addr_t str_dma ; struct qla_hw_data *ha ; void *tmp ; size_t __len ; void *__ret ; size_t tmp___0 ; size_t tmp___1 ; size_t __len___0 ; void *__ret___0 ; { mcp = & mc; ha = vha->hw; if ((((ha->device_type & 134217728U) == 0U || ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U)) || (ha->device_type & 8192U) != 0U) || ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U)) { return (258); } else { } ql_dbg(536903680U, vha, 4478, "Entered %s.\n", "qla25xx_set_driver_version"); tmp = dma_pool_alloc(ha->s_dma_pool, 208U, & str_dma); str = (uint8_t *)tmp; if ((unsigned long )str == (unsigned long )((uint8_t *)0U)) { ql_log(1U, vha, 4479, "Failed to allocate driver version param.\n"); return (259); } else { } __len = 4UL; if (__len > 63UL) { __ret = __memcpy((void *)str, (void const *)"\a\003\021", __len); } else { __ret = __builtin_memcpy((void *)str, (void const *)"\a\003\021", __len); } dwlen = (uint16_t )*str; len = ((int )dwlen + -1) * 4; memset((void *)str + 4U, 0, (size_t )len); tmp___1 = strlen((char const *)version); if ((size_t )len > tmp___1) { tmp___0 = strlen((char const *)version); len = (int )tmp___0; } else { } __len___0 = (size_t )len; __ret___0 = __builtin_memcpy((void *)str + 4U, (void const *)version, __len___0); mcp->mb[0] = 89U; mcp->mb[1] = (uint16_t )((unsigned int )dwlen | 2304U); mcp->mb[2] = (unsigned short )((unsigned int )str_dma >> 16); mcp->mb[3] = (unsigned short )str_dma; mcp->mb[6] = (unsigned short )((unsigned int )(str_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(str_dma >> 32ULL); mcp->out_mb = 207U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4480, "Failed=%x mb[0]=%x,%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4481, "Done %s.\n", "qla25xx_set_driver_version"); } dma_pool_free(ha->s_dma_pool, (void *)str, str_dma); return (rval); } } static int qla2x00_read_asic_temperature(scsi_qla_host_t *vha , uint16_t *temp ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4441, "Entered %s.\n", "qla2x00_read_asic_temperature"); mcp->mb[0] = 90U; mcp->mb[1] = 3072U; mcp->out_mb = 3U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); *temp = mcp->mb[1]; if (rval != 0) { ql_dbg(536870912U, vha, 4442, "Failed=%x mb[0]=%x,%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4443, "Done %s.\n", "qla2x00_read_asic_temperature"); } return (rval); } } int qla2x00_read_sfp(scsi_qla_host_t *vha , dma_addr_t sfp_dma , uint8_t *sfp , uint16_t dev , uint16_t off , uint16_t len , uint16_t opt ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4328, "Entered %s.\n", "qla2x00_read_sfp"); if ((ha->device_type & 134217728U) == 0U) { return (258); } else { } if ((unsigned int )len == 1U) { opt = 
(uint16_t )((unsigned int )opt | 1U); } else { } mcp->mb[0] = 49U; mcp->mb[1] = dev; mcp->mb[2] = (unsigned short )((unsigned int )sfp_dma >> 16); mcp->mb[3] = (unsigned short )sfp_dma; mcp->mb[6] = (unsigned short )((unsigned int )(sfp_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(sfp_dma >> 32ULL); mcp->mb[8] = len; mcp->mb[9] = off; mcp->mb[10] = opt; mcp->out_mb = 1999U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((int )opt & 1) { *sfp = (uint8_t )mcp->mb[1]; } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4329, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4330, "Done %s.\n", "qla2x00_read_sfp"); } return (rval); } } int qla2x00_write_sfp(scsi_qla_host_t *vha , dma_addr_t sfp_dma , uint8_t *sfp , uint16_t dev , uint16_t off , uint16_t len , uint16_t opt ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4331, "Entered %s.\n", "qla2x00_write_sfp"); if ((ha->device_type & 134217728U) == 0U) { return (258); } else { } if ((unsigned int )len == 1U) { opt = (uint16_t )((unsigned int )opt | 1U); } else { } if ((int )opt & 1) { len = (uint16_t )*sfp; } else { } mcp->mb[0] = 48U; mcp->mb[1] = dev; mcp->mb[2] = (unsigned short )((unsigned int )sfp_dma >> 16); mcp->mb[3] = (unsigned short )sfp_dma; mcp->mb[6] = (unsigned short )((unsigned int )(sfp_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(sfp_dma >> 32ULL); mcp->mb[8] = len; mcp->mb[9] = off; mcp->mb[10] = opt; mcp->out_mb = 1999U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4332, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4333, "Done %s.\n", "qla2x00_write_sfp"); } return (rval); } } int qla2x00_get_xgmac_stats(scsi_qla_host_t *vha , dma_addr_t stats_dma , uint16_t size_in_bytes , uint16_t *actual_size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4334, "Entered %s.\n", "qla2x00_get_xgmac_stats"); if (((((vha->hw)->device_type & 8192U) == 0U && ((vha->hw)->device_type & 16384U) == 0U) && ((vha->hw)->device_type & 65536U) == 0U) && ((vha->hw)->device_type & 262144U) == 0U) { return (258); } else { } mcp->mb[0] = 122U; mcp->mb[2] = (unsigned short )((unsigned int )stats_dma >> 16); mcp->mb[3] = (unsigned short )stats_dma; mcp->mb[6] = (unsigned short )((unsigned int )(stats_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(stats_dma >> 32ULL); mcp->mb[8] = (uint16_t )((int )size_in_bytes >> 2); mcp->out_mb = 461U; mcp->in_mb = 7U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4335, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2]); } else { ql_dbg(536903680U, vha, 4336, "Done %s.\n", "qla2x00_get_xgmac_stats"); *actual_size = (int )mcp->mb[2] << 2U; } return (rval); } } int qla2x00_get_dcbx_params(scsi_qla_host_t *vha , dma_addr_t tlv_dma , uint16_t size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4337, "Entered %s.\n", "qla2x00_get_dcbx_params"); if (((((vha->hw)->device_type & 8192U) == 0U && ((vha->hw)->device_type & 16384U) == 0U) && ((vha->hw)->device_type & 65536U) == 0U) && ((vha->hw)->device_type & 262144U) == 0U) { return (258); } else { } mcp->mb[0] = 81U; mcp->mb[1] = 0U; mcp->mb[2] = (unsigned short )((unsigned int )tlv_dma 
>> 16); mcp->mb[3] = (unsigned short )tlv_dma; mcp->mb[6] = (unsigned short )((unsigned int )(tlv_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(tlv_dma >> 32ULL); mcp->mb[8] = size; mcp->out_mb = 463U; mcp->in_mb = 7U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4338, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2]); } else { ql_dbg(536903680U, vha, 4339, "Done %s.\n", "qla2x00_get_dcbx_params"); } return (rval); } } int qla2x00_read_ram_word(scsi_qla_host_t *vha , uint32_t risc_addr , uint32_t *data ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4340, "Entered %s.\n", "qla2x00_read_ram_word"); if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } mcp->mb[0] = 15U; mcp->mb[1] = (unsigned short )risc_addr; mcp->mb[8] = (unsigned short )(risc_addr >> 16); mcp->out_mb = 259U; mcp->in_mb = 13U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4341, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4342, "Done %s.\n", "qla2x00_read_ram_word"); *data = (uint32_t )(((int )mcp->mb[3] << 16) | (int )mcp->mb[2]); } return (rval); } } int qla2x00_loopback_test(scsi_qla_host_t *vha , struct msg_echo_lb *mreq , uint16_t *mresp ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; size_t __len ; void *__ret ; { mcp = & mc; ql_dbg(536903680U, vha, 4343, "Entered %s.\n", "qla2x00_loopback_test"); memset((void *)(& mcp->mb), 0, 64UL); mcp->mb[0] = 69U; mcp->mb[1] = (uint16_t )((unsigned int )mreq->options | 64U); mcp->mb[10] = (unsigned short )mreq->transfer_size; mcp->mb[11] = (unsigned short )(mreq->transfer_size >> 16); mcp->mb[14] = (unsigned short )mreq->send_dma; mcp->mb[15] = (unsigned short )((unsigned int )mreq->send_dma >> 16); mcp->mb[20] = (unsigned short )(mreq->send_dma >> 32ULL); mcp->mb[21] = (unsigned short )((unsigned int )(mreq->send_dma >> 32ULL) >> 16); mcp->mb[16] = (unsigned short )mreq->rcv_dma; mcp->mb[17] = (unsigned short )((unsigned int )mreq->rcv_dma >> 16); mcp->mb[6] = (unsigned short )(mreq->rcv_dma >> 32ULL); mcp->mb[7] = (unsigned short )((unsigned int )(mreq->rcv_dma >> 32ULL) >> 16); mcp->mb[18] = (unsigned short )mreq->iteration_count; mcp->mb[19] = (unsigned short )(mreq->iteration_count >> 16); mcp->out_mb = 4193475U; if (((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 16384U) != 0U) || ((vha->hw)->device_type & 65536U) != 0U) || ((vha->hw)->device_type & 262144U) != 0U) { mcp->out_mb = mcp->out_mb | 4U; } else { } mcp->in_mb = 786447U; mcp->buf_size = (long )mreq->transfer_size; mcp->tov = 30U; mcp->flags = 7U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4344, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x mb[19]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2], (int )mcp->mb[3], (int )mcp->mb[18], (int )mcp->mb[19]); } else { ql_dbg(536903680U, vha, 4345, "Done %s.\n", "qla2x00_loopback_test"); } __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)mresp, (void const *)(& mcp->mb), __len); } else { __ret = __builtin_memcpy((void *)mresp, (void const *)(& mcp->mb), __len); } return (rval); } } int qla2x00_echo_test(scsi_qla_host_t *vha , struct msg_echo_lb *mreq , uint16_t *mresp ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; size_t __len ; void *__ret ; { mcp = & mc; 
ha = vha->hw; ql_dbg(536903680U, vha, 4346, "Entered %s.\n", "qla2x00_echo_test"); memset((void *)(& mcp->mb), 0, 64UL); mcp->mb[0] = 68U; mcp->mb[1] = (uint16_t )((unsigned int )mreq->options | 64U); if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { mcp->mb[1] = (uint16_t )((unsigned int )mcp->mb[1] | 32768U); mcp->mb[2] = vha->fcoe_fcf_idx; } else { } mcp->mb[16] = (unsigned short )mreq->rcv_dma; mcp->mb[17] = (unsigned short )((unsigned int )mreq->rcv_dma >> 16); mcp->mb[6] = (unsigned short )(mreq->rcv_dma >> 32ULL); mcp->mb[7] = (unsigned short )((unsigned int )(mreq->rcv_dma >> 32ULL) >> 16); mcp->mb[10] = (unsigned short )mreq->transfer_size; mcp->mb[14] = (unsigned short )mreq->send_dma; mcp->mb[15] = (unsigned short )((unsigned int )mreq->send_dma >> 16); mcp->mb[20] = (unsigned short )(mreq->send_dma >> 32ULL); mcp->mb[21] = (unsigned short )((unsigned int )(mreq->send_dma >> 32ULL) >> 16); mcp->out_mb = 3392707U; if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { mcp->out_mb = mcp->out_mb | 4U; } else { } mcp->in_mb = 1U; if (((((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) || ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U)) || (ha->device_type & 32768U) != 0U) { mcp->in_mb = mcp->in_mb | 2U; } else { } if (((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) || (ha->device_type & 32768U) != 0U) { mcp->in_mb = mcp->in_mb | 8U; } else { } mcp->tov = 30U; mcp->flags = 7U; mcp->buf_size = (long )mreq->transfer_size; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4347, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { ql_dbg(536903680U, vha, 4348, "Done %s.\n", "qla2x00_echo_test"); } __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)mresp, (void const *)(& mcp->mb), __len); } else { __ret = __builtin_memcpy((void *)mresp, (void const *)(& mcp->mb), __len); } return (rval); } } int qla84xx_reset_chip(scsi_qla_host_t *vha , uint16_t enable_diagnostic ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4349, "Entered %s enable_diag=%d.\n", "qla84xx_reset_chip", (int )enable_diagnostic); mcp->mb[0] = 58U; mcp->mb[1] = enable_diagnostic; mcp->out_mb = 3U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 7U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4350, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4351, "Done %s.\n", "qla84xx_reset_chip"); } return (rval); } } int qla2x00_write_ram_word(scsi_qla_host_t *vha , uint32_t risc_addr , uint32_t data ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4352, "Entered %s.\n", "qla2x00_write_ram_word"); if (((vha->hw)->device_type & 134217728U) == 0U) { return (258); } else { } mcp->mb[0] = 13U; mcp->mb[1] = (unsigned short )risc_addr; mcp->mb[2] = (unsigned short )data; mcp->mb[3] = (unsigned short )(data >> 16); mcp->mb[8] = (unsigned short )(risc_addr >> 16); mcp->out_mb = 271U; mcp->in_mb = 1U; 
mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4353, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4354, "Done %s.\n", "qla2x00_write_ram_word"); } return (rval); } } int qla81xx_write_mpi_register(scsi_qla_host_t *vha , uint16_t *mb ) { int rval ; uint32_t stat ; uint32_t timer ; uint16_t mb0 ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; int tmp ; { mb0 = 0U; ha = vha->hw; reg = & (ha->iobase)->isp24; rval = 0; ql_dbg(536903680U, vha, 4355, "Entered %s.\n", "qla81xx_write_mpi_register"); clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); writew(1, (void volatile *)(& reg->mailbox0)); writew((int )*mb, (void volatile *)(& reg->mailbox1)); writew((int )*(mb + 1UL), (void volatile *)(& reg->mailbox2)); writew((int )*(mb + 2UL), (void volatile *)(& reg->mailbox3)); writew((int )*(mb + 3UL), (void volatile *)(& reg->mailbox4)); writel(1342177280U, (void volatile *)(& reg->hccr)); timer = 6000000U; goto ldv_61630; ldv_61629: stat = readl((void const volatile *)(& reg->host_status)); if ((stat & 32768U) != 0U) { stat = stat & 255U; if (((stat == 1U || stat == 2U) || stat == 16U) || stat == 17U) { set_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); mb0 = readw((void const volatile *)(& reg->mailbox0)); writel(2684354560U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); goto ldv_61628; } else { } } else { } __const_udelay(21475UL); timer = timer - 1U; ldv_61630: ; if (timer != 0U) { goto ldv_61629; } else { } ldv_61628: tmp = test_and_clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (tmp != 0) { rval = (int )mb0 & 16383; } else { rval = 258; } if (rval != 0) { ql_dbg(536870912U, vha, 4356, "Failed=%x mb[0]=%x.\n", rval, (int )*mb); } else { ql_dbg(536903680U, vha, 4357, "Done %s.\n", "qla81xx_write_mpi_register"); } return (rval); } } int qla2x00_get_data_rate(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4358, "Entered %s.\n", "qla2x00_get_data_rate"); if ((ha->device_type & 134217728U) == 0U) { return (258); } else { } mcp->mb[0] = 93U; mcp->mb[1] = 0U; mcp->out_mb = 3U; mcp->in_mb = 7U; if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { mcp->in_mb = mcp->in_mb | 8U; } else { } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4359, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4360, "Done %s.\n", "qla2x00_get_data_rate"); if ((unsigned int )mcp->mb[1] != 7U) { ha->link_data_rate = mcp->mb[1]; } else { } } return (rval); } } int qla81xx_get_port_config(scsi_qla_host_t *vha , uint16_t *mb ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; size_t __len ; void *__ret ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4361, "Entered %s.\n", "qla81xx_get_port_config"); if (((ha->device_type & 8192U) == 0U && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) && (ha->device_type & 262144U) == 0U) { return (258); } else { } mcp->mb[0] = 291U; mcp->out_mb = 1U; mcp->in_mb = 31U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4362, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)mb, (void const *)(& mcp->mb) + 1U, 
__len); } else { __ret = __builtin_memcpy((void *)mb, (void const *)(& mcp->mb) + 1U, __len); } ql_dbg(536903680U, vha, 4363, "Done %s.\n", "qla81xx_get_port_config"); } return (rval); } } int qla81xx_set_port_config(scsi_qla_host_t *vha , uint16_t *mb ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; size_t __len ; void *__ret ; { mcp = & mc; ql_dbg(536903680U, vha, 4364, "Entered %s.\n", "qla81xx_set_port_config"); mcp->mb[0] = 290U; __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& mcp->mb) + 1U, (void const *)mb, __len); } else { __ret = __builtin_memcpy((void *)(& mcp->mb) + 1U, (void const *)mb, __len); } mcp->out_mb = 31U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4365, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4366, "Done %s.\n", "qla81xx_set_port_config"); } return (rval); } } int qla24xx_set_fcp_prio(scsi_qla_host_t *vha , uint16_t loop_id , uint16_t priority , uint16_t *mb ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4367, "Entered %s.\n", "qla24xx_set_fcp_prio"); if (((((ha->device_type & 128U) == 0U && (ha->device_type & 256U) == 0U) && ((ha->device_type & 512U) == 0U && (ha->device_type & 1024U) == 0U)) && (ha->device_type & 4096U) == 0U) && (ha->device_type & 2048U) == 0U) { return (258); } else { } mcp->mb[0] = 26U; mcp->mb[1] = loop_id; if (*((unsigned long *)ha + 2UL) != 0UL) { mcp->mb[2] = 2U; } else { mcp->mb[2] = 4U; } mcp->mb[4] = (unsigned int )priority & 15U; mcp->mb[9] = vha->vp_idx; mcp->out_mb = 543U; mcp->in_mb = 27U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if ((unsigned long )mb != (unsigned long )((uint16_t *)0U)) { *mb = mcp->mb[0]; *(mb + 1UL) = mcp->mb[1]; *(mb + 3UL) = mcp->mb[3]; *(mb + 4UL) = mcp->mb[4]; } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4301, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4300, "Done %s.\n", "qla24xx_set_fcp_prio"); } return (rval); } } int qla2x00_get_thermal_temp(scsi_qla_host_t *vha , uint16_t *temp ) { int rval ; struct qla_hw_data *ha ; uint8_t byte ; int tmp ; int tmp___0 ; { rval = 258; ha = vha->hw; if (((ha->device_type & 134217728U) == 0U || ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U)) || (ha->device_type & 8192U) != 0U) { ql_dbg(536870912U, vha, 4432, "Thermal not supported by this card.\n"); return (rval); } else { } if ((ha->device_type & 2048U) != 0U) { if ((unsigned int )(ha->pdev)->subsystem_vendor == 4215U && (unsigned int )(ha->pdev)->subsystem_device == 373U) { rval = qla2x00_read_sfp(vha, 0ULL, & byte, 152, 1, 1, 8193); *temp = (uint16_t )byte; return (rval); } else { } if ((unsigned int )(ha->pdev)->subsystem_vendor == 4156U && (unsigned int )(ha->pdev)->subsystem_device == 13198U) { rval = qla2x00_read_sfp(vha, 0ULL, & byte, 152, 1, 1, 49153); *temp = (uint16_t )byte; return (rval); } else { } ql_dbg(536870912U, vha, 4297, "Thermal not supported by this card.\n"); return (rval); } else { } if ((ha->device_type & 16384U) != 0U) { tmp = qla82xx_read_temperature(vha); *temp = (uint16_t )tmp; rval = 0; return (rval); } else if ((ha->device_type & 262144U) != 0U) { tmp___0 = qla8044_read_temperature(vha); *temp = (uint16_t )tmp___0; rval = 0; return (rval); } else { } rval = qla2x00_read_asic_temperature(vha, 
temp); return (rval); } } int qla82xx_mbx_intr_enable(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; ql_dbg(536903680U, vha, 4119, "Entered %s.\n", "qla82xx_mbx_intr_enable"); if ((ha->device_type & 134217728U) == 0U) { return (258); } else { } memset((void *)mcp, 0, 96UL); mcp->mb[0] = 16U; mcp->mb[1] = 1U; mcp->out_mb = 3U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4118, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4110, "Done %s.\n", "qla82xx_mbx_intr_enable"); } return (rval); } } int qla82xx_mbx_intr_disable(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; ql_dbg(536903680U, vha, 4109, "Entered %s.\n", "qla82xx_mbx_intr_disable"); if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { return (258); } else { } memset((void *)mcp, 0, 96UL); mcp->mb[0] = 16U; mcp->mb[1] = 0U; mcp->out_mb = 3U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4108, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4107, "Done %s.\n", "qla82xx_mbx_intr_disable"); } return (rval); } } int qla82xx_md_get_template_size(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int rval ; { ha = vha->hw; mcp = & mc; rval = 258; ql_dbg(536903680U, vha, 4383, "Entered %s.\n", "qla82xx_md_get_template_size"); memset((void *)(& mcp->mb), 0, 64UL); mcp->mb[0] = 297U; mcp->mb[1] = 0U; mcp->mb[2] = 0U; mcp->mb[3] = 0U; mcp->out_mb = 15U; mcp->in_mb = 32767U; mcp->flags = 7U; mcp->tov = 30U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4384, "mailbox command FAILED=0x%x, subcode=%x.\n", ((int )mcp->mb[1] << 16) | (int )mcp->mb[0], ((int )mcp->mb[3] << 16) | (int )mcp->mb[2]); } else { ql_dbg(536903680U, vha, 4385, "Done %s.\n", "qla82xx_md_get_template_size"); ha->md_template_size = (uint32_t )(((int )mcp->mb[3] << 16) | (int )mcp->mb[2]); if (ha->md_template_size == 0U) { ql_dbg(536870912U, vha, 4386, "Null template size obtained.\n"); rval = 258; } else { } } return (rval); } } int qla82xx_md_get_template(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int rval ; { ha = vha->hw; mcp = & mc; rval = 258; ql_dbg(536903680U, vha, 4387, "Entered %s.\n", "qla82xx_md_get_template"); ha->md_tmplt_hdr = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )ha->md_template_size, & ha->md_tmplt_hdr_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )ha->md_tmplt_hdr == (unsigned long )((void *)0)) { ql_log(1U, vha, 4388, "Unable to allocate memory for Minidump template.\n"); return (rval); } else { } memset((void *)(& mcp->mb), 0, 64UL); mcp->mb[0] = 297U; mcp->mb[1] = 0U; mcp->mb[2] = 1U; mcp->mb[3] = 0U; mcp->mb[4] = (unsigned short )ha->md_tmplt_hdr_dma; mcp->mb[5] = (unsigned short )((unsigned int )ha->md_tmplt_hdr_dma >> 16); mcp->mb[6] = (unsigned short )(ha->md_tmplt_hdr_dma >> 32ULL); mcp->mb[7] = (unsigned short )((unsigned int )(ha->md_tmplt_hdr_dma >> 32ULL) >> 16); mcp->mb[8] = (unsigned short )ha->md_template_size; mcp->mb[9] = (unsigned short )(ha->md_template_size >> 16); mcp->flags = 7U; mcp->tov = 30U; mcp->out_mb = 4095U; mcp->in_mb = 15U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, 
vha, 4389, "mailbox command FAILED=0x%x, subcode=%x.\n", ((int )mcp->mb[1] << 16) | (int )mcp->mb[0], ((int )mcp->mb[3] << 16) | (int )mcp->mb[2]); } else { ql_dbg(536903680U, vha, 4390, "Done %s.\n", "qla82xx_md_get_template"); } return (rval); } } int qla8044_md_get_template(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; int rval ; int offset ; int size ; { ha = vha->hw; mcp = & mc; rval = 258; offset = 0; size = 36864; ql_dbg(536903680U, vha, 45343, "Entered %s.\n", "qla8044_md_get_template"); ha->md_tmplt_hdr = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )ha->md_template_size, & ha->md_tmplt_hdr_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )ha->md_tmplt_hdr == (unsigned long )((void *)0)) { ql_log(1U, vha, 45339, "Unable to allocate memory for Minidump template.\n"); return (rval); } else { } memset((void *)(& mcp->mb), 0, 64UL); goto ldv_61723; ldv_61722: mcp->mb[0] = 297U; mcp->mb[1] = 0U; mcp->mb[2] = 1U; mcp->mb[3] = 0U; mcp->mb[4] = (int )((unsigned short )ha->md_tmplt_hdr_dma) + (int )((unsigned short )offset); mcp->mb[5] = (unsigned short )(((unsigned int )ha->md_tmplt_hdr_dma + (unsigned int )offset) >> 16); mcp->mb[6] = (unsigned short )((ha->md_tmplt_hdr_dma + (dma_addr_t )offset) >> 32ULL); mcp->mb[7] = (unsigned short )((unsigned int )((ha->md_tmplt_hdr_dma + (dma_addr_t )offset) >> 32ULL) >> 16); mcp->mb[8] = (unsigned short )size; mcp->mb[9] = (unsigned short )((unsigned int )size >> 16); mcp->mb[10] = (uint16_t )offset; mcp->mb[11] = 0U; mcp->flags = 7U; mcp->tov = 30U; mcp->out_mb = 4095U; mcp->in_mb = 15U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 45340, "mailbox command FAILED=0x%x, subcode=%x.\n", ((int )mcp->mb[1] << 16) | (int )mcp->mb[0], ((int )mcp->mb[3] << 16) | (int )mcp->mb[2]); return (rval); } else { ql_dbg(536903680U, vha, 45341, "Done %s.\n", "qla8044_md_get_template"); } offset = offset + size; ldv_61723: ; if ((uint32_t )offset < ha->md_template_size) { goto ldv_61722; } else { } return (rval); } } int qla81xx_set_led_config(scsi_qla_host_t *vha , uint16_t *led_cfg ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; if ((ha->device_type & 8192U) == 0U && (ha->device_type & 65536U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4403, "Entered %s.\n", "qla81xx_set_led_config"); memset((void *)mcp, 0, 96UL); mcp->mb[0] = 293U; mcp->mb[1] = *led_cfg; mcp->mb[2] = *(led_cfg + 1UL); if ((ha->device_type & 65536U) != 0U) { mcp->mb[3] = *(led_cfg + 2UL); mcp->mb[4] = *(led_cfg + 3UL); mcp->mb[5] = *(led_cfg + 4UL); mcp->mb[6] = *(led_cfg + 5UL); } else { } mcp->out_mb = 7U; if ((ha->device_type & 65536U) != 0U) { mcp->out_mb = mcp->out_mb | 120U; } else { } mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4404, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4405, "Done %s.\n", "qla81xx_set_led_config"); } return (rval); } } int qla81xx_get_led_config(scsi_qla_host_t *vha , uint16_t *led_cfg ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; if ((ha->device_type & 8192U) == 0U && (ha->device_type & 65536U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4406, "Entered %s.\n", "qla81xx_get_led_config"); memset((void *)mcp, 0, 96UL); mcp->mb[0] = 294U; mcp->out_mb = 1U; mcp->in_mb = 7U; if ((ha->device_type & 65536U) != 0U) { mcp->in_mb = 
mcp->in_mb | 120U; } else { } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4407, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { *led_cfg = mcp->mb[1]; *(led_cfg + 1UL) = mcp->mb[2]; if ((ha->device_type & 65536U) != 0U) { *(led_cfg + 2UL) = mcp->mb[3]; *(led_cfg + 3UL) = mcp->mb[4]; *(led_cfg + 4UL) = mcp->mb[5]; *(led_cfg + 5UL) = mcp->mb[6]; } else { } ql_dbg(536903680U, vha, 4408, "Done %s.\n", "qla81xx_get_led_config"); } return (rval); } } int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha , int enable ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4391, "Entered %s.\n", "qla82xx_mbx_beacon_ctl"); memset((void *)mcp, 0, 96UL); mcp->mb[0] = 293U; if (enable != 0) { mcp->mb[7] = 14U; } else { mcp->mb[7] = 13U; } mcp->out_mb = 129U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4392, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4393, "Done %s.\n", "qla82xx_mbx_beacon_ctl"); } return (rval); } } int qla83xx_wr_reg(scsi_qla_host_t *vha , uint32_t reg , uint32_t data ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; if ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) { return (258); } else { } ql_dbg(536903680U, vha, 4400, "Entered %s.\n", "qla83xx_wr_reg"); mcp->mb[0] = 1U; mcp->mb[1] = (unsigned short )reg; mcp->mb[2] = (unsigned short )(reg >> 16); mcp->mb[3] = (unsigned short )data; mcp->mb[4] = (unsigned short )(data >> 16); mcp->out_mb = 31U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4401, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4402, "Done %s.\n", "qla83xx_wr_reg"); } return (rval); } } int qla2x00_port_logout(scsi_qla_host_t *vha , struct fc_port *fcport ) { int rval ; struct qla_hw_data *ha ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { ha = vha->hw; mcp = & mc; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { ql_dbg(536903680U, vha, 4411, "Implicit LOGO Unsupported.\n"); return (258); } else { } ql_dbg(536903680U, vha, 4412, "Entering %s.\n", "qla2x00_port_logout"); mcp->mb[0] = 86U; mcp->mb[1] = fcport->loop_id; mcp->mb[10] = 32768U; mcp->out_mb = 1027U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4413, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4414, "Done %s.\n", "qla2x00_port_logout"); } return (rval); } } int qla83xx_rd_reg(scsi_qla_host_t *vha , uint32_t reg , uint32_t *data ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; unsigned long retry_max_time ; { mcp = & mc; ha = vha->hw; retry_max_time = (unsigned long )jiffies + 500UL; if ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) { return (258); } else { } ql_dbg(536870912U, vha, 4427, "Entered %s.\n", "qla83xx_rd_reg"); retry_rd_reg: mcp->mb[0] = 9U; mcp->mb[1] = (unsigned short )reg; mcp->mb[2] = (unsigned short )(reg >> 16); mcp->out_mb = 7U; mcp->in_mb = 27U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { 
ql_dbg(536870912U, vha, 4428, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); } else { *data = (uint32_t )((int )mcp->mb[3] | ((int )mcp->mb[4] << 16)); if (*data == 3134241488U) { if ((long )(retry_max_time - (unsigned long )jiffies) < 0L) { ql_dbg(536870912U, vha, 4417, "Failure to read CAMRAM register. data=0x%x.\n", *data); return (258); } else { } msleep(100U); goto retry_rd_reg; } else { } ql_dbg(536870912U, vha, 4418, "Done %s.\n", "qla83xx_rd_reg"); } return (rval); } } int qla83xx_restart_nic_firmware(scsi_qla_host_t *vha ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; if ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) { return (258); } else { } ql_dbg(536870912U, vha, 4419, "Entered %s.\n", "qla83xx_restart_nic_firmware"); mcp->mb[0] = 61U; mcp->out_mb = 1U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4420, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1]); (*((ha->isp_ops)->fw_dump))(vha, 0); } else { ql_dbg(536870912U, vha, 4421, "Done %s.\n", "qla83xx_restart_nic_firmware"); } return (rval); } } int qla83xx_access_control(scsi_qla_host_t *vha , uint16_t options , uint32_t start_addr , uint32_t end_addr , uint16_t *sector_size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; uint8_t subcode ; struct qla_hw_data *ha ; { mcp = & mc; subcode = (unsigned char )options; ha = vha->hw; if ((ha->device_type & 65536U) == 0U) { return (258); } else { } ql_dbg(536870912U, vha, 4422, "Entered %s.\n", "qla83xx_access_control"); mcp->mb[0] = 62U; mcp->mb[1] = options; mcp->out_mb = 3U; if (((int )subcode & 4) != 0) { mcp->mb[2] = (unsigned short )start_addr; mcp->mb[3] = (unsigned short )(start_addr >> 16); mcp->mb[4] = (unsigned short )end_addr; mcp->mb[5] = (unsigned short )(end_addr >> 16); mcp->out_mb = mcp->out_mb | 60U; } else { } mcp->in_mb = 7U; if (((int )subcode & 36) == 0) { mcp->in_mb = mcp->in_mb | 24U; } else { } mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4423, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", rval, (int )mcp->mb[0], (int )mcp->mb[1], (int )mcp->mb[2], (int )mcp->mb[3], (int )mcp->mb[4]); (*((ha->isp_ops)->fw_dump))(vha, 0); } else { if (((int )subcode & 32) != 0) { *sector_size = mcp->mb[1]; } else if (((int )subcode & 192) != 0) { ql_dbg(536870912U, vha, 4424, "Driver-lock id=%x%x", (int )mcp->mb[4], (int )mcp->mb[3]); } else if (((int )subcode & 24) != 0) { ql_dbg(536870912U, vha, 4425, "Flash-lock id=%x%x", (int )mcp->mb[4], (int )mcp->mb[3]); } else { } ql_dbg(536870912U, vha, 4426, "Done %s.\n", "qla83xx_access_control"); } return (rval); } } int qla2x00_dump_mctp_data(scsi_qla_host_t *vha , dma_addr_t req_dma , uint32_t addr , uint32_t size ) { int rval ; mbx_cmd_t mc ; mbx_cmd_t *mcp ; { mcp = & mc; if (((vha->hw)->device_type & 32768U) == 0U || ((int )(vha->hw)->fw_attributes_ext[0] & 1) == 0) { return (258); } else { } ql_dbg(536903680U, vha, 4431, "Entered %s.\n", "qla2x00_dump_mctp_data"); mcp->mb[0] = 12U; mcp->mb[1] = (unsigned short )addr; mcp->mb[2] = (unsigned short )((unsigned int )req_dma >> 16); mcp->mb[3] = (unsigned short )req_dma; mcp->mb[4] = (unsigned short )(size >> 16); mcp->mb[5] = (unsigned short )size; mcp->mb[6] = (unsigned short )((unsigned int )(req_dma >> 32ULL) >> 16); mcp->mb[7] = (unsigned short )(req_dma >> 32ULL); mcp->mb[8] = 
(unsigned short )(addr >> 16); mcp->mb[10] = (uint16_t )((unsigned int )mcp->mb[10] | 128U); mcp->mb[10] = (uint16_t )((unsigned int )mcp->mb[10] | 64U); mcp->out_mb = mcp->out_mb | 1535U; mcp->in_mb = 1U; mcp->tov = 30U; mcp->flags = 0U; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4430, "Failed=%x mb[0]=%x.\n", rval, (int )mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4429, "Done %s.\n", "qla2x00_dump_mctp_data"); } return (rval); } } void activate_pending_timer_4(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_4 == (unsigned long )timer) { if (ldv_timer_state_4 == 2 || pending_flag != 0) { ldv_timer_list_4 = timer; ldv_timer_list_4->data = data; ldv_timer_state_4 = 1; } else { } return; } else { } reg_timer_4(timer); ldv_timer_list_4->data = data; return; } } void disable_suitable_timer_4(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_4) { ldv_timer_state_4 = 0; return; } else { } return; } } void choose_timer_4(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_4 = 2; return; } } int reg_timer_4(struct timer_list *timer ) { { ldv_timer_list_4 = timer; ldv_timer_state_4 = 1; return (0); } } int ldv_del_timer_23(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_24(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } int ldv_del_timer_27(struct timer_list *ldv_func_arg1 ) ; __inline static unsigned short __readw(void const volatile *addr ) { unsigned short ret ; { __asm__ volatile ("movw %1,%0": "=r" (ret): "m" (*((unsigned short volatile *)addr))); return (ret); } } __inline static unsigned int __readl(void const volatile *addr ) { unsigned int ret ; { __asm__ volatile ("movl %1,%0": "=r" (ret): "m" (*((unsigned int volatile *)addr))); return (ret); } } __inline static void writeb(unsigned char val , void volatile *addr ) { { __asm__ volatile ("movb %0,%1": : "q" (val), "m" (*((unsigned char volatile *)addr)): "memory"); return; } } void choose_timer_5(struct timer_list *timer ) ; void disable_suitable_timer_5(struct timer_list *timer ) ; void activate_pending_timer_5(struct timer_list *timer , unsigned long data , int pending_flag ) ; int reg_timer_5(struct timer_list *timer ) ; __inline static void *lowmem_page_address(struct page const *page ) { { return ((void *)((unsigned long )((unsigned long long )(((long )page + 24189255811072L) / 64L) << 12) + 0xffff880000000000UL)); } } __inline static struct page *sg_page(struct scatterlist *sg ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (98), "i" (12UL)); ldv_20741: ; goto ldv_20741; } else { } tmp___0 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection 
__bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (99), "i" (12UL)); ldv_20742: ; goto ldv_20742; } else { } return ((struct page *)(sg->page_link & 0xfffffffffffffffcUL)); } } __inline static void *sg_virt(struct scatterlist *sg ) { struct page *tmp ; void *tmp___0 ; { tmp = sg_page(sg); tmp___0 = lowmem_page_address((struct page const *)tmp); return (tmp___0 + (unsigned long )sg->offset); } } extern struct scatterlist *sg_next(struct scatterlist * ) ; __inline static void kmemcheck_mark_initialized(void *address , unsigned int n ) { { return; } } extern void debug_dma_map_sg(struct device * , struct scatterlist * , int , int , int ) ; __inline static int dma_map_sg_attrs(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int i ; int ents ; struct scatterlist *s ; void *tmp___0 ; int tmp___1 ; long tmp___2 ; { tmp = get_dma_ops(dev); ops = tmp; i = 0; s = sg; goto ldv_21291; ldv_21290: tmp___0 = sg_virt(s); kmemcheck_mark_initialized(tmp___0, s->length); i = i + 1; s = sg_next(s); ldv_21291: ; if (i < nents) { goto ldv_21290; } else { } tmp___1 = valid_dma_direction((int )dir); tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (52), "i" (12UL)); ldv_21293: ; goto ldv_21293; } else { } ents = (*(ops->map_sg))(dev, sg, nents, dir, attrs); debug_dma_map_sg(dev, sg, nents, ents, (int )dir); return (ents); } } int ldv_scsi_add_host_with_dma_28(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; __inline static unsigned char scsi_host_get_guard(struct Scsi_Host *shost ) { { return (shost->prot_guard_type); } } __inline static sector_t blk_rq_pos(struct request const *rq ) { { return ((sector_t )rq->__sector); } } __inline static unsigned int scsi_sg_count(struct scsi_cmnd *cmd ) { { return (cmd->sdb.table.nents); } } __inline static struct scatterlist *scsi_sglist(struct scsi_cmnd *cmd ) { { return (cmd->sdb.table.sgl); } } __inline static unsigned int scsi_bufflen(struct scsi_cmnd *cmd ) { { return (cmd->sdb.length); } } __inline static unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd ) { { return (scmd->prot_type); } } __inline static sector_t scsi_get_lba(struct scsi_cmnd *scmd ) { sector_t tmp ; { tmp = blk_rq_pos((struct request const *)scmd->request); return (tmp); } } void *qla2x00_alloc_iocbs(struct scsi_qla_host *vha , srb_t *sp ) ; int qla2x00_start_bidir(srb_t *sp , struct scsi_qla_host *vha , uint32_t tot_dsds ) ; int qla2x00_issue_marker(scsi_qla_host_t *vha , int ha_locked ) ; void qla2x00_process_response_queue(struct rsp_que *rsp ) ; void qla24xx_process_response_queue(struct scsi_qla_host *vha , struct rsp_que *rsp ) ; void qlafx00_tm_iocb(srb_t *sp , struct tsk_mgmt_entry_fx00 *ptm_iocb ) ; void qlafx00_abort_iocb(srb_t *sp , struct abort_iocb_entry_fx00 *pabt_iocb ) ; void qlafx00_fxdisc_iocb(srb_t *sp , struct fxdisc_entry_fx00 *pfxiocb ) ; void qla2x00_start_iocbs(struct scsi_qla_host *vha , struct req_que *req ) ; void qla82xx_start_iocbs(scsi_qla_host_t *vha ) ; __inline static uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *vha , uint16_t dsds ) { uint16_t iocbs ; { iocbs = 1U; if ((unsigned int )dsds > 1U) { iocbs = (int 
)((uint16_t )(((int )dsds + -1) / 5)) + (int )iocbs; if (((int )dsds + -1) % 5 != 0) { iocbs = (uint16_t )((int )iocbs + 1); } else { } } else { } return (iocbs); } } __inline static uint16_t qla2x00_debounce_register___0(uint16_t volatile *addr ) { uint16_t volatile first ; uint16_t volatile second ; unsigned short tmp ; unsigned short tmp___0 ; { ldv_43294: tmp = readw((void const volatile *)addr); first = tmp; __asm__ volatile ("": : : "memory"); cpu_relax(); tmp___0 = readw((void const volatile *)addr); second = tmp___0; if ((int )((unsigned short )first) != (int )((unsigned short )second)) { goto ldv_43294; } else { } return ((uint16_t )first); } } __inline static int qla2x00_hba_err_chk_enabled(srb_t *sp ) { unsigned char tmp ; { tmp = scsi_get_prot_op(sp->u.scmd.cmd); switch ((int )tmp) { case 3: ; case 4: ; if (ql2xenablehba_err_chk > 0) { return (1); } else { } goto ldv_43377; case 5: ; case 6: ; if (ql2xenablehba_err_chk > 1) { return (1); } else { } goto ldv_43377; case 1: ; case 2: ; return (1); } ldv_43377: ; return (0); } } __inline static int scsi_populate_tag_msg(struct scsi_cmnd *cmd , char *msg ) { struct request *req ; char *tmp ; char *tmp___0 ; { req = cmd->request; if ((req->cmd_flags & 1048576U) != 0U) { tmp = msg; msg = msg + 1; *tmp = 32; tmp___0 = msg; msg = msg + 1; *tmp___0 = (char )req->tag; return (2); } else { } return (0); } } static void qla25xx_set_que(srb_t *sp , struct rsp_que **rsp ) ; __inline static uint16_t qla2x00_get_cmd_direction(srb_t *sp ) { uint16_t cflags ; struct scsi_cmnd *cmd ; struct scsi_qla_host *vha ; unsigned int tmp ; unsigned int tmp___0 ; { cmd = sp->u.scmd.cmd; vha = (sp->fcport)->vha; cflags = 0U; if ((unsigned int )cmd->sc_data_direction == 1U) { cflags = 64U; tmp = scsi_bufflen(cmd); vha->qla_stats.output_bytes = vha->qla_stats.output_bytes + (uint64_t )tmp; vha->qla_stats.output_requests = vha->qla_stats.output_requests + 1ULL; } else if ((unsigned int )cmd->sc_data_direction == 2U) { cflags = 32U; tmp___0 = scsi_bufflen(cmd); vha->qla_stats.input_bytes = vha->qla_stats.input_bytes + (uint64_t )tmp___0; vha->qla_stats.input_requests = vha->qla_stats.input_requests + 1ULL; } else { } return (cflags); } } uint16_t qla2x00_calc_iocbs_32(uint16_t dsds ) { uint16_t iocbs ; { iocbs = 1U; if ((unsigned int )dsds > 3U) { iocbs = (int )((uint16_t )(((int )dsds + -3) / 7)) + (int )iocbs; if (((int )dsds + -3) % 7 != 0) { iocbs = (uint16_t )((int )iocbs + 1); } else { } } else { } return (iocbs); } } uint16_t qla2x00_calc_iocbs_64(uint16_t dsds ) { uint16_t iocbs ; { iocbs = 1U; if ((unsigned int )dsds > 2U) { iocbs = (int )((uint16_t )(((int )dsds + -2) / 5)) + (int )iocbs; if (((int )dsds + -2) % 5 != 0) { iocbs = (uint16_t )((int )iocbs + 1); } else { } } else { } return (iocbs); } } __inline static cont_entry_t *qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha ) { cont_entry_t *cont_pkt ; struct req_que *req ; { req = vha->req; req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } cont_pkt = (cont_entry_t *)req->ring_ptr; *((uint32_t *)(& cont_pkt->entry_type)) = 2U; return (cont_pkt); } } __inline static cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha , struct req_que *req ) { cont_a64_entry_t *cont_pkt ; { req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; 
} else { req->ring_ptr = req->ring_ptr + 1; } cont_pkt = (cont_a64_entry_t *)req->ring_ptr; *((uint32_t *)(& cont_pkt->entry_type)) = ((vha->hw)->device_type & 131072U) != 0U ? 3U : 10U; return (cont_pkt); } } __inline static int qla24xx_configure_prot_mode(srb_t *sp , uint16_t *fw_prot_opts ) { struct scsi_cmnd *cmd ; uint8_t guard ; unsigned char tmp ; unsigned char tmp___0 ; unsigned int tmp___1 ; { cmd = sp->u.scmd.cmd; tmp = scsi_host_get_guard((cmd->device)->host); guard = tmp; *fw_prot_opts = 0U; tmp___0 = scsi_get_prot_op(cmd); switch ((int )tmp___0) { case 3: *fw_prot_opts = (uint16_t )((unsigned int )*fw_prot_opts | 1U); goto ldv_60791; case 4: *fw_prot_opts = *fw_prot_opts; goto ldv_60791; case 1: *fw_prot_opts = *fw_prot_opts; goto ldv_60791; case 2: *fw_prot_opts = (uint16_t )((unsigned int )*fw_prot_opts | 1U); goto ldv_60791; case 5: ; case 6: ; if (((int )guard & 2) != 0) { *fw_prot_opts = (uint16_t )((unsigned int )*fw_prot_opts | 6U); } else { *fw_prot_opts = (uint16_t )((unsigned int )*fw_prot_opts | 2U); } goto ldv_60791; default: *fw_prot_opts = (uint16_t )((unsigned int )*fw_prot_opts | 2U); goto ldv_60791; } ldv_60791: tmp___1 = scsi_prot_sg_count(cmd); return ((int )tmp___1); } } void qla2x00_build_scsi_iocbs_32(srb_t *sp , cmd_entry_t *cmd_pkt , uint16_t tot_dsds ) { uint16_t avail_dsds ; uint32_t *cur_dsd ; scsi_qla_host_t *vha ; struct scsi_cmnd *cmd ; struct scatterlist *sg ; int i ; unsigned int tmp ; uint16_t tmp___0 ; cont_entry_t *cont_pkt ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; { cmd = sp->u.scmd.cmd; *((uint32_t *)(& cmd_pkt->entry_type)) = 17U; tmp = scsi_bufflen(cmd); if (tmp == 0U || (unsigned int )cmd->sc_data_direction == 3U) { cmd_pkt->byte_count = 0U; return; } else { } vha = (sp->fcport)->vha; tmp___0 = qla2x00_get_cmd_direction(sp); cmd_pkt->control_flags = (uint16_t )((int )cmd_pkt->control_flags | (int )tmp___0); avail_dsds = 3U; cur_dsd = & cmd_pkt->dseg_0_address; i = 0; sg = scsi_sglist(cmd); goto ldv_60811; ldv_60810: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt = qla2x00_prep_cont_type0_iocb(vha); cur_dsd = & cont_pkt->dseg_0_address; avail_dsds = 7U; } else { } tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = (unsigned int )sg->dma_address; tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = sg->dma_length; avail_dsds = (uint16_t )((int )avail_dsds - 1); i = i + 1; sg = sg_next(sg); ldv_60811: ; if ((int )tot_dsds > i) { goto ldv_60810; } else { } return; } } void qla2x00_build_scsi_iocbs_64(srb_t *sp , cmd_entry_t *cmd_pkt , uint16_t tot_dsds ) { uint16_t avail_dsds ; uint32_t *cur_dsd ; scsi_qla_host_t *vha ; struct scsi_cmnd *cmd ; struct scatterlist *sg ; int i ; unsigned int tmp ; uint16_t tmp___0 ; dma_addr_t sle_dma ; cont_a64_entry_t *cont_pkt ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; uint32_t *tmp___3 ; { cmd = sp->u.scmd.cmd; *((uint32_t *)(& cmd_pkt->entry_type)) = 25U; tmp = scsi_bufflen(cmd); if (tmp == 0U || (unsigned int )cmd->sc_data_direction == 3U) { cmd_pkt->byte_count = 0U; return; } else { } vha = (sp->fcport)->vha; tmp___0 = qla2x00_get_cmd_direction(sp); cmd_pkt->control_flags = (uint16_t )((int )cmd_pkt->control_flags | (int )tmp___0); avail_dsds = 2U; cur_dsd = & cmd_pkt->dseg_0_address; i = 0; sg = scsi_sglist(cmd); goto ldv_60827; ldv_60826: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); cur_dsd = (uint32_t *)(& cont_pkt->dseg_0_address); avail_dsds = 5U; } else { } sle_dma = sg->dma_address; tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = 
(unsigned int )sle_dma; tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = (unsigned int )(sle_dma >> 32ULL); tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = sg->dma_length; avail_dsds = (uint16_t )((int )avail_dsds - 1); i = i + 1; sg = sg_next(sg); ldv_60827: ; if ((int )tot_dsds > i) { goto ldv_60826; } else { } return; } } int qla2x00_start_scsi(srb_t *sp ) { int ret ; int nseg ; unsigned long flags ; scsi_qla_host_t *vha ; struct scsi_cmnd *cmd ; uint32_t *clr_ptr ; uint32_t index ; uint32_t handle ; cmd_entry_t *cmd_pkt ; uint16_t cnt ; uint16_t req_cnt ; uint16_t tot_dsds ; struct device_reg_2xxx *reg ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; char tag[2U] ; int tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; struct scatterlist *tmp___2 ; long tmp___3 ; unsigned int tmp___4 ; int tmp___5 ; size_t __len ; void *__ret ; { ret = 0; vha = (sp->fcport)->vha; ha = vha->hw; reg = & (ha->iobase)->isp; cmd = sp->u.scmd.cmd; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); tot_dsds = 0U; if ((unsigned int )vha->marker_needed != 0U) { tmp = qla2x00_marker(vha, req, rsp, 0, 0, 2); if (tmp != 0) { return (258); } else { } vha->marker_needed = 0U; } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); handle = req->current_outstanding_cmd; index = 1U; goto ldv_60854; ldv_60853: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_60852; } else { } index = index + 1U; ldv_60854: ; if ((uint32_t )req->num_outstanding_cmds > index) { goto ldv_60853; } else { } ldv_60852: ; if ((uint32_t )req->num_outstanding_cmds == index) { goto queuing_error; } else { } tmp___4 = scsi_sg_count(cmd); if (tmp___4 != 0U) { tmp___1 = scsi_sg_count(cmd); tmp___2 = scsi_sglist(cmd); nseg = dma_map_sg_attrs(& (ha->pdev)->dev, tmp___2, (int )tmp___1, cmd->sc_data_direction, (struct dma_attrs *)0); tmp___3 = ldv__builtin_expect(nseg == 0, 0L); if (tmp___3 != 0L) { goto queuing_error; } else { } } else { nseg = 0; } tot_dsds = (uint16_t )nseg; req_cnt = (*((ha->isp_ops)->calc_req_entries))((int )tot_dsds); if ((int )req->cnt < (int )req_cnt + 2) { cnt = __readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->u.isp2100.mailbox4 : & reg->u.isp2300.req_q_out)); if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } if ((int )req->cnt < (int )req_cnt + 2) { goto queuing_error; } else { } } else { } req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)((unsigned long )handle); req->cnt = (int )req->cnt - (int )req_cnt; cmd_pkt = (cmd_entry_t *)req->ring_ptr; cmd_pkt->handle = handle; clr_ptr = (uint32_t *)cmd_pkt + 2UL; memset((void *)clr_ptr, 0, 56UL); cmd_pkt->dseg_count = tot_dsds; if ((int )ha->device_type < 0) { cmd_pkt->target.extended = (sp->fcport)->loop_id; } else { cmd_pkt->target.id.standard = (unsigned char )(sp->fcport)->loop_id; } cmd_pkt->lun = (unsigned short )(cmd->device)->lun; tmp___5 = scsi_populate_tag_msg(cmd, (char *)(& tag)); if (tmp___5 != 0) { switch ((int )tag[0]) { case 33: cmd_pkt->control_flags = 2U; goto ldv_60857; case 34: cmd_pkt->control_flags = 4U; goto ldv_60857; default: cmd_pkt->control_flags = 8U; goto ldv_60857; } ldv_60857: ; } else { cmd_pkt->control_flags = 8U; } __len = (size_t )cmd->cmd_len; __ret = __builtin_memcpy((void *)(& cmd_pkt->scsi_cdb), (void const *)cmd->cmnd, __len); cmd_pkt->byte_count = scsi_bufflen(cmd); (*((ha->isp_ops)->build_iocbs))(sp, cmd_pkt, (int )tot_dsds); cmd_pkt->entry_count = (unsigned char )req_cnt; __asm__ volatile ("sfence": : : "memory"); req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } sp->flags = (uint16_t )((unsigned int )sp->flags | 1U); writew((int )req->ring_index, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox4 : & reg->u.isp2300.req_q_in)); __readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox4 : & reg->u.isp2300.req_q_in)); if (*((unsigned long *)vha + 19UL) != 0UL && (rsp->ring_ptr)->signature != 3735936685U) { qla2x00_process_response_queue(rsp); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); queuing_error: ; if ((unsigned int )tot_dsds != 0U) { scsi_dma_unmap(cmd); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (258); } } void qla2x00_start_iocbs(struct scsi_qla_host *vha , struct req_que *req ) { struct qla_hw_data *ha ; device_reg_t *reg ; { ha = vha->hw; reg = (unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) ? 
ha->mqiobase + (unsigned long )((int )req->id * 4096) : ha->iobase; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { qla82xx_start_iocbs(vha); } else { req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } if ((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { writel((unsigned int )req->ring_index, (void volatile *)req->req_q_in); __readl((void const volatile *)(& (ha->iobase)->isp24.hccr)); } else if ((ha->device_type & 131072U) != 0U) { writel((unsigned int )req->ring_index, (void volatile *)(& reg->ispfx00.req_q_in)); __readl((void const volatile *)(& reg->ispfx00.req_q_in)); writel(ha->rqstq_intr_code, (void volatile *)ha->cregbase + 133636U); } else if ((ha->device_type & 134217728U) != 0U) { writel((unsigned int )req->ring_index, (void volatile *)(& reg->isp24.req_q_in)); __readl((void const volatile *)(& reg->isp24.req_q_in)); } else { writew((int )req->ring_index, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->isp.u.isp2100.mailbox4 : & reg->isp.u.isp2300.req_q_in)); __readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->isp.u.isp2100.mailbox4 : & reg->isp.u.isp2300.req_q_in)); } } return; } } static int __qla2x00_marker(struct scsi_qla_host *vha , struct req_que *req , struct rsp_que *rsp , uint16_t loop_id , uint16_t lun , uint8_t type ) { mrk_entry_t *mrk ; struct mrk_entry_24xx *mrk24 ; struct mrk_entry_fx00 *mrkfx ; struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; void *tmp___0 ; { mrk24 = (struct mrk_entry_24xx *)0; mrkfx = (struct mrk_entry_fx00 *)0; ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; req = *(ha->req_q_map); tmp___0 = qla2x00_alloc_iocbs(vha, (srb_t *)0); mrk = (mrk_entry_t *)tmp___0; if ((unsigned long )mrk == (unsigned long )((mrk_entry_t *)0)) { ql_log(1U, base_vha, 12326, "Failed to allocate Marker IOCB.\n"); return (258); } else { } mrk->entry_type = 4U; mrk->modifier = type; if ((unsigned int )type != 2U) { if ((ha->device_type & 131072U) != 0U) { mrkfx = (struct mrk_entry_fx00 *)mrk; mrkfx->handle = ((unsigned int )req->id << 16) | mrkfx->handle; mrkfx->handle_hi = 0U; mrkfx->tgt_id = loop_id; mrkfx->lun[1] = (unsigned char )lun; mrkfx->lun[2] = (unsigned char )((int )lun >> 8); host_to_fcp_swap((uint8_t *)(& mrkfx->lun), 8U); } else if ((ha->device_type & 134217728U) != 0U) { mrk24 = (struct mrk_entry_24xx *)mrk; mrk24->nport_handle = loop_id; mrk24->lun[1] = (unsigned char )lun; mrk24->lun[2] = (unsigned char )((int )lun >> 8); host_to_fcp_swap((uint8_t *)(& mrk24->lun), 8U); mrk24->vp_index = (uint8_t )vha->vp_idx; mrk24->handle = ((unsigned int )req->id << 16) | mrk24->handle; } else { if ((int )ha->device_type < 0) { mrk->target.extended = loop_id; } else { mrk->target.id.standard = (unsigned char )loop_id; } mrk->lun = lun; } } else { } __asm__ volatile ("sfence": : : "memory"); qla2x00_start_iocbs(vha, req); return (0); } } int qla2x00_marker(struct scsi_qla_host *vha , struct req_que *req , struct rsp_que *rsp , uint16_t loop_id , uint16_t lun , uint8_t type ) { int ret ; unsigned long flags ; raw_spinlock_t *tmp ; { flags = 0UL; tmp = spinlock_check(& (vha->hw)->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ret = __qla2x00_marker(vha, req, rsp, (int )loop_id, (int )lun, (int 
)type); spin_unlock_irqrestore(& (vha->hw)->hardware_lock, flags); return (ret); } } int qla2x00_issue_marker(scsi_qla_host_t *vha , int ha_locked ) { int tmp ; int tmp___0 ; { if (ha_locked != 0) { tmp = __qla2x00_marker(vha, vha->req, (vha->req)->rsp, 0, 0, 2); if (tmp != 0) { return (258); } else { } } else { tmp___0 = qla2x00_marker(vha, vha->req, (vha->req)->rsp, 0, 0, 2); if (tmp___0 != 0) { return (258); } else { } } vha->marker_needed = 0U; return (0); } } __inline static int qla24xx_build_scsi_type_6_iocbs(srb_t *sp , struct cmd_type_6 *cmd_pkt , uint16_t tot_dsds ) { uint32_t *cur_dsd ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct scsi_cmnd *cmd ; struct scatterlist *cur_seg ; uint32_t *dsd_seg ; void *next_dsd ; uint8_t avail_dsds ; uint8_t first_iocb ; uint32_t dsd_list_len ; struct dsd_dma *dsd_ptr ; struct ct6_dsd *ctx ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; struct list_head const *__mptr ; uint32_t *tmp___2 ; uint32_t *tmp___3 ; uint32_t *tmp___4 ; uint32_t *tmp___5 ; uint32_t *tmp___6 ; dma_addr_t sle_dma ; uint32_t *tmp___7 ; uint32_t *tmp___8 ; uint32_t *tmp___9 ; uint32_t *tmp___10 ; uint32_t *tmp___11 ; uint32_t *tmp___12 ; { cur_dsd = (uint32_t *)0U; first_iocb = 1U; cmd = sp->u.scmd.cmd; *((uint32_t *)(& cmd_pkt->entry_type)) = 72U; tmp = scsi_bufflen(cmd); if (tmp == 0U || (unsigned int )cmd->sc_data_direction == 3U) { cmd_pkt->byte_count = 0U; return (0); } else { } vha = (sp->fcport)->vha; ha = vha->hw; if ((unsigned int )cmd->sc_data_direction == 1U) { cmd_pkt->control_flags = 1U; tmp___0 = scsi_bufflen(cmd); vha->qla_stats.output_bytes = vha->qla_stats.output_bytes + (uint64_t )tmp___0; vha->qla_stats.output_requests = vha->qla_stats.output_requests + 1ULL; } else if ((unsigned int )cmd->sc_data_direction == 2U) { cmd_pkt->control_flags = 2U; tmp___1 = scsi_bufflen(cmd); vha->qla_stats.input_bytes = vha->qla_stats.input_bytes + (uint64_t )tmp___1; vha->qla_stats.input_requests = vha->qla_stats.input_requests + 1ULL; } else { } cur_seg = scsi_sglist(cmd); ctx = (struct ct6_dsd *)sp->u.scmd.ctx; goto ldv_60923; ldv_60922: avail_dsds = (unsigned int )tot_dsds <= 37U ? 
(uint8_t )tot_dsds : 37U; tot_dsds = (int )tot_dsds - (int )((uint16_t )avail_dsds); dsd_list_len = (uint32_t )(((int )avail_dsds + 1) * 12); __mptr = (struct list_head const *)ha->gbl_dsd_list.next; dsd_ptr = (struct dsd_dma *)__mptr; next_dsd = dsd_ptr->dsd_addr; list_del(& dsd_ptr->list); ha->gbl_dsd_avail = (uint16_t )((int )ha->gbl_dsd_avail - 1); list_add_tail(& dsd_ptr->list, & ctx->dsd_list); ctx->dsd_use_cnt = ctx->dsd_use_cnt + 1; ha->gbl_dsd_inuse = (uint16_t )((int )ha->gbl_dsd_inuse + 1); if ((unsigned int )first_iocb != 0U) { first_iocb = 0U; dsd_seg = (uint32_t *)(& cmd_pkt->fcp_data_dseg_address); tmp___2 = dsd_seg; dsd_seg = dsd_seg + 1; *tmp___2 = (unsigned int )dsd_ptr->dsd_list_dma; tmp___3 = dsd_seg; dsd_seg = dsd_seg + 1; *tmp___3 = (unsigned int )(dsd_ptr->dsd_list_dma >> 32ULL); cmd_pkt->fcp_data_dseg_len = dsd_list_len; } else { tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = (unsigned int )dsd_ptr->dsd_list_dma; tmp___5 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___5 = (unsigned int )(dsd_ptr->dsd_list_dma >> 32ULL); tmp___6 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___6 = dsd_list_len; } cur_dsd = (uint32_t *)next_dsd; goto ldv_60920; ldv_60919: sle_dma = cur_seg->dma_address; tmp___7 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___7 = (unsigned int )sle_dma; tmp___8 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___8 = (unsigned int )(sle_dma >> 32ULL); tmp___9 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___9 = cur_seg->dma_length; cur_seg = sg_next(cur_seg); avail_dsds = (uint8_t )((int )avail_dsds - 1); ldv_60920: ; if ((unsigned int )avail_dsds != 0U) { goto ldv_60919; } else { } ldv_60923: ; if ((unsigned int )tot_dsds != 0U) { goto ldv_60922; } else { } tmp___10 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___10 = 0U; tmp___11 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___11 = 0U; tmp___12 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___12 = 0U; cmd_pkt->control_flags = (uint16_t )((unsigned int )cmd_pkt->control_flags | 4U); return (0); } } __inline uint16_t qla24xx_calc_dsd_lists(uint16_t dsds ) { uint16_t dsd_lists ; { dsd_lists = 0U; dsd_lists = (uint16_t )((unsigned int )dsds / 37U); if ((unsigned int )dsds % 37U != 0U) { dsd_lists = (uint16_t )((int )dsd_lists + 1); } else { } return (dsd_lists); } } __inline void qla24xx_build_scsi_iocbs(srb_t *sp , struct cmd_type_7 *cmd_pkt , uint16_t tot_dsds ) { uint16_t avail_dsds ; uint32_t *cur_dsd ; scsi_qla_host_t *vha ; struct scsi_cmnd *cmd ; struct scatterlist *sg ; int i ; struct req_que *req ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; dma_addr_t sle_dma ; cont_a64_entry_t *cont_pkt ; uint32_t *tmp___2 ; uint32_t *tmp___3 ; uint32_t *tmp___4 ; { cmd = sp->u.scmd.cmd; *((uint32_t *)(& cmd_pkt->entry_type)) = 24U; tmp = scsi_bufflen(cmd); if (tmp == 0U || (unsigned int )cmd->sc_data_direction == 3U) { cmd_pkt->byte_count = 0U; return; } else { } vha = (sp->fcport)->vha; req = vha->req; if ((unsigned int )cmd->sc_data_direction == 1U) { cmd_pkt->task_mgmt_flags = 1U; tmp___0 = scsi_bufflen(cmd); vha->qla_stats.output_bytes = vha->qla_stats.output_bytes + (uint64_t )tmp___0; vha->qla_stats.output_requests = vha->qla_stats.output_requests + 1ULL; } else if ((unsigned int )cmd->sc_data_direction == 2U) { cmd_pkt->task_mgmt_flags = 2U; tmp___1 = scsi_bufflen(cmd); vha->qla_stats.input_bytes = vha->qla_stats.input_bytes + (uint64_t )tmp___1; vha->qla_stats.input_requests = vha->qla_stats.input_requests + 1ULL; } else { } avail_dsds = 1U; cur_dsd = (uint32_t *)(& cmd_pkt->dseg_0_address); i = 0; sg = scsi_sglist(cmd); goto ldv_60944; 
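/* DSD fill loop for qla24xx_build_scsi_iocbs(): each pass stores one 64-bit address/length pair from the scatter-gather list; when avail_dsds reaches zero a Continuation Type 1 IOCB is chained in, providing five more descriptor slots. */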
ldv_60943: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); cur_dsd = (uint32_t *)(& cont_pkt->dseg_0_address); avail_dsds = 5U; } else { } sle_dma = sg->dma_address; tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = (unsigned int )sle_dma; tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = (unsigned int )(sle_dma >> 32ULL); tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = sg->dma_length; avail_dsds = (uint16_t )((int )avail_dsds - 1); i = i + 1; sg = sg_next(sg); ldv_60944: ; if ((int )tot_dsds > i) { goto ldv_60943; } else { } return; } } __inline static void qla24xx_set_t10dif_tags(srb_t *sp , struct fw_dif_context *pkt , unsigned int protcnt ) { struct scsi_cmnd *cmd ; unsigned char tmp ; sector_t tmp___0 ; int tmp___1 ; sector_t tmp___2 ; int tmp___3 ; uint8_t tmp___4 ; uint8_t tmp___5 ; uint8_t tmp___6 ; sector_t tmp___7 ; int tmp___8 ; { cmd = sp->u.scmd.cmd; tmp = scsi_get_prot_type(cmd); switch ((int )tmp) { case 0: tmp___0 = scsi_get_lba(cmd); pkt->ref_tag = (unsigned int )tmp___0; tmp___1 = qla2x00_hba_err_chk_enabled(sp); if (tmp___1 == 0) { goto ldv_60958; } else { } pkt->ref_tag_mask[0] = 255U; pkt->ref_tag_mask[1] = 255U; pkt->ref_tag_mask[2] = 255U; pkt->ref_tag_mask[3] = 255U; goto ldv_60958; case 2: pkt->app_tag = 0U; pkt->app_tag_mask[0] = 0U; pkt->app_tag_mask[1] = 0U; tmp___2 = scsi_get_lba(cmd); pkt->ref_tag = (unsigned int )tmp___2; tmp___3 = qla2x00_hba_err_chk_enabled(sp); if (tmp___3 == 0) { goto ldv_60958; } else { } pkt->ref_tag_mask[0] = 255U; pkt->ref_tag_mask[1] = 255U; pkt->ref_tag_mask[2] = 255U; pkt->ref_tag_mask[3] = 255U; goto ldv_60958; case 3: tmp___6 = 0U; pkt->ref_tag_mask[3] = tmp___6; tmp___5 = tmp___6; pkt->ref_tag_mask[2] = tmp___5; tmp___4 = tmp___5; pkt->ref_tag_mask[1] = tmp___4; pkt->ref_tag_mask[0] = tmp___4; goto ldv_60958; case 1: tmp___7 = scsi_get_lba(cmd); pkt->ref_tag = (unsigned int )tmp___7; pkt->app_tag = 0U; pkt->app_tag_mask[0] = 0U; pkt->app_tag_mask[1] = 0U; tmp___8 = qla2x00_hba_err_chk_enabled(sp); if (tmp___8 == 0) { goto ldv_60958; } else { } pkt->ref_tag_mask[0] = 255U; pkt->ref_tag_mask[1] = 255U; pkt->ref_tag_mask[2] = 255U; pkt->ref_tag_mask[3] = 255U; goto ldv_60958; } ldv_60958: ; return; } } static int qla24xx_get_one_block_sg(uint32_t blk_sz , struct qla2_sgx *sgx , uint32_t *partial ) { struct scatterlist *sg ; uint32_t cumulative_partial ; uint32_t sg_len ; dma_addr_t sg_dma_addr ; { if (sgx->num_bytes == sgx->tot_bytes) { return (0); } else { } sg = sgx->cur_sg; cumulative_partial = sgx->tot_partial; sg_dma_addr = sg->dma_address; sg_len = sg->dma_length; sgx->dma_addr = (dma_addr_t )sgx->bytes_consumed + sg_dma_addr; if ((sg_len - sgx->bytes_consumed) + cumulative_partial >= blk_sz) { sgx->dma_len = blk_sz - cumulative_partial; sgx->tot_partial = 0U; sgx->num_bytes = sgx->num_bytes + blk_sz; *partial = 0U; } else { sgx->dma_len = sg_len - sgx->bytes_consumed; sgx->tot_partial = sgx->tot_partial + sgx->dma_len; *partial = 1U; } sgx->bytes_consumed = sgx->bytes_consumed + sgx->dma_len; if (sgx->bytes_consumed == sg_len) { sg = sg_next(sg); sgx->num_sg = sgx->num_sg + 1U; sgx->cur_sg = sg; sgx->bytes_consumed = 0U; } else { } return (1); } } static int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha , srb_t *sp , uint32_t *dsd , uint16_t tot_dsds ) { void *next_dsd ; uint8_t avail_dsds ; uint32_t dsd_list_len ; struct dsd_dma *dsd_ptr ; struct scatterlist *sg_prot ; uint32_t *cur_dsd ; uint16_t used_dsds ; uint32_t prot_int ; uint32_t partial ; 
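/* Remaining locals of qla24xx_walk_and_build_sglist_no_difb(), which walks the data scatter-gather list one logical block (prot_int bytes) at a time and interleaves an 8-byte protection segment from the protection list after every completed block. */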
struct qla2_sgx sgx ; dma_addr_t sle_dma ; uint32_t sle_dma_len ; uint32_t tot_prot_dma_len ; struct scsi_cmnd *cmd ; void *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; uint32_t *tmp___3 ; uint32_t *tmp___4 ; uint32_t *tmp___5 ; int tmp___6 ; uint32_t *tmp___7 ; uint32_t *tmp___8 ; uint32_t *tmp___9 ; { avail_dsds = 0U; cur_dsd = dsd; used_dsds = tot_dsds; tot_prot_dma_len = 0U; cmd = sp->u.scmd.cmd; prot_int = (cmd->device)->sector_size; memset((void *)(& sgx), 0, 48UL); sgx.tot_bytes = scsi_bufflen(cmd); sgx.cur_sg = scsi_sglist(cmd); sgx.sp = sp; sg_prot = scsi_prot_sglist(cmd); goto ldv_61003; ldv_61002: sle_dma = sgx.dma_addr; sle_dma_len = sgx.dma_len; alloc_and_fill: ; if ((unsigned int )avail_dsds == 0U) { avail_dsds = (unsigned int )used_dsds <= 37U ? (uint8_t )used_dsds : 37U; dsd_list_len = (uint32_t )(((int )avail_dsds + 1) * 12); used_dsds = (int )used_dsds - (int )((uint16_t )avail_dsds); tmp = kzalloc(32UL, 32U); dsd_ptr = (struct dsd_dma *)tmp; if ((unsigned long )dsd_ptr == (unsigned long )((struct dsd_dma *)0)) { return (1); } else { } next_dsd = dma_pool_alloc(ha->dl_dma_pool, 32U, & dsd_ptr->dsd_list_dma); dsd_ptr->dsd_addr = next_dsd; if ((unsigned long )next_dsd == (unsigned long )((void *)0)) { kfree((void const *)dsd_ptr); return (1); } else { } list_add_tail(& dsd_ptr->list, & ((struct crc_context *)sp->u.scmd.ctx)->dsd_list); sp->flags = (uint16_t )((unsigned int )sp->flags | 32U); tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )dsd_ptr->dsd_list_dma; tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = (unsigned int )(dsd_ptr->dsd_list_dma >> 32ULL); tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = dsd_list_len; cur_dsd = (uint32_t *)next_dsd; } else { } tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = (unsigned int )sle_dma; tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = (unsigned int )(sle_dma >> 32ULL); tmp___5 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___5 = sle_dma_len; avail_dsds = (uint8_t )((int )avail_dsds - 1); if (partial == 0U) { sle_dma = sg_prot->dma_address + (dma_addr_t )tot_prot_dma_len; sle_dma_len = 8U; tot_prot_dma_len = tot_prot_dma_len + sle_dma_len; if (sg_prot->dma_length == tot_prot_dma_len) { tot_prot_dma_len = 0U; sg_prot = sg_next(sg_prot); } else { } partial = 1U; goto alloc_and_fill; } else { } ldv_61003: tmp___6 = qla24xx_get_one_block_sg(prot_int, & sgx, & partial); if (tmp___6 != 0) { goto ldv_61002; } else { } tmp___7 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___7 = 0U; tmp___8 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___8 = 0U; tmp___9 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___9 = 0U; return (0); } } static int qla24xx_walk_and_build_sglist(struct qla_hw_data *ha , srb_t *sp , uint32_t *dsd , uint16_t tot_dsds ) { void *next_dsd ; uint8_t avail_dsds ; uint32_t dsd_list_len ; struct dsd_dma *dsd_ptr ; struct scatterlist *sg ; uint32_t *cur_dsd ; int i ; uint16_t used_dsds ; struct scsi_cmnd *cmd ; dma_addr_t sle_dma ; void *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; uint32_t *tmp___3 ; uint32_t *tmp___4 ; uint32_t *tmp___5 ; uint32_t *tmp___6 ; uint32_t *tmp___7 ; uint32_t *tmp___8 ; { avail_dsds = 0U; cur_dsd = dsd; used_dsds = tot_dsds; cmd = sp->u.scmd.cmd; i = 0; sg = scsi_sglist(cmd); goto ldv_61022; ldv_61021: ; if ((unsigned int )avail_dsds == 0U) { avail_dsds = (unsigned int )used_dsds <= 37U ? 
(uint8_t )used_dsds : 37U; dsd_list_len = (uint32_t )(((int )avail_dsds + 1) * 12); used_dsds = (int )used_dsds - (int )((uint16_t )avail_dsds); tmp = kzalloc(32UL, 32U); dsd_ptr = (struct dsd_dma *)tmp; if ((unsigned long )dsd_ptr == (unsigned long )((struct dsd_dma *)0)) { return (1); } else { } next_dsd = dma_pool_alloc(ha->dl_dma_pool, 32U, & dsd_ptr->dsd_list_dma); dsd_ptr->dsd_addr = next_dsd; if ((unsigned long )next_dsd == (unsigned long )((void *)0)) { kfree((void const *)dsd_ptr); return (1); } else { } list_add_tail(& dsd_ptr->list, & ((struct crc_context *)sp->u.scmd.ctx)->dsd_list); sp->flags = (uint16_t )((unsigned int )sp->flags | 32U); tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )dsd_ptr->dsd_list_dma; tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = (unsigned int )(dsd_ptr->dsd_list_dma >> 32ULL); tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = dsd_list_len; cur_dsd = (uint32_t *)next_dsd; } else { } sle_dma = sg->dma_address; tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = (unsigned int )sle_dma; tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = (unsigned int )(sle_dma >> 32ULL); tmp___5 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___5 = sg->dma_length; avail_dsds = (uint8_t )((int )avail_dsds - 1); i = i + 1; sg = sg_next(sg); ldv_61022: ; if ((int )tot_dsds > i) { goto ldv_61021; } else { } tmp___6 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___6 = 0U; tmp___7 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___7 = 0U; tmp___8 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___8 = 0U; return (0); } } static int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha , srb_t *sp , uint32_t *dsd , uint16_t tot_dsds ) { void *next_dsd ; uint8_t avail_dsds ; uint32_t dsd_list_len ; struct dsd_dma *dsd_ptr ; struct scatterlist *sg ; int i ; struct scsi_cmnd *cmd ; uint32_t *cur_dsd ; uint16_t used_dsds ; dma_addr_t sle_dma ; void *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; uint32_t *tmp___3 ; uint32_t *tmp___4 ; uint32_t *tmp___5 ; uint32_t *tmp___6 ; uint32_t *tmp___7 ; uint32_t *tmp___8 ; { avail_dsds = 0U; cur_dsd = dsd; used_dsds = tot_dsds; cmd = sp->u.scmd.cmd; i = 0; sg = scsi_prot_sglist(cmd); goto ldv_61041; ldv_61040: ; if ((unsigned int )avail_dsds == 0U) { avail_dsds = (unsigned int )used_dsds <= 37U ? 
(uint8_t )used_dsds : 37U; dsd_list_len = (uint32_t )(((int )avail_dsds + 1) * 12); used_dsds = (int )used_dsds - (int )((uint16_t )avail_dsds); tmp = kzalloc(32UL, 32U); dsd_ptr = (struct dsd_dma *)tmp; if ((unsigned long )dsd_ptr == (unsigned long )((struct dsd_dma *)0)) { return (1); } else { } next_dsd = dma_pool_alloc(ha->dl_dma_pool, 32U, & dsd_ptr->dsd_list_dma); dsd_ptr->dsd_addr = next_dsd; if ((unsigned long )next_dsd == (unsigned long )((void *)0)) { kfree((void const *)dsd_ptr); return (1); } else { } list_add_tail(& dsd_ptr->list, & ((struct crc_context *)sp->u.scmd.ctx)->dsd_list); sp->flags = (uint16_t )((unsigned int )sp->flags | 32U); tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )dsd_ptr->dsd_list_dma; tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = (unsigned int )(dsd_ptr->dsd_list_dma >> 32ULL); tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = dsd_list_len; cur_dsd = (uint32_t *)next_dsd; } else { } sle_dma = sg->dma_address; tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = (unsigned int )sle_dma; tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = (unsigned int )(sle_dma >> 32ULL); tmp___5 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___5 = sg->dma_length; avail_dsds = (uint8_t )((int )avail_dsds - 1); i = i + 1; sg = sg_next(sg); ldv_61041: ; if ((int )tot_dsds > i) { goto ldv_61040; } else { } tmp___6 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___6 = 0U; tmp___7 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___7 = 0U; tmp___8 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___8 = 0U; return (0); } } __inline static int qla24xx_build_scsi_crc_2_iocbs(srb_t *sp , struct cmd_type_crc_2 *cmd_pkt , uint16_t tot_dsds , uint16_t tot_prot_dsds , uint16_t fw_prot_opts ) { uint32_t *cur_dsd ; uint32_t *fcp_dl ; scsi_qla_host_t *vha ; struct scsi_cmnd *cmd ; int sgc ; uint32_t total_bytes ; uint32_t data_bytes ; uint32_t dif_bytes ; uint8_t bundling ; uint16_t blk_size ; uint8_t *clr_ptr ; struct crc_context *crc_ctx_pkt ; struct qla_hw_data *ha ; uint8_t additional_fcpcdb_len ; uint16_t fcp_cmnd_len ; struct fcp_cmnd *fcp_cmnd ; dma_addr_t crc_ctx_dma ; char tag[2U] ; unsigned char tmp ; unsigned char tmp___0 ; unsigned char tmp___1 ; unsigned char tmp___2 ; void *tmp___3 ; size_t __len ; void *__ret ; int tmp___4 ; unsigned char tmp___5 ; unsigned char tmp___6 ; unsigned char tmp___7 ; unsigned char tmp___8 ; int tmp___9 ; __u32 tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; { total_bytes = 0U; bundling = 1U; crc_ctx_pkt = (struct crc_context *)0; cmd = sp->u.scmd.cmd; sgc = 0; *((uint32_t *)(& cmd_pkt->entry_type)) = 106U; vha = (sp->fcport)->vha; ha = vha->hw; data_bytes = scsi_bufflen(cmd); if (data_bytes == 0U || (unsigned int )cmd->sc_data_direction == 3U) { cmd_pkt->byte_count = 0U; return (0); } else { } cmd_pkt->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; if ((unsigned int )cmd->sc_data_direction == 1U) { cmd_pkt->control_flags = 1U; } else if ((unsigned int )cmd->sc_data_direction == 2U) { cmd_pkt->control_flags = 2U; } else { } tmp = scsi_get_prot_op(cmd); if ((unsigned int )tmp == 1U) { bundling = 0U; } else { tmp___0 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___0 == 2U) { bundling = 0U; } else { tmp___1 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___1 == 3U) { bundling = 0U; } else { tmp___2 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___2 == 4U) { bundling = 0U; } else { } } } } tmp___3 = dma_pool_alloc(ha->dl_dma_pool, 32U, & crc_ctx_dma); sp->u.scmd.ctx = tmp___3; crc_ctx_pkt = (struct crc_context *)tmp___3; if ((unsigned long 
)crc_ctx_pkt == (unsigned long )((struct crc_context *)0)) { goto crc_queuing_error; } else { } clr_ptr = (uint8_t *)crc_ctx_pkt; memset((void *)clr_ptr, 0, 360UL); crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; sp->flags = (uint16_t )((unsigned int )sp->flags | 4U); crc_ctx_pkt->handle = cmd_pkt->handle; INIT_LIST_HEAD(& crc_ctx_pkt->dsd_list); qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)(& crc_ctx_pkt->ref_tag), (unsigned int )tot_prot_dsds); cmd_pkt->crc_context_address[0] = (unsigned int )crc_ctx_dma; cmd_pkt->crc_context_address[1] = (unsigned int )(crc_ctx_dma >> 32ULL); cmd_pkt->crc_context_len = 64U; if ((unsigned int )cmd->cmd_len > 16U) { additional_fcpcdb_len = (unsigned int )((uint8_t )cmd->cmd_len) + 240U; if (((unsigned int )cmd->cmd_len & 3U) != 0U) { goto crc_queuing_error; } else { } fcp_cmnd_len = (unsigned int )cmd->cmd_len + 16U; } else { additional_fcpcdb_len = 0U; fcp_cmnd_len = 32U; } fcp_cmnd = & crc_ctx_pkt->fcp_cmnd; fcp_cmnd->additional_cdb_len = additional_fcpcdb_len; if ((unsigned int )cmd->sc_data_direction == 1U) { fcp_cmnd->additional_cdb_len = (uint8_t )((unsigned int )fcp_cmnd->additional_cdb_len | 1U); } else if ((unsigned int )cmd->sc_data_direction == 2U) { fcp_cmnd->additional_cdb_len = (uint8_t )((unsigned int )fcp_cmnd->additional_cdb_len | 2U); } else { } int_to_scsilun((cmd->device)->lun, & fcp_cmnd->lun); __len = (size_t )cmd->cmd_len; __ret = __builtin_memcpy((void *)(& fcp_cmnd->cdb), (void const *)cmd->cmnd, __len); cmd_pkt->fcp_cmnd_dseg_len = fcp_cmnd_len; cmd_pkt->fcp_cmnd_dseg_address[0] = (unsigned int )crc_ctx_dma + 64U; cmd_pkt->fcp_cmnd_dseg_address[1] = (unsigned int )((crc_ctx_dma + 64ULL) >> 32ULL); fcp_cmnd->task_management = 0U; tmp___4 = scsi_populate_tag_msg(cmd, (char *)(& tag)); if (tmp___4 != 0) { switch ((int )tag[0]) { case 33: fcp_cmnd->task_attribute = 1U; goto ldv_61073; case 34: fcp_cmnd->task_attribute = 2U; goto ldv_61073; default: fcp_cmnd->task_attribute = 0U; goto ldv_61073; } ldv_61073: ; } else { fcp_cmnd->task_attribute = 0U; } cmd_pkt->fcp_rsp_dseg_len = 0U; dif_bytes = 0U; blk_size = (uint16_t )(cmd->device)->sector_size; dif_bytes = (data_bytes / (uint32_t )blk_size) * 8U; tmp___5 = scsi_get_prot_op(sp->u.scmd.cmd); switch ((int )tmp___5) { case 1: ; case 2: total_bytes = data_bytes; data_bytes = data_bytes + dif_bytes; goto ldv_61078; case 3: ; case 4: ; case 5: ; case 6: total_bytes = data_bytes + dif_bytes; goto ldv_61078; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_iocb.o.c.prepared"), "i" (1645), "i" (12UL)); ldv_61084: ; goto ldv_61084; } ldv_61078: tmp___9 = qla2x00_hba_err_chk_enabled(sp); if (tmp___9 == 0) { fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 16U); } else if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { tmp___7 = scsi_get_prot_type(sp->u.scmd.cmd); if ((unsigned int )tmp___7 == 1U) { fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 1024U); } else { tmp___8 = scsi_get_prot_type(sp->u.scmd.cmd); if ((unsigned int )tmp___8 == 2U) { fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 1024U); } else { tmp___6 = scsi_get_prot_type(sp->u.scmd.cmd); if ((unsigned int )tmp___6 == 3U) { fw_prot_opts = (uint16_t 
)((unsigned int )fw_prot_opts | 2048U); } else { } } } } else { } if ((unsigned int )bundling == 0U) { cur_dsd = (uint32_t *)(& crc_ctx_pkt->u.nobundling.data_address); } else { fw_prot_opts = (uint16_t )((unsigned int )fw_prot_opts | 256U); crc_ctx_pkt->u.bundling.dif_byte_count = dif_bytes; crc_ctx_pkt->u.bundling.dseg_count = (int )tot_dsds - (int )tot_prot_dsds; cur_dsd = (uint32_t *)(& crc_ctx_pkt->u.bundling.data_address); } crc_ctx_pkt->blk_size = blk_size; crc_ctx_pkt->prot_opts = fw_prot_opts; crc_ctx_pkt->byte_count = data_bytes; crc_ctx_pkt->guard_seed = 0U; cmd_pkt->byte_count = total_bytes; fcp_dl = (uint32_t *)(& crc_ctx_pkt->fcp_cmnd.cdb) + ((unsigned long )additional_fcpcdb_len + 16UL); tmp___10 = __fswab32(total_bytes); *fcp_dl = tmp___10; if (data_bytes == 0U || (unsigned int )cmd->sc_data_direction == 3U) { cmd_pkt->byte_count = 0U; return (0); } else { } cmd_pkt->control_flags = (uint16_t )((unsigned int )cmd_pkt->control_flags | 4U); if ((unsigned int )bundling == 0U && (unsigned int )tot_prot_dsds != 0U) { tmp___11 = qla24xx_walk_and_build_sglist_no_difb(ha, sp, cur_dsd, (int )tot_dsds); if (tmp___11 != 0) { goto crc_queuing_error; } else { } } else { tmp___12 = qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, (int )tot_dsds - (int )tot_prot_dsds); if (tmp___12 != 0) { goto crc_queuing_error; } else { } } if ((unsigned int )bundling != 0U && (unsigned int )tot_prot_dsds != 0U) { cmd_pkt->control_flags = (uint16_t )((unsigned int )cmd_pkt->control_flags | 8U); cur_dsd = (uint32_t *)(& crc_ctx_pkt->u.bundling.dif_address); tmp___13 = qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, (int )tot_prot_dsds); if (tmp___13 != 0) { goto crc_queuing_error; } else { } } else { } return (0); crc_queuing_error: ; return (258); } } int qla24xx_start_scsi(srb_t *sp ) { int ret ; int nseg ; unsigned long flags ; uint32_t *clr_ptr ; uint32_t index ; uint32_t handle ; struct cmd_type_7 *cmd_pkt ; uint16_t cnt ; uint16_t req_cnt ; uint16_t tot_dsds ; struct req_que *req ; struct rsp_que *rsp ; struct scsi_cmnd *cmd ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; char tag[2U] ; int tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; struct scatterlist *tmp___2 ; long tmp___3 ; unsigned int tmp___4 ; unsigned int tmp___5 ; int tmp___6 ; size_t __len ; void *__ret ; { req = (struct req_que *)0; rsp = (struct rsp_que *)0; cmd = sp->u.scmd.cmd; vha = (sp->fcport)->vha; ha = vha->hw; ret = 0; qla25xx_set_que(sp, & rsp); req = vha->req; tot_dsds = 0U; if ((unsigned int )vha->marker_needed != 0U) { tmp = qla2x00_marker(vha, req, rsp, 0, 0, 2); if (tmp != 0) { return (258); } else { } vha->marker_needed = 0U; } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); handle = req->current_outstanding_cmd; index = 1U; goto ldv_61109; ldv_61108: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_61107; } else { } index = index + 1U; ldv_61109: ; if ((uint32_t )req->num_outstanding_cmds > index) { goto ldv_61108; } else { } ldv_61107: ; if ((uint32_t )req->num_outstanding_cmds == index) { goto queuing_error; } else { } tmp___4 = scsi_sg_count(cmd); if (tmp___4 != 0U) { tmp___1 = scsi_sg_count(cmd); tmp___2 = scsi_sglist(cmd); nseg = dma_map_sg_attrs(& (ha->pdev)->dev, tmp___2, (int )tmp___1, cmd->sc_data_direction, (struct dma_attrs *)0); tmp___3 = ldv__builtin_expect(nseg == 0, 0L); if 
(tmp___3 != 0L) { goto queuing_error; } else { } } else { nseg = 0; } tot_dsds = (uint16_t )nseg; req_cnt = qla24xx_calc_iocbs(vha, (int )tot_dsds); if ((int )req->cnt < (int )req_cnt + 2) { tmp___5 = __readl((void const volatile *)req->req_q_out); cnt = (uint16_t )tmp___5; if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } if ((int )req->cnt < (int )req_cnt + 2) { goto queuing_error; } else { } } else { } req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)((unsigned long )handle); req->cnt = (int )req->cnt - (int )req_cnt; cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; cmd_pkt->handle = ((unsigned int )req->id << 16) | handle; clr_ptr = (uint32_t *)cmd_pkt + 2UL; memset((void *)clr_ptr, 0, 56UL); cmd_pkt->dseg_count = tot_dsds; cmd_pkt->nport_handle = (sp->fcport)->loop_id; cmd_pkt->port_id[0] = (sp->fcport)->d_id.b.al_pa; cmd_pkt->port_id[1] = (sp->fcport)->d_id.b.area; cmd_pkt->port_id[2] = (sp->fcport)->d_id.b.domain; cmd_pkt->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; int_to_scsilun((cmd->device)->lun, & cmd_pkt->lun); host_to_fcp_swap((uint8_t *)(& cmd_pkt->lun), 8U); tmp___6 = scsi_populate_tag_msg(cmd, (char *)(& tag)); if (tmp___6 != 0) { switch ((int )tag[0]) { case 33: cmd_pkt->task = 1U; goto ldv_61112; case 34: cmd_pkt->task = 2U; goto ldv_61112; default: cmd_pkt->task = 0U; goto ldv_61112; } ldv_61112: ; } else { cmd_pkt->task = 0U; } __len = (size_t )cmd->cmd_len; __ret = __builtin_memcpy((void *)(& cmd_pkt->fcp_cdb), (void const *)cmd->cmnd, __len); host_to_fcp_swap((uint8_t *)(& cmd_pkt->fcp_cdb), 16U); cmd_pkt->byte_count = scsi_bufflen(cmd); qla24xx_build_scsi_iocbs(sp, cmd_pkt, (int )tot_dsds); cmd_pkt->entry_count = (unsigned char )req_cnt; cmd_pkt->entry_status = (unsigned char )rsp->id; __asm__ volatile ("sfence": : : "memory"); req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } sp->flags = (uint16_t )((unsigned int )sp->flags | 1U); writel((unsigned int )req->ring_index, (void volatile *)req->req_q_in); __readl((void const volatile *)(& (ha->iobase)->isp24.hccr)); if (*((unsigned long *)vha + 19UL) != 0UL && (rsp->ring_ptr)->signature != 3735936685U) { qla24xx_process_response_queue(vha, rsp); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); queuing_error: ; if ((unsigned int )tot_dsds != 0U) { scsi_dma_unmap(cmd); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (258); } } int qla24xx_dif_start_scsi(srb_t *sp ) { int nseg ; unsigned long flags ; uint32_t *clr_ptr ; uint32_t index ; uint32_t handle ; uint16_t cnt ; uint16_t req_cnt ; uint16_t tot_dsds ; uint16_t tot_prot_dsds ; uint16_t fw_prot_opts ; struct req_que *req ; struct rsp_que *rsp ; struct scsi_cmnd *cmd ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct cmd_type_crc_2 *cmd_pkt ; uint32_t status ; int tmp ; unsigned char tmp___0 ; int tmp___1 ; raw_spinlock_t *tmp___2 ; unsigned int tmp___3 ; struct scatterlist *tmp___4 ; long tmp___5 ; struct qla2_sgx sgx ; uint32_t partial ; int tmp___6 ; unsigned char tmp___7 ; unsigned char tmp___8 ; unsigned int tmp___9 ; unsigned int tmp___10 ; struct scatterlist *tmp___11 ; long tmp___12 ; unsigned int tmp___13 ; unsigned char tmp___14 ; 
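/* Remaining temporaries used by the body of qla24xx_dif_start_scsi() below (protection-operation checks, request-queue out-pointer reads, and the CRC-2 IOCB build status). */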
unsigned char tmp___15 ; int tmp___16 ; unsigned int tmp___17 ; int tmp___18 ; { req_cnt = 0U; fw_prot_opts = 0U; req = (struct req_que *)0; rsp = (struct rsp_que *)0; cmd = sp->u.scmd.cmd; vha = (sp->fcport)->vha; ha = vha->hw; status = 0U; tmp___0 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___0 == 0U) { if ((unsigned int )cmd->cmd_len <= 16U) { tmp = qla24xx_start_scsi(sp); return (tmp); } else { } } else { } qla25xx_set_que(sp, & rsp); req = vha->req; tot_dsds = 0U; if ((unsigned int )vha->marker_needed != 0U) { tmp___1 = qla2x00_marker(vha, req, rsp, 0, 0, 2); if (tmp___1 != 0) { return (258); } else { } vha->marker_needed = 0U; } else { } tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); handle = req->current_outstanding_cmd; index = 1U; goto ldv_61143; ldv_61142: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_61141; } else { } index = index + 1U; ldv_61143: ; if ((uint32_t )req->num_outstanding_cmds > index) { goto ldv_61142; } else { } ldv_61141: ; if ((uint32_t )req->num_outstanding_cmds == index) { goto queuing_error; } else { } tmp___9 = scsi_sg_count(cmd); if (tmp___9 != 0U) { tmp___3 = scsi_sg_count(cmd); tmp___4 = scsi_sglist(cmd); nseg = dma_map_sg_attrs(& (ha->pdev)->dev, tmp___4, (int )tmp___3, cmd->sc_data_direction, (struct dma_attrs *)0); tmp___5 = ldv__builtin_expect(nseg == 0, 0L); if (tmp___5 != 0L) { goto queuing_error; } else { sp->flags = (uint16_t )((unsigned int )sp->flags | 1U); } tmp___7 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___7 == 1U) { goto _L; } else { tmp___8 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___8 == 2U) { _L: /* CIL Label */ memset((void *)(& sgx), 0, 48UL); sgx.tot_bytes = scsi_bufflen(cmd); sgx.cur_sg = scsi_sglist(cmd); sgx.sp = sp; nseg = 0; goto ldv_61148; ldv_61147: nseg = nseg + 1; ldv_61148: tmp___6 = qla24xx_get_one_block_sg((cmd->device)->sector_size, & sgx, & partial); if (tmp___6 != 0) { goto ldv_61147; } else { } } else { } } } else { nseg = 0; } tot_dsds = (uint16_t )nseg; tmp___16 = qla24xx_configure_prot_mode(sp, & fw_prot_opts); if (tmp___16 != 0) { tmp___10 = scsi_prot_sg_count(cmd); tmp___11 = scsi_prot_sglist(cmd); nseg = dma_map_sg_attrs(& (ha->pdev)->dev, tmp___11, (int )tmp___10, cmd->sc_data_direction, (struct dma_attrs *)0); tmp___12 = ldv__builtin_expect(nseg == 0, 0L); if (tmp___12 != 0L) { goto queuing_error; } else { sp->flags = (uint16_t )((unsigned int )sp->flags | 16U); } tmp___14 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___14 == 1U) { tmp___13 = scsi_bufflen(cmd); nseg = (int )(tmp___13 / (cmd->device)->sector_size); } else { tmp___15 = scsi_get_prot_op(cmd); if ((unsigned int )tmp___15 == 2U) { tmp___13 = scsi_bufflen(cmd); nseg = (int )(tmp___13 / (cmd->device)->sector_size); } else { } } } else { nseg = 0; } req_cnt = 1U; tot_prot_dsds = (uint16_t )nseg; tot_dsds = (int )((uint16_t )nseg) + (int )tot_dsds; if ((int )req->cnt < (int )req_cnt + 2) { tmp___17 = __readl((void const volatile *)req->req_q_out); cnt = (uint16_t )tmp___17; if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } if ((int )req->cnt < (int )req_cnt + 2) { goto queuing_error; } else { } } else { } status = status | 1U; req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = 
sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)((unsigned long )handle); req->cnt = (int )req->cnt - (int )req_cnt; cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; cmd_pkt->handle = ((unsigned int )req->id << 16) | handle; clr_ptr = (uint32_t *)cmd_pkt + 2UL; memset((void *)clr_ptr, 0, 56UL); cmd_pkt->nport_handle = (sp->fcport)->loop_id; cmd_pkt->port_id[0] = (sp->fcport)->d_id.b.al_pa; cmd_pkt->port_id[1] = (sp->fcport)->d_id.b.area; cmd_pkt->port_id[2] = (sp->fcport)->d_id.b.domain; int_to_scsilun((cmd->device)->lun, & cmd_pkt->lun); host_to_fcp_swap((uint8_t *)(& cmd_pkt->lun), 8U); cmd_pkt->dseg_count = tot_dsds; tmp___18 = qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)req->ring_ptr, (int )tot_dsds, (int )tot_prot_dsds, (int )fw_prot_opts); if (tmp___18 != 0) { goto queuing_error; } else { } cmd_pkt->entry_count = (unsigned char )req_cnt; cmd_pkt->entry_status = (unsigned char )rsp->id; cmd_pkt->timeout = 0U; __asm__ volatile ("sfence": : : "memory"); req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } writel((unsigned int )req->ring_index, (void volatile *)req->req_q_in); __readl((void const volatile *)(& (ha->iobase)->isp24.hccr)); if (*((unsigned long *)vha + 19UL) != 0UL && (rsp->ring_ptr)->signature != 3735936685U) { qla24xx_process_response_queue(vha, rsp); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); queuing_error: ; if ((int )status & 1) { *(req->outstanding_cmds + (unsigned long )handle) = (srb_t *)0; req->cnt = (int )req->cnt + (int )req_cnt; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (258); } } static void qla25xx_set_que(srb_t *sp , struct rsp_que **rsp ) { struct scsi_cmnd *cmd ; struct qla_hw_data *ha ; int affinity ; { cmd = sp->u.scmd.cmd; ha = ((sp->fcport)->vha)->hw; affinity = (cmd->request)->cpu; if ((*((unsigned long *)ha + 2UL) != 0UL && affinity >= 0) && (int )ha->max_rsp_queues + -1 > affinity) { *rsp = *(ha->rsp_q_map + ((unsigned long )affinity + 1UL)); } else { *rsp = *(ha->rsp_q_map); } return; } } void *qla2x00_alloc_iocbs(struct scsi_qla_host *vha , srb_t *sp ) { struct qla_hw_data *ha ; struct req_que *req ; device_reg_t *reg ; uint32_t index ; uint32_t handle ; request_t *pkt ; uint16_t cnt ; uint16_t req_cnt ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; unsigned int tmp___2 ; { ha = vha->hw; req = *(ha->req_q_map); reg = (unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) ? 
ha->mqiobase + (unsigned long )((int )req->id * 4096) : ha->iobase; pkt = (request_t *)0; req_cnt = 1U; handle = 0U; if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto skip_cmd_array; } else { } handle = req->current_outstanding_cmd; index = 1U; goto ldv_61172; ldv_61171: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_61170; } else { } index = index + 1U; ldv_61172: ; if ((unsigned int )req->num_outstanding_cmds != 0U) { goto ldv_61171; } else { } ldv_61170: ; if ((uint32_t )req->num_outstanding_cmds == index) { ql_log(1U, vha, 28683, "No room on outstanding cmd array.\n"); goto queuing_error; } else { } req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = sp; sp->handle = handle; if ((unsigned int )sp->type != 8U) { req_cnt = (uint16_t )sp->iocbs; } else { } skip_cmd_array: ; if ((int )req->cnt < (int )req_cnt) { if ((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { tmp = readl((void const volatile *)(& reg->isp25mq.req_q_out)); cnt = (uint16_t )tmp; } else if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { tmp___0 = readl((void const volatile *)(& reg->isp82.req_q_out)); cnt = (uint16_t )tmp___0; } else if ((ha->device_type & 134217728U) != 0U) { tmp___1 = readl((void const volatile *)(& reg->isp24.req_q_out)); cnt = (uint16_t )tmp___1; } else if ((ha->device_type & 131072U) != 0U) { tmp___2 = readl((void const volatile *)(& reg->ispfx00.req_q_out)); cnt = (uint16_t )tmp___2; } else { cnt = qla2x00_debounce_register___0((uint16_t volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->isp.u.isp2100.mailbox4 : & reg->isp.u.isp2300.req_q_out)); } if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } } else { } if ((int )req->cnt < (int )req_cnt) { goto queuing_error; } else { } req->cnt = (int )req->cnt - (int )req_cnt; pkt = req->ring_ptr; memset((void *)pkt, 0, 64UL); if ((ha->device_type & 131072U) != 0U) { writeb((int )((unsigned char )req_cnt), (void volatile *)(& pkt->entry_count)); writew((int )((unsigned short )handle), (void volatile *)(& pkt->handle)); } else { pkt->entry_count = (uint8_t )req_cnt; pkt->handle = handle; } queuing_error: ; return ((void *)pkt); } } static void qla24xx_login_iocb(srb_t *sp , struct logio_entry_24xx *logio ) { struct srb_iocb *lio ; { lio = & sp->u.iocb_cmd; logio->entry_type = 82U; logio->control_flags = 0U; if (((int )lio->u.logio.flags & 2) != 0) { logio->control_flags = (uint16_t )((unsigned int )logio->control_flags | 16U); } else { } if (((int )lio->u.logio.flags & 4) != 0) { logio->control_flags = (uint16_t )((unsigned int )logio->control_flags | 32U); } else { } logio->nport_handle = (sp->fcport)->loop_id; logio->port_id[0] = (sp->fcport)->d_id.b.al_pa; logio->port_id[1] = (sp->fcport)->d_id.b.area; logio->port_id[2] = (sp->fcport)->d_id.b.domain; logio->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; return; } } static void qla2x00_login_iocb(srb_t *sp , struct mbx_entry *mbx ) { struct qla_hw_data *ha ; struct srb_iocb *lio ; uint16_t opts ; { ha = ((sp->fcport)->vha)->hw; lio = & sp->u.iocb_cmd; mbx->entry_type = 57U; if ((int )ha->device_type < 0) { mbx->loop_id.extended = (sp->fcport)->loop_id; } else { mbx->loop_id.id.standard = (unsigned char )(sp->fcport)->loop_id; } mbx->mb0 = 111U; opts = ((int )lio->u.logio.flags & 2) != 0; opts = (uint16_t )((((int )lio->u.logio.flags & 4) != 0 ? 2 : 0) | (int )((short )opts)); if ((int )ha->device_type < 0) { mbx->mb1 = (sp->fcport)->loop_id; mbx->mb10 = opts; } else { mbx->mb1 = (unsigned short )((int )((short )((int )(sp->fcport)->loop_id << 8)) | (int )((short )opts)); } mbx->mb2 = (unsigned short )(sp->fcport)->d_id.b.domain; mbx->mb3 = (unsigned short )((int )((short )((int )(sp->fcport)->d_id.b.area << 8)) | (int )((short )(sp->fcport)->d_id.b.al_pa)); mbx->mb9 = ((sp->fcport)->vha)->vp_idx; return; } } static void qla24xx_logout_iocb(srb_t *sp , struct logio_entry_24xx *logio ) { { logio->entry_type = 82U; logio->control_flags = 24U; logio->nport_handle = (sp->fcport)->loop_id; logio->port_id[0] = (sp->fcport)->d_id.b.al_pa; logio->port_id[1] = (sp->fcport)->d_id.b.area; logio->port_id[2] = (sp->fcport)->d_id.b.domain; logio->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; return; } } static void qla2x00_logout_iocb(srb_t *sp , struct mbx_entry *mbx ) { struct qla_hw_data *ha ; { ha = ((sp->fcport)->vha)->hw; mbx->entry_type = 57U; if ((int )ha->device_type < 0) { mbx->loop_id.extended = (sp->fcport)->loop_id; } else { mbx->loop_id.id.standard = (unsigned char )(sp->fcport)->loop_id; } mbx->mb0 = 113U; mbx->mb1 = (int )ha->device_type < 0 ? 
(sp->fcport)->loop_id : (uint16_t )((int )(sp->fcport)->loop_id << 8U); mbx->mb2 = (unsigned short )(sp->fcport)->d_id.b.domain; mbx->mb3 = (unsigned short )((int )((short )((int )(sp->fcport)->d_id.b.area << 8)) | (int )((short )(sp->fcport)->d_id.b.al_pa)); mbx->mb9 = ((sp->fcport)->vha)->vp_idx; return; } } static void qla24xx_adisc_iocb(srb_t *sp , struct logio_entry_24xx *logio ) { { logio->entry_type = 82U; logio->control_flags = 3U; logio->nport_handle = (sp->fcport)->loop_id; logio->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; return; } } static void qla2x00_adisc_iocb(srb_t *sp , struct mbx_entry *mbx ) { struct qla_hw_data *ha ; { ha = ((sp->fcport)->vha)->hw; mbx->entry_type = 57U; if ((int )ha->device_type < 0) { mbx->loop_id.extended = (sp->fcport)->loop_id; } else { mbx->loop_id.id.standard = (unsigned char )(sp->fcport)->loop_id; } mbx->mb0 = 100U; if ((int )ha->device_type < 0) { mbx->mb1 = (sp->fcport)->loop_id; mbx->mb10 = 1U; } else { mbx->mb1 = (unsigned short )((int )((short )((int )(sp->fcport)->loop_id << 8)) | 1); } mbx->mb2 = (unsigned short )((unsigned int )ha->async_pd_dma >> 16); mbx->mb3 = (unsigned short )ha->async_pd_dma; mbx->mb6 = (unsigned short )((unsigned int )(ha->async_pd_dma >> 32ULL) >> 16); mbx->mb7 = (unsigned short )(ha->async_pd_dma >> 32ULL); mbx->mb9 = ((sp->fcport)->vha)->vp_idx; return; } } static void qla24xx_tm_iocb(srb_t *sp , struct tsk_mgmt_entry *tsk ) { uint32_t flags ; unsigned int lun ; struct fc_port *fcport ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct srb_iocb *iocb ; struct req_que *req ; { fcport = sp->fcport; vha = fcport->vha; ha = vha->hw; iocb = & sp->u.iocb_cmd; req = vha->req; flags = iocb->u.tmf.flags; lun = iocb->u.tmf.lun; tsk->entry_type = 20U; tsk->entry_count = 1U; tsk->handle = ((unsigned int )req->id << 16) | tsk->handle; tsk->nport_handle = fcport->loop_id; tsk->timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; tsk->control_flags = flags; tsk->port_id[0] = fcport->d_id.b.al_pa; tsk->port_id[1] = fcport->d_id.b.area; tsk->port_id[2] = fcport->d_id.b.domain; tsk->vp_index = (uint8_t )(fcport->vha)->vp_idx; if (flags == 16U) { int_to_scsilun(lun, & tsk->lun); host_to_fcp_swap((uint8_t *)(& tsk->lun), 8U); } else { } return; } } static void qla24xx_els_iocb(srb_t *sp , struct els_entry_24xx *els_iocb ) { struct fc_bsg_job *bsg_job ; { bsg_job = sp->u.bsg_job; els_iocb->entry_type = 83U; els_iocb->entry_count = 1U; els_iocb->sys_define = 0U; els_iocb->entry_status = 0U; els_iocb->handle = sp->handle; els_iocb->nport_handle = (sp->fcport)->loop_id; els_iocb->tx_dsd_count = (unsigned short )bsg_job->request_payload.sg_cnt; els_iocb->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; els_iocb->sof_type = 16U; els_iocb->rx_dsd_count = (unsigned short )bsg_job->reply_payload.sg_cnt; els_iocb->opcode = (unsigned int )sp->type == 3U ? 
(bsg_job->request)->rqst_data.r_els.els_code : (bsg_job->request)->rqst_data.h_els.command_code; els_iocb->port_id[0] = (sp->fcport)->d_id.b.al_pa; els_iocb->port_id[1] = (sp->fcport)->d_id.b.area; els_iocb->port_id[2] = (sp->fcport)->d_id.b.domain; els_iocb->control_flags = 0U; els_iocb->rx_byte_count = bsg_job->reply_payload.payload_len; els_iocb->tx_byte_count = bsg_job->request_payload.payload_len; els_iocb->tx_address[0] = (unsigned int )(bsg_job->request_payload.sg_list)->dma_address; els_iocb->tx_address[1] = (unsigned int )((bsg_job->request_payload.sg_list)->dma_address >> 32ULL); els_iocb->tx_len = (bsg_job->request_payload.sg_list)->dma_length; els_iocb->rx_address[0] = (unsigned int )(bsg_job->reply_payload.sg_list)->dma_address; els_iocb->rx_address[1] = (unsigned int )((bsg_job->reply_payload.sg_list)->dma_address >> 32ULL); els_iocb->rx_len = (bsg_job->reply_payload.sg_list)->dma_length; ((sp->fcport)->vha)->qla_stats.control_requests = ((sp->fcport)->vha)->qla_stats.control_requests + 1U; return; } } static void qla2x00_ct_iocb(srb_t *sp , ms_iocb_entry_t *ct_iocb ) { uint16_t avail_dsds ; uint32_t *cur_dsd ; struct scatterlist *sg ; int index ; uint16_t tot_dsds ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct fc_bsg_job *bsg_job ; int loop_iterartion ; int cont_iocb_prsnt ; int entry_count ; dma_addr_t sle_dma ; cont_a64_entry_t *cont_pkt ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; { vha = (sp->fcport)->vha; ha = vha->hw; bsg_job = sp->u.bsg_job; loop_iterartion = 0; cont_iocb_prsnt = 0; entry_count = 1; memset((void *)ct_iocb, 0, 64UL); ct_iocb->entry_type = 41U; ct_iocb->entry_status = 0U; ct_iocb->handle1 = sp->handle; if ((int )ha->device_type < 0) { ct_iocb->loop_id.extended = (sp->fcport)->loop_id; } else { ct_iocb->loop_id.id.standard = (unsigned char )(sp->fcport)->loop_id; } ct_iocb->status = 0U; ct_iocb->control_flags = 0U; ct_iocb->timeout = 0U; ct_iocb->cmd_dsd_count = (unsigned short )bsg_job->request_payload.sg_cnt; ct_iocb->total_dsd_count = (unsigned int )((unsigned short )bsg_job->request_payload.sg_cnt) + 1U; ct_iocb->req_bytecount = bsg_job->request_payload.payload_len; ct_iocb->rsp_bytecount = bsg_job->reply_payload.payload_len; ct_iocb->dseg_req_address[0] = (unsigned int )(bsg_job->request_payload.sg_list)->dma_address; ct_iocb->dseg_req_address[1] = (unsigned int )((bsg_job->request_payload.sg_list)->dma_address >> 32ULL); ct_iocb->dseg_req_length = ct_iocb->req_bytecount; ct_iocb->dseg_rsp_address[0] = (unsigned int )(bsg_job->reply_payload.sg_list)->dma_address; ct_iocb->dseg_rsp_address[1] = (unsigned int )((bsg_job->reply_payload.sg_list)->dma_address >> 32ULL); ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount; avail_dsds = 1U; cur_dsd = (uint32_t *)(& ct_iocb->dseg_rsp_address); index = 0; tot_dsds = (uint16_t )bsg_job->reply_payload.sg_cnt; index = 0; sg = bsg_job->reply_payload.sg_list; goto ldv_61238; ldv_61237: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt = qla2x00_prep_cont_type1_iocb(vha, *((vha->hw)->req_q_map)); cur_dsd = (uint32_t *)(& cont_pkt->dseg_0_address); avail_dsds = 5U; cont_iocb_prsnt = 1; entry_count = entry_count + 1; } else { } sle_dma = sg->dma_address; tmp = cur_dsd; cur_dsd = cur_dsd + 1; *tmp = (unsigned int )sle_dma; tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )(sle_dma >> 32ULL); tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = sg->dma_length; loop_iterartion = loop_iterartion + 1; avail_dsds = (uint16_t )((int )avail_dsds - 1); index = index + 1; sg = 
sg_next(sg); ldv_61238: ; if ((int )tot_dsds > index) { goto ldv_61237; } else { } ct_iocb->entry_count = (uint8_t )entry_count; ((sp->fcport)->vha)->qla_stats.control_requests = ((sp->fcport)->vha)->qla_stats.control_requests + 1U; return; } } static void qla24xx_ct_iocb(srb_t *sp , struct ct_entry_24xx *ct_iocb ) { uint16_t avail_dsds ; uint32_t *cur_dsd ; struct scatterlist *sg ; int index ; uint16_t tot_dsds ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct fc_bsg_job *bsg_job ; int loop_iterartion ; int cont_iocb_prsnt ; int entry_count ; dma_addr_t sle_dma ; cont_a64_entry_t *cont_pkt ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; { vha = (sp->fcport)->vha; ha = vha->hw; bsg_job = sp->u.bsg_job; loop_iterartion = 0; cont_iocb_prsnt = 0; entry_count = 1; ct_iocb->entry_type = 41U; ct_iocb->entry_status = 0U; ct_iocb->sys_define = 0U; ct_iocb->handle = sp->handle; ct_iocb->nport_handle = (sp->fcport)->loop_id; ct_iocb->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; ct_iocb->comp_status = 0U; ct_iocb->cmd_dsd_count = (unsigned short )bsg_job->request_payload.sg_cnt; ct_iocb->timeout = 0U; ct_iocb->rsp_dsd_count = (unsigned short )bsg_job->reply_payload.sg_cnt; ct_iocb->rsp_byte_count = bsg_job->reply_payload.payload_len; ct_iocb->cmd_byte_count = bsg_job->request_payload.payload_len; ct_iocb->dseg_0_address[0] = (unsigned int )(bsg_job->request_payload.sg_list)->dma_address; ct_iocb->dseg_0_address[1] = (unsigned int )((bsg_job->request_payload.sg_list)->dma_address >> 32ULL); ct_iocb->dseg_0_len = (bsg_job->request_payload.sg_list)->dma_length; avail_dsds = 1U; cur_dsd = (uint32_t *)(& ct_iocb->dseg_1_address); index = 0; tot_dsds = (uint16_t )bsg_job->reply_payload.sg_cnt; index = 0; sg = bsg_job->reply_payload.sg_list; goto ldv_61258; ldv_61257: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt = qla2x00_prep_cont_type1_iocb(vha, *(ha->req_q_map)); cur_dsd = (uint32_t *)(& cont_pkt->dseg_0_address); avail_dsds = 5U; cont_iocb_prsnt = 1; entry_count = entry_count + 1; } else { } sle_dma = sg->dma_address; tmp = cur_dsd; cur_dsd = cur_dsd + 1; *tmp = (unsigned int )sle_dma; tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )(sle_dma >> 32ULL); tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = sg->dma_length; loop_iterartion = loop_iterartion + 1; avail_dsds = (uint16_t )((int )avail_dsds - 1); index = index + 1; sg = sg_next(sg); ldv_61258: ; if ((int )tot_dsds > index) { goto ldv_61257; } else { } ct_iocb->entry_count = (uint8_t )entry_count; return; } } int qla82xx_start_scsi(srb_t *sp ) { int ret ; int nseg ; unsigned long flags ; struct scsi_cmnd *cmd ; uint32_t *clr_ptr ; uint32_t index ; uint32_t handle ; uint16_t cnt ; uint16_t req_cnt ; uint16_t tot_dsds ; struct device_reg_82xx *reg ; uint32_t dbval ; uint32_t *fcp_dl ; uint8_t additional_cdb_len ; struct ct6_dsd *ctx ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; char tag[2U] ; int tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; struct scatterlist *tmp___2 ; long tmp___3 ; unsigned int tmp___4 ; struct cmd_type_6 *cmd_pkt ; uint16_t more_dsd_lists ; struct dsd_dma *dsd_ptr ; uint16_t i ; void *tmp___5 ; unsigned int tmp___6 ; void *tmp___7 ; void *tmp___8 ; int tmp___9 ; int tmp___10 ; size_t __len ; void *__ret ; unsigned int tmp___11 ; __u32 tmp___12 ; struct cmd_type_7 *cmd_pkt___0 ; unsigned int tmp___13 ; int tmp___14 ; size_t __len___0 ; void *__ret___0 ; unsigned int tmp___15 ; { vha = (sp->fcport)->vha; ha = vha->hw; 
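/* qla82xx_start_scsi(): send a marker if one is pending, reserve a free outstanding-command handle under the hardware lock, map the scatter-gather list, then build either a Command Type 6 IOCB (CDB and DSD lists held in external DMA memory) when tot_dsds exceeds ql2xshiftctondsd, or an in-line Command Type 7 IOCB otherwise, before ringing the ISP82xx request-queue doorbell. */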
req = (struct req_que *)0; rsp = (struct rsp_que *)0; ret = 0; reg = & (ha->iobase)->isp82; cmd = sp->u.scmd.cmd; req = vha->req; rsp = *(ha->rsp_q_map); tot_dsds = 0U; dbval = (uint32_t )(((int )ha->portnum << 5) | 4); if ((unsigned int )vha->marker_needed != 0U) { tmp = qla2x00_marker(vha, req, rsp, 0, 0, 2); if (tmp != 0) { ql_log(1U, vha, 12300, "qla2x00_marker failed for cmd=%p.\n", cmd); return (258); } else { } vha->marker_needed = 0U; } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); handle = req->current_outstanding_cmd; index = 1U; goto ldv_61288; ldv_61287: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_61286; } else { } index = index + 1U; ldv_61288: ; if ((uint32_t )req->num_outstanding_cmds > index) { goto ldv_61287; } else { } ldv_61286: ; if ((uint32_t )req->num_outstanding_cmds == index) { goto queuing_error; } else { } tmp___4 = scsi_sg_count(cmd); if (tmp___4 != 0U) { tmp___1 = scsi_sg_count(cmd); tmp___2 = scsi_sglist(cmd); nseg = dma_map_sg_attrs(& (ha->pdev)->dev, tmp___2, (int )tmp___1, cmd->sc_data_direction, (struct dma_attrs *)0); tmp___3 = ldv__builtin_expect(nseg == 0, 0L); if (tmp___3 != 0L) { goto queuing_error; } else { } } else { nseg = 0; } tot_dsds = (uint16_t )nseg; if ((int )tot_dsds > ql2xshiftctondsd) { more_dsd_lists = 0U; more_dsd_lists = qla24xx_calc_dsd_lists((int )tot_dsds); if ((int )more_dsd_lists + (int )ha->gbl_dsd_inuse > 4095) { ql_dbg(134217728U, vha, 12301, "Num of DSD list %d is than %d for cmd=%p.\n", (int )more_dsd_lists + (int )ha->gbl_dsd_inuse, 4096, cmd); goto queuing_error; } else { } if ((int )ha->gbl_dsd_avail >= (int )more_dsd_lists) { goto sufficient_dsds; } else { more_dsd_lists = (int )more_dsd_lists - (int )ha->gbl_dsd_avail; } i = 0U; goto ldv_61296; ldv_61295: tmp___5 = kzalloc(32UL, 32U); dsd_ptr = (struct dsd_dma *)tmp___5; if ((unsigned long )dsd_ptr == (unsigned long )((struct dsd_dma *)0)) { ql_log(0U, vha, 12302, "Failed to allocate memory for dsd_dma for cmd=%p.\n", cmd); goto queuing_error; } else { } dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, 32U, & dsd_ptr->dsd_list_dma); if ((unsigned long )dsd_ptr->dsd_addr == (unsigned long )((void *)0)) { kfree((void const *)dsd_ptr); ql_log(0U, vha, 12303, "Failed to allocate memory for dsd_addr for cmd=%p.\n", cmd); goto queuing_error; } else { } list_add_tail(& dsd_ptr->list, & ha->gbl_dsd_list); ha->gbl_dsd_avail = (uint16_t )((int )ha->gbl_dsd_avail + 1); i = (uint16_t )((int )i + 1); ldv_61296: ; if ((int )i < (int )more_dsd_lists) { goto ldv_61295; } else { } sufficient_dsds: req_cnt = 1U; if ((int )req->cnt < (int )req_cnt + 2) { tmp___6 = __readl((void const volatile *)(& reg->req_q_out)); cnt = (unsigned short )tmp___6; if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } if ((int )req->cnt < (int )req_cnt + 2) { goto queuing_error; } else { } } else { } tmp___7 = mempool_alloc(ha->ctx_mempool, 32U); sp->u.scmd.ctx = tmp___7; ctx = (struct ct6_dsd *)tmp___7; if ((unsigned long )ctx == (unsigned long )((struct ct6_dsd *)0)) { ql_log(0U, vha, 12304, "Failed to allocate ctx for cmd=%p.\n", cmd); goto queuing_error; } else { } memset((void *)ctx, 0, 48UL); tmp___8 = dma_pool_alloc(ha->fcp_cmnd_dma_pool, 32U, & ctx->fcp_cmnd_dma); 
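/* The FCP_CMND block for the Type 6 IOCB is carved from its own DMA pool; cmd_pkt->fcp_cmnd_dseg_address below is set to this DMA address so the firmware fetches the command block separately from the IOCB. */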
ctx->fcp_cmnd = (struct fcp_cmnd *)tmp___8; if ((unsigned long )ctx->fcp_cmnd == (unsigned long )((struct fcp_cmnd *)0)) { ql_log(0U, vha, 12305, "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); goto queuing_error; } else { } INIT_LIST_HEAD(& ctx->dsd_list); ctx->dsd_use_cnt = 0; if ((unsigned int )cmd->cmd_len > 16U) { additional_cdb_len = (unsigned int )((uint8_t )cmd->cmd_len) + 240U; if (((unsigned int )cmd->cmd_len & 3U) != 0U) { ql_log(1U, vha, 12306, "scsi cmd len %d not multiple of 4 for cmd=%p.\n", (int )cmd->cmd_len, cmd); goto queuing_error_fcp_cmnd; } else { } ctx->fcp_cmnd_len = (unsigned int )cmd->cmd_len + 16U; } else { additional_cdb_len = 0U; ctx->fcp_cmnd_len = 32U; } cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; cmd_pkt->handle = ((unsigned int )req->id << 16) | handle; clr_ptr = (uint32_t *)cmd_pkt + 2UL; memset((void *)clr_ptr, 0, 56UL); cmd_pkt->dseg_count = tot_dsds; cmd_pkt->nport_handle = (sp->fcport)->loop_id; cmd_pkt->port_id[0] = (sp->fcport)->d_id.b.al_pa; cmd_pkt->port_id[1] = (sp->fcport)->d_id.b.area; cmd_pkt->port_id[2] = (sp->fcport)->d_id.b.domain; cmd_pkt->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; tmp___9 = qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, (int )tot_dsds); if (tmp___9 != 0) { goto queuing_error_fcp_cmnd; } else { } int_to_scsilun((cmd->device)->lun, & cmd_pkt->lun); host_to_fcp_swap((uint8_t *)(& cmd_pkt->lun), 8U); memset((void *)ctx->fcp_cmnd, 0, 272UL); int_to_scsilun((cmd->device)->lun, & (ctx->fcp_cmnd)->lun); (ctx->fcp_cmnd)->additional_cdb_len = additional_cdb_len; if ((unsigned int )cmd->sc_data_direction == 1U) { (ctx->fcp_cmnd)->additional_cdb_len = (uint8_t )((unsigned int )(ctx->fcp_cmnd)->additional_cdb_len | 1U); } else if ((unsigned int )cmd->sc_data_direction == 2U) { (ctx->fcp_cmnd)->additional_cdb_len = (uint8_t )((unsigned int )(ctx->fcp_cmnd)->additional_cdb_len | 2U); } else { } tmp___10 = scsi_populate_tag_msg(cmd, (char *)(& tag)); if (tmp___10 != 0) { switch ((int )tag[0]) { case 33: (ctx->fcp_cmnd)->task_attribute = 1U; goto ldv_61300; case 34: (ctx->fcp_cmnd)->task_attribute = 2U; goto ldv_61300; } ldv_61300: ; } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { (ctx->fcp_cmnd)->task_attribute = (uint8_t )((int )((signed char )(ctx->fcp_cmnd)->task_attribute) | (int )((signed char )((int )(sp->fcport)->fcp_prio << 3))); } else { } __len = (size_t )cmd->cmd_len; __ret = __builtin_memcpy((void *)(& (ctx->fcp_cmnd)->cdb), (void const *)cmd->cmnd, __len); fcp_dl = (uint32_t *)(& (ctx->fcp_cmnd)->cdb) + ((unsigned long )additional_cdb_len + 16UL); tmp___11 = scsi_bufflen(cmd); tmp___12 = __fswab32(tmp___11); *fcp_dl = tmp___12; cmd_pkt->fcp_cmnd_dseg_len = ctx->fcp_cmnd_len; cmd_pkt->fcp_cmnd_dseg_address[0] = (unsigned int )ctx->fcp_cmnd_dma; cmd_pkt->fcp_cmnd_dseg_address[1] = (unsigned int )(ctx->fcp_cmnd_dma >> 32ULL); sp->flags = (uint16_t )((unsigned int )sp->flags | 4096U); cmd_pkt->byte_count = scsi_bufflen(cmd); cmd_pkt->entry_count = (unsigned char )req_cnt; cmd_pkt->entry_status = (unsigned char )rsp->id; } else { req_cnt = qla24xx_calc_iocbs(vha, (int )tot_dsds); if ((int )req->cnt < (int )req_cnt + 2) { tmp___13 = __readl((void const volatile *)(& reg->req_q_out)); cnt = (unsigned short )tmp___13; if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } } else { } if ((int )req->cnt < (int )req_cnt + 2) { goto queuing_error; } else { } cmd_pkt___0 = (struct cmd_type_7 *)req->ring_ptr; 
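/* Command Type 7 path (tot_dsds <= ql2xshiftctondsd): no separate fcp_cmnd
 * buffer or DSD context is needed; the CDB is copied directly into
 * cmd_pkt___0->fcp_cdb and the data segments are built in place by
 * qla24xx_build_scsi_iocbs.  The statements below populate the cmd_type_7
 * packet on the request ring. */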
cmd_pkt___0->handle = ((unsigned int )req->id << 16) | handle; clr_ptr = (uint32_t *)cmd_pkt___0 + 2UL; memset((void *)clr_ptr, 0, 56UL); cmd_pkt___0->dseg_count = tot_dsds; cmd_pkt___0->nport_handle = (sp->fcport)->loop_id; cmd_pkt___0->port_id[0] = (sp->fcport)->d_id.b.al_pa; cmd_pkt___0->port_id[1] = (sp->fcport)->d_id.b.area; cmd_pkt___0->port_id[2] = (sp->fcport)->d_id.b.domain; cmd_pkt___0->vp_index = (uint8_t )((sp->fcport)->vha)->vp_idx; int_to_scsilun((cmd->device)->lun, & cmd_pkt___0->lun); host_to_fcp_swap((uint8_t *)(& cmd_pkt___0->lun), 8U); tmp___14 = scsi_populate_tag_msg(cmd, (char *)(& tag)); if (tmp___14 != 0) { switch ((int )tag[0]) { case 33: cmd_pkt___0->task = 1U; goto ldv_61307; case 34: cmd_pkt___0->task = 2U; goto ldv_61307; } ldv_61307: ; } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { cmd_pkt___0->task = (uint8_t )((int )((signed char )cmd_pkt___0->task) | (int )((signed char )((int )(sp->fcport)->fcp_prio << 3))); } else { } __len___0 = (size_t )cmd->cmd_len; __ret___0 = __builtin_memcpy((void *)(& cmd_pkt___0->fcp_cdb), (void const *)cmd->cmnd, __len___0); host_to_fcp_swap((uint8_t *)(& cmd_pkt___0->fcp_cdb), 16U); cmd_pkt___0->byte_count = scsi_bufflen(cmd); qla24xx_build_scsi_iocbs(sp, cmd_pkt___0, (int )tot_dsds); cmd_pkt___0->entry_count = (unsigned char )req_cnt; cmd_pkt___0->entry_status = (unsigned char )rsp->id; } req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)((unsigned long )handle); req->cnt = (int )req->cnt - (int )req_cnt; __asm__ volatile ("sfence": : : "memory"); req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } sp->flags = (uint16_t )((unsigned int )sp->flags | 1U); dbval = ((uint32_t )((int )req->id << 8) | dbval) | (uint32_t )((int )req->ring_index << 16); if (ql2xdbwr != 0) { qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); } else { writel(dbval, (void volatile *)ha->nxdb_wr_ptr); __asm__ volatile ("sfence": : : "memory"); goto ldv_61313; ldv_61312: writel(dbval, (void volatile *)ha->nxdb_wr_ptr); __asm__ volatile ("sfence": : : "memory"); ldv_61313: tmp___15 = readl((void const volatile *)ha->nxdb_rd_ptr); if (tmp___15 != dbval) { goto ldv_61312; } else { } } if (*((unsigned long *)vha + 19UL) != 0UL && (rsp->ring_ptr)->signature != 3735936685U) { qla24xx_process_response_queue(vha, rsp); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); queuing_error_fcp_cmnd: dma_pool_free(ha->fcp_cmnd_dma_pool, (void *)ctx->fcp_cmnd, ctx->fcp_cmnd_dma); queuing_error: ; if ((unsigned int )tot_dsds != 0U) { scsi_dma_unmap(cmd); } else { } if ((unsigned long )sp->u.scmd.ctx != (unsigned long )((void *)0)) { mempool_free(sp->u.scmd.ctx, ha->ctx_mempool); sp->u.scmd.ctx = (void *)0; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (258); } } int qla2x00_start_sp(srb_t *sp ) { int rval ; struct qla_hw_data *ha ; void *pkt ; unsigned long flags ; raw_spinlock_t *tmp ; { ha = ((sp->fcport)->vha)->hw; rval = 258; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); pkt = qla2x00_alloc_iocbs((sp->fcport)->vha, sp); if ((unsigned long )pkt == (unsigned long )((void *)0)) { ql_log(1U, (sp->fcport)->vha, 28684, "qla2x00_alloc_iocbs failed.\n"); goto done; } else { } rval = 0; switch ((int )sp->type) { case 1: ; if ((ha->device_type & 
134217728U) != 0U) { qla24xx_login_iocb(sp, (struct logio_entry_24xx *)pkt); } else { qla2x00_login_iocb(sp, (struct mbx_entry *)pkt); } goto ldv_61327; case 2: ; if ((ha->device_type & 134217728U) != 0U) { qla24xx_logout_iocb(sp, (struct logio_entry_24xx *)pkt); } else { qla2x00_logout_iocb(sp, (struct mbx_entry *)pkt); } goto ldv_61327; case 3: ; case 4: qla24xx_els_iocb(sp, (struct els_entry_24xx *)pkt); goto ldv_61327; case 5: ; if ((ha->device_type & 134217728U) != 0U) { qla24xx_ct_iocb(sp, (struct ct_entry_24xx *)pkt); } else { qla2x00_ct_iocb(sp, (ms_iocb_entry_t *)pkt); } goto ldv_61327; case 6: ; if ((ha->device_type & 134217728U) != 0U) { qla24xx_adisc_iocb(sp, (struct logio_entry_24xx *)pkt); } else { qla2x00_adisc_iocb(sp, (struct mbx_entry *)pkt); } goto ldv_61327; case 7: ; if ((ha->device_type & 131072U) != 0U) { qlafx00_tm_iocb(sp, (struct tsk_mgmt_entry_fx00 *)pkt); } else { qla24xx_tm_iocb(sp, (struct tsk_mgmt_entry *)pkt); } goto ldv_61327; case 10: ; case 11: qlafx00_fxdisc_iocb(sp, (struct fxdisc_entry_fx00 *)pkt); goto ldv_61327; case 12: qlafx00_abort_iocb(sp, (struct abort_iocb_entry_fx00 *)pkt); goto ldv_61327; default: ; goto ldv_61327; } ldv_61327: __asm__ volatile ("sfence": : : "memory"); qla2x00_start_iocbs((sp->fcport)->vha, *(ha->req_q_map)); done: spin_unlock_irqrestore(& ha->hardware_lock, flags); return (rval); } } static void qla25xx_build_bidir_iocb(srb_t *sp , struct scsi_qla_host *vha , struct cmd_bidir *cmd_pkt , uint32_t tot_dsds ) { uint16_t avail_dsds ; uint32_t *cur_dsd ; uint32_t req_data_len ; uint32_t rsp_data_len ; struct scatterlist *sg ; int index ; int entry_count ; struct fc_bsg_job *bsg_job ; unsigned long tmp ; dma_addr_t sle_dma ; cont_a64_entry_t *cont_pkt ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; dma_addr_t sle_dma___0 ; cont_a64_entry_t *cont_pkt___0 ; uint32_t *tmp___3 ; uint32_t *tmp___4 ; uint32_t *tmp___5 ; { req_data_len = 0U; rsp_data_len = 0U; entry_count = 1; bsg_job = sp->u.bsg_job; *((uint32_t *)(& cmd_pkt->entry_type)) = 117U; cmd_pkt->wr_dseg_count = (unsigned short )bsg_job->request_payload.sg_cnt; cmd_pkt->rd_dseg_count = (unsigned short )bsg_job->reply_payload.sg_cnt; cmd_pkt->control_flags = 11U; rsp_data_len = bsg_job->request_payload.payload_len; req_data_len = rsp_data_len; cmd_pkt->wr_byte_count = req_data_len; cmd_pkt->rd_byte_count = rsp_data_len; tmp = qla2x00_get_async_timeout(vha); cmd_pkt->timeout = (unsigned int )((unsigned short )tmp) + 2U; vha->bidi_stats.transfer_bytes = vha->bidi_stats.transfer_bytes + (unsigned long long )req_data_len; vha->bidi_stats.io_count = vha->bidi_stats.io_count + 1ULL; vha->qla_stats.output_bytes = vha->qla_stats.output_bytes + (uint64_t )req_data_len; vha->qla_stats.output_requests = vha->qla_stats.output_requests + 1ULL; avail_dsds = 1U; cur_dsd = (uint32_t *)(& cmd_pkt->fcp_data_dseg_address); index = 0; index = 0; sg = bsg_job->request_payload.sg_list; goto ldv_61355; ldv_61354: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); cur_dsd = (uint32_t *)(& cont_pkt->dseg_0_address); avail_dsds = 5U; entry_count = entry_count + 1; } else { } sle_dma = sg->dma_address; tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )sle_dma; tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = (unsigned int )(sle_dma >> 32ULL); tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = sg->dma_length; avail_dsds = (uint16_t )((int )avail_dsds - 1); index = index + 1; sg = sg_next(sg); ldv_61355: ; if 
(bsg_job->request_payload.sg_cnt > index) { goto ldv_61354; } else { } index = 0; sg = bsg_job->reply_payload.sg_list; goto ldv_61360; ldv_61359: ; if ((unsigned int )avail_dsds == 0U) { cont_pkt___0 = qla2x00_prep_cont_type1_iocb(vha, vha->req); cur_dsd = (uint32_t *)(& cont_pkt___0->dseg_0_address); avail_dsds = 5U; entry_count = entry_count + 1; } else { } sle_dma___0 = sg->dma_address; tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = (unsigned int )sle_dma___0; tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = (unsigned int )(sle_dma___0 >> 32ULL); tmp___5 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___5 = sg->dma_length; avail_dsds = (uint16_t )((int )avail_dsds - 1); index = index + 1; sg = sg_next(sg); ldv_61360: ; if (bsg_job->reply_payload.sg_cnt > index) { goto ldv_61359; } else { } cmd_pkt->entry_count = (uint8_t )entry_count; return; } } int qla2x00_start_bidir(srb_t *sp , struct scsi_qla_host *vha , uint32_t tot_dsds ) { struct qla_hw_data *ha ; unsigned long flags ; uint32_t handle ; uint32_t index ; uint16_t req_cnt ; uint16_t cnt ; uint32_t *clr_ptr ; struct cmd_bidir *cmd_pkt ; struct rsp_que *rsp ; struct req_que *req ; int rval ; int tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; { ha = vha->hw; cmd_pkt = (struct cmd_bidir *)0; rval = 0; rval = 0; rsp = *(ha->rsp_q_map); req = vha->req; if ((unsigned int )vha->marker_needed != 0U) { tmp = qla2x00_marker(vha, req, rsp, 0, 0, 2); if (tmp != 0) { return (11); } else { } vha->marker_needed = 0U; } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); handle = req->current_outstanding_cmd; index = 1U; goto ldv_61383; ldv_61382: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_61381; } else { } index = index + 1U; ldv_61383: ; if ((uint32_t )req->num_outstanding_cmds > index) { goto ldv_61382; } else { } ldv_61381: ; if ((uint32_t )req->num_outstanding_cmds == index) { rval = 2; goto queuing_error; } else { } req_cnt = qla24xx_calc_iocbs(vha, (int )((uint16_t )tot_dsds)); if ((int )req->cnt < (int )req_cnt + 2) { tmp___1 = __readl((void const volatile *)req->req_q_out); cnt = (uint16_t )tmp___1; if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } } else { } if ((int )req->cnt < (int )req_cnt + 2) { rval = 2; goto queuing_error; } else { } cmd_pkt = (struct cmd_bidir *)req->ring_ptr; cmd_pkt->handle = ((unsigned int )req->id << 16) | handle; clr_ptr = (uint32_t *)cmd_pkt + 2UL; memset((void *)clr_ptr, 0, 56UL); cmd_pkt->nport_handle = vha->self_login_loop_id; cmd_pkt->port_id[0] = vha->d_id.b.al_pa; cmd_pkt->port_id[1] = vha->d_id.b.area; cmd_pkt->port_id[2] = vha->d_id.b.domain; qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds); cmd_pkt->entry_status = (unsigned char )rsp->id; req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = sp; sp->handle = handle; req->cnt = (int )req->cnt - (int )req_cnt; __asm__ volatile ("sfence": : : "memory"); qla2x00_start_iocbs(vha, req); queuing_error: spin_unlock_irqrestore(& ha->hardware_lock, flags); return (rval); } } void choose_timer_5(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_5 = 2; return; } } void disable_suitable_timer_5(struct timer_list 
*timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_5) { ldv_timer_state_5 = 0; return; } else { } return; } } void activate_pending_timer_5(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_5 == (unsigned long )timer) { if (ldv_timer_state_5 == 2 || pending_flag != 0) { ldv_timer_list_5 = timer; ldv_timer_list_5->data = data; ldv_timer_state_5 = 1; } else { } return; } else { } reg_timer_5(timer); ldv_timer_list_5->data = data; return; } } int reg_timer_5(struct timer_list *timer ) { { ldv_timer_list_5 = timer; ldv_timer_state_5 = 1; return (0); } } int ldv_del_timer_27(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_28(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } extern void _raw_spin_lock_irq(raw_spinlock_t * ) ; extern void _raw_spin_unlock_irq(raw_spinlock_t * ) ; __inline static void spin_lock_irq(spinlock_t *lock ) { { _raw_spin_lock_irq(& lock->ldv_6105.rlock); return; } } __inline static void spin_unlock_irq(spinlock_t *lock ) { { _raw_spin_unlock_irq(& lock->ldv_6105.rlock); return; } } int ldv_del_timer_31(struct timer_list *ldv_func_arg1 ) ; void choose_timer_6(struct timer_list *timer ) ; int reg_timer_6(struct timer_list *timer ) ; void disable_suitable_timer_6(struct timer_list *timer ) ; void activate_pending_timer_6(struct timer_list *timer , unsigned long data , int pending_flag ) ; extern int dev_printk(char const * , struct device const * , char const * , ...) 
; extern int pci_enable_msi_block(struct pci_dev * , unsigned int ) ; extern void pci_disable_msi(struct pci_dev * ) ; extern int pci_enable_msix(struct pci_dev * , struct msix_entry * , int ) ; extern void pci_disable_msix(struct pci_dev * ) ; extern int request_threaded_irq(unsigned int , irqreturn_t (*)(int , void * ) , irqreturn_t (*)(int , void * ) , unsigned long , char const * , void * ) ; __inline static int request_irq(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) { int tmp ; { tmp = request_threaded_irq(irq, handler, (irqreturn_t (*)(int , void * ))0, flags, name, dev); return (tmp); } } extern void free_irq(unsigned int , void * ) ; int ldv_scsi_add_host_with_dma_32(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; __inline static void scsi_set_resid(struct scsi_cmnd *cmd , int resid ) { { cmd->sdb.resid = resid; return; } } __inline static void set_host_byte(struct scsi_cmnd *cmd , char status ) { { cmd->result = (int )(((unsigned int )cmd->result & 4278255615U) | (unsigned int )((int )status << 16)); return; } } __inline static void set_driver_byte(struct scsi_cmnd *cmd , char status ) { { cmd->result = (cmd->result & 16777215) | ((int )status << 24); return; } } static char const * const port_state_str___1[5U] = { "Unknown", "UNCONFIGURED", "DEAD", "LOST", "ONLINE"}; void qla2x00_alert_all_vps(struct rsp_que *rsp , uint16_t *mb ) ; void qla2x00_async_event(scsi_qla_host_t *vha , struct rsp_que *rsp , uint16_t *mb ) ; srb_t *qla2x00_get_sp_from_handle(scsi_qla_host_t *vha , char const *func , struct req_que *req , void *iocb ) ; void qla2x00_process_completed_request(struct scsi_qla_host *vha , struct req_que *req , uint32_t index ) ; int qla25xx_request_irq(struct rsp_que *rsp ) ; irqreturn_t qla82xx_msix_default(int irq , void *dev_id ) ; irqreturn_t qla82xx_msix_rsp_q(int irq , void *dev_id ) ; __inline static void qla2x00_do_host_ramp_up(scsi_qla_host_t *vha ) { { if ((vha->hw)->cfg_lun_q_depth >= ql2xmaxqdepth) { return; } else { } if ((long )(((unsigned long )jiffies - (vha->hw)->host_last_rampdown_time) - 15000UL) < 0L) { return; } else { } if ((long )(((unsigned long )jiffies - (vha->hw)->host_last_rampup_time) - 7500UL) < 0L) { return; } else { } set_bit(23L, (unsigned long volatile *)(& vha->dpc_flags)); return; } } __inline static void qla2x00_handle_mbx_completion(struct qla_hw_data *ha , int status ) { int tmp ; { tmp = constant_test_bit(2L, (unsigned long const volatile *)(& ha->mbx_cmd_flags)); if ((tmp != 0 && status & 1) && *((unsigned long *)ha + 2UL) != 0UL) { set_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); clear_bit(2L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); complete(& ha->mbx_intr_comp); } else { } return; } } int ql2x_ini_mode ; void qlt_response_pkt_all_vps(struct scsi_qla_host *vha , response_t *pkt ) ; void qlt_async_event(uint16_t code , struct scsi_qla_host *vha , uint16_t *mailbox ) ; int qlt_24xx_process_response_error(struct scsi_qla_host *vha , struct sts_entry_24xx *pkt ) ; irqreturn_t qla83xx_msix_atio_q(int irq , void *dev_id ) ; extern void scsi_build_sense_buffer(int , u8 * , u8 , u8 , u8 ) ; static void qla2x00_mbx_completion(scsi_qla_host_t *vha , uint16_t mb0 ) ; static void qla2x00_status_entry(scsi_qla_host_t *vha , struct rsp_que *rsp , void *pkt ) ; static void qla2x00_status_cont_entry(struct rsp_que *rsp , sts_cont_entry_t *pkt ) ; static void qla2x00_error_entry(scsi_qla_host_t *vha , struct rsp_que *rsp , sts_entry_t 
*pkt ) ; irqreturn_t qla2100_intr_handler(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; int status ; unsigned long iter ; uint16_t hccr ; uint16_t mb[4U] ; struct rsp_que *rsp ; unsigned long flags ; raw_spinlock_t *tmp ; void *tmp___0 ; int tmp___1 ; unsigned short tmp___2 ; unsigned short tmp___3 ; unsigned long tmp___4 ; { rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20573, "%s: NULL response queue pointer.\n", "qla2100_intr_handler"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->isp; status = 0; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; iter = 50UL; goto ldv_60862; ldv_60861: hccr = readw((void const volatile *)(& reg->hccr)); if (((int )hccr & 32) != 0) { tmp___1 = pci_channel_offline(ha->pdev); if (tmp___1 != 0) { goto ldv_60860; } else { } writew(4096, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); (*((ha->isp_ops)->fw_dump))(vha, 1); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_60860; } else { tmp___2 = readw((void const volatile *)(& reg->istatus)); if (((int )tmp___2 & 8) == 0) { goto ldv_60860; } else { } } tmp___3 = readw((void const volatile *)(& reg->semaphore)); if ((int )tmp___3 & 1) { writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); mb[0] = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); if ((unsigned int )mb[0] > 16383U && (int )((short )mb[0]) >= 0) { qla2x00_mbx_completion(vha, (int )mb[0]); status = status | 1; } else if ((int )((short )mb[0]) < 0 && (unsigned int )mb[0] <= 49151U) { mb[1] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 1U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 1U); mb[2] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 2U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 2U); mb[3] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
(void const volatile *)(& reg->u.isp2100.mailbox0) + 3U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 3U); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); } else { ql_dbg(33554432U, vha, 20517, "Unrecognized interrupt type (%d).\n", (int )mb[0]); } writew(0, (void volatile *)(& reg->semaphore)); readw((void const volatile *)(& reg->semaphore)); } else { qla2x00_process_response_queue(rsp); writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); } ldv_60862: tmp___4 = iter; iter = iter - 1UL; if (tmp___4 != 0UL) { goto ldv_60861; } else { } ldv_60860: qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } irqreturn_t qla2300_intr_handler(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct device_reg_2xxx *reg ; int status ; unsigned long iter ; uint32_t stat ; uint16_t hccr ; uint16_t mb[4U] ; struct rsp_que *rsp ; struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; void *tmp___0 ; int tmp___1 ; long tmp___2 ; unsigned long tmp___3 ; { rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20568, "%s: NULL response queue pointer.\n", "qla2300_intr_handler"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->isp; status = 0; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; iter = 50UL; goto ldv_60893; ldv_60892: stat = readl((void const volatile *)(& reg->u.isp2300.host_status)); if ((stat & 256U) != 0U) { tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { goto ldv_60881; } else { } hccr = readw((void const volatile *)(& reg->hccr)); if (((int )hccr & 43264) != 0) { ql_log(1U, vha, 20518, "Parity error -- HCCR=%x, Dumping firmware.\n", (int )hccr); } else { ql_log(1U, vha, 20519, "RISC paused -- HCCR=%x, Dumping firmware.\n", (int )hccr); } writew(4096, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); (*((ha->isp_ops)->fw_dump))(vha, 1); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_60881; } else if ((stat & 32768U) == 0U) { goto ldv_60881; } else { } switch (stat & 255U) { case 1U: ; case 2U: ; case 16U: ; case 17U: qla2x00_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; writew(0, (void volatile *)(& reg->semaphore)); goto ldv_60886; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 1U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 1U); mb[2] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 2U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 2U); mb[3] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 3U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 3U); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_60886; case 19U: qla2x00_process_response_queue(rsp); goto ldv_60886; case 21U: mb[0] = 32817U; mb[1] = (unsigned short )(stat >> 16); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_60886; case 22U: mb[0] = 32800U; mb[1] = (unsigned short )(stat >> 16); mb[2] = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
(void const volatile *)(& reg->u.isp2100.mailbox0) + 2U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 2U); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_60886; default: ql_dbg(33554432U, vha, 20520, "Unrecognized interrupt type (%d).\n", stat & 255U); goto ldv_60886; } ldv_60886: writew(28672, (void volatile *)(& reg->hccr)); __readw((void const volatile *)(& reg->hccr)); ldv_60893: tmp___3 = iter; iter = iter - 1UL; if (tmp___3 != 0UL) { goto ldv_60892; } else { } ldv_60881: qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } static void qla2x00_mbx_completion(scsi_qla_host_t *vha , uint16_t mb0 ) { uint16_t cnt ; uint32_t mboxes ; uint16_t *wptr ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; { ha = vha->hw; reg = & (ha->iobase)->isp; mboxes = (uint32_t )((1 << (int )ha->mbx_count) + -1); if ((unsigned long )ha->mcp == (unsigned long )((mbx_cmd_t *)0)) { ql_dbg(33554432U, vha, 20481, "MBX pointer OLD_ERROR.\n"); } else { mboxes = (ha->mcp)->in_mb; } ha->flags.mbox_int = 1U; ha->mailbox_out[0] = mb0; mboxes = mboxes >> 1; wptr = (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 + 1UL : & reg->u.isp2300.mailbox0 + 1UL; cnt = 1U; goto ldv_60904; ldv_60903: ; if ((ha->device_type & 2U) != 0U && (unsigned int )cnt == 8U) { wptr = (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u_end.isp2200.mailbox8 : & reg->u.isp2300.mailbox0 + 8UL; } else { } if (((unsigned int )cnt == 4U || (unsigned int )cnt == 5U) && (int )mboxes & 1) { ha->mailbox_out[(int )cnt] = qla2x00_debounce_register___0((uint16_t volatile *)wptr); } else if ((int )mboxes & 1) { ha->mailbox_out[(int )cnt] = readw((void const volatile *)wptr); } else { } wptr = wptr + 1; mboxes = mboxes >> 1; cnt = (uint16_t )((int )cnt + 1); ldv_60904: ; if ((int )((unsigned short )ha->mbx_count) > (int )cnt) { goto ldv_60903; } else { } return; } } static void qla81xx_idc_event(scsi_qla_host_t *vha , uint16_t aen , uint16_t descr ) { char *event[3U] ; int rval ; struct device_reg_24xx *reg24 ; uint16_t *wptr ; uint16_t cnt ; uint16_t timeout ; uint16_t mb[7U] ; { event[0] = (char *)"Complete"; event[1] = (char *)"Request Notification"; event[2] = (char *)"Time Extension"; reg24 = & ((vha->hw)->iobase)->isp24; wptr = & reg24->mailbox1; cnt = 0U; goto ldv_60919; ldv_60918: mb[(int )cnt] = readw((void const volatile *)wptr); cnt = (uint16_t )((int )cnt + 1); wptr = wptr + 1; ldv_60919: ; if ((unsigned int )cnt <= 6U) { goto ldv_60918; } else { } ql_dbg(33554432U, vha, 20513, "Inter-Driver Communication %s -- %04x %04x %04x %04x %04x %04x %04x.\n", event[(int )aen & 255], (int )mb[0], (int )mb[1], (int )mb[2], (int )mb[3], (int )mb[4], (int )mb[5], (int )mb[6]); switch ((int )aen) { case 33024: ; if ((int )((short )mb[1]) < 0) { (vha->hw)->flags.idc_compl_status = 1U; if ((vha->hw)->notify_dcbx_comp != 0) { complete(& (vha->hw)->dcbx_comp); } else { } } else { } goto ldv_60922; case 33025: timeout = (unsigned int )((uint16_t )((int )descr >> 8)) & 15U; ql_dbg(33554432U, vha, 20514, "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", vha->host_no, event[(int )aen & 255], (int )timeout); if ((unsigned int )timeout == 0U) { return; } else { } rval = qla2x00_post_idc_ack_work(vha, (uint16_t *)(& mb)); if (rval != 0) { ql_log(1U, vha, 20515, "IDC failed to post ACK.\n"); } else { } goto ldv_60922; case 33026: (vha->hw)->idc_extend_tmo = (uint32_t )descr; ql_dbg(33554432U, vha, 20615, "%lu Inter-Driver 
Communication %s -- Extend timeout by=%d.\n", vha->host_no, event[(int )aen & 255], (vha->hw)->idc_extend_tmo); goto ldv_60922; } ldv_60922: ; return; } } char const *qla2x00_get_link_speed_str(struct qla_hw_data *ha , uint16_t speed ) { char const *link_speeds[7U] ; { link_speeds[0] = "1"; link_speeds[1] = "2"; link_speeds[2] = "?"; link_speeds[3] = "4"; link_speeds[4] = "8"; link_speeds[5] = "16"; link_speeds[6] = "10"; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { return (link_speeds[0]); } else if ((unsigned int )speed == 19U) { return (link_speeds[6]); } else if ((unsigned int )speed <= 5U) { return (link_speeds[(int )speed]); } else { return (link_speeds[2]); } } } static void qla83xx_handle_8200_aen(scsi_qla_host_t *vha , uint16_t *mb ) { struct qla_hw_data *ha ; uint32_t protocol_engine_id ; uint32_t fw_err_code ; uint32_t err_level ; uint16_t peg_fw_state ; uint16_t nw_interface_link_up ; uint16_t nw_interface_signal_detect ; uint16_t sfp_status ; uint16_t htbt_counter ; uint16_t htbt_monitor_enable ; uint16_t sfp_additonal_info ; uint16_t sfp_multirate ; uint16_t sfp_tx_fault ; uint16_t link_speed ; uint16_t dcbx_status ; { ha = vha->hw; ql_dbg(33554432U, vha, 20587, "AEN Code: mb[0] = 0x%x AEN reason: mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", (int )*mb, (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 6UL)); ql_dbg(33554432U, vha, 20588, "PH-status2: mb[3] = 0x%x PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x Drv-Presence: mb[5] = 0x%x.\n", (int )*(mb + 3UL), (int )*(mb + 7UL), (int )*(mb + 4UL), (int )*(mb + 5UL)); if (((int )*(mb + 1UL) & 14) != 0) { ha->flags.nic_core_hung = 1U; ql_log(1U, vha, 20576, "83XX: F/W Error Reported: Check if reset required.\n"); if (((int )*(mb + 1UL) & 2) != 0) { protocol_engine_id = (uint32_t )*(mb + 2UL) & 255U; fw_err_code = (uint32_t )(((int )*(mb + 2UL) >> 8) | (((int )*(mb + 6UL) & 8191) << 8)); err_level = (uint32_t )((int )*(mb + 6UL) >> 13); ql_log(1U, vha, 20577, "PegHalt Status-1 Register: protocol_engine_id=0x%x fw_err_code=0x%x err_level=0x%x.\n", protocol_engine_id, fw_err_code, err_level); ql_log(1U, vha, 20578, "PegHalt Status-2 Register: 0x%x%x.\n", (int )*(mb + 7UL), (int )*(mb + 3UL)); if (err_level == 1U) { ql_log(1U, vha, 20579, "Not a fatal error, f/w has recovered iteself.\n"); } else if (err_level == 2U) { ql_log(0U, vha, 20580, "Recoverable Fatal error: Chip reset required.\n"); qla83xx_schedule_work(vha, 1); } else if (err_level == 4U) { ql_log(0U, vha, 20581, "Unrecoverable Fatal error: Set FAILED state, reboot required.\n"); qla83xx_schedule_work(vha, 3); } else { } } else { } if (((int )*(mb + 1UL) & 4) != 0) { peg_fw_state = (unsigned int )*(mb + 2UL) & 255U; nw_interface_link_up = (uint16_t )(((int )*(mb + 2UL) & 256) >> 8); nw_interface_signal_detect = (uint16_t )(((int )*(mb + 2UL) & 512) >> 9); sfp_status = (uint16_t )(((int )*(mb + 2UL) & 3072) >> 10); htbt_counter = (uint16_t )(((int )*(mb + 2UL) & 28672) >> 12); htbt_monitor_enable = (int )*(mb + 2UL) >> 15; sfp_additonal_info = (unsigned int )*(mb + 6UL) & 3U; sfp_multirate = (uint16_t )(((int )*(mb + 6UL) & 4) >> 2); sfp_tx_fault = (uint16_t )(((int )*(mb + 6UL) & 8) >> 3); link_speed = (uint16_t )(((int )*(mb + 6UL) & 112) >> 4); dcbx_status = (uint16_t )(((int )*(mb + 6UL) & 28672) >> 12); ql_log(1U, vha, 20582, "Peg-to-Fc Status Register:\npeg_fw_state=0x%x, nw_interface_link_up=0x%x, nw_interface_signal_detect=0x%x\nsfp_statis=0x%x.\n ", (int )peg_fw_state, (int )nw_interface_link_up, (int 
)nw_interface_signal_detect, (int )sfp_status); ql_log(1U, vha, 20583, "htbt_counter=0x%x, htbt_monitor_enable=0x%x, sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ", (int )htbt_counter, (int )htbt_monitor_enable, (int )sfp_additonal_info, (int )sfp_multirate); ql_log(1U, vha, 20584, "sfp_tx_fault=0x%x, link_state=0x%x, dcbx_status=0x%x.\n", (int )sfp_tx_fault, (int )link_speed, (int )dcbx_status); qla83xx_schedule_work(vha, 1); } else { } if (((int )*(mb + 1UL) & 8) != 0) { ql_log(1U, vha, 20585, "Heartbeat Failure encountered, chip reset required.\n"); qla83xx_schedule_work(vha, 1); } else { } } else { } if ((int )*(mb + 1UL) & 1) { ql_log(2U, vha, 20586, "IDC Device-State changed = 0x%x.\n", (int )*(mb + 4UL)); if (*((unsigned long *)ha + 2UL) != 0UL) { return; } else { } qla83xx_schedule_work(vha, 33280); } else { } return; } } int qla2x00_is_a_vp_did(scsi_qla_host_t *vha , uint32_t rscn_entry ) { struct qla_hw_data *ha ; scsi_qla_host_t *vp ; uint32_t vp_did ; unsigned long flags ; int ret ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { ha = vha->hw; ret = 0; if ((unsigned int )ha->num_vhosts == 0U) { return (ret); } else { } tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)ha->vp_list.next; vp = (scsi_qla_host_t *)__mptr; goto ldv_60967; ldv_60966: vp_did = vp->d_id.b24; if (vp_did == rscn_entry) { ret = 1; goto ldv_60965; } else { } __mptr___0 = (struct list_head const *)vp->list.next; vp = (scsi_qla_host_t *)__mptr___0; ldv_60967: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_60966; } else { } ldv_60965: spin_unlock_irqrestore(& ha->vport_slock, flags); return (ret); } } void qla2x00_async_event(scsi_qla_host_t *vha , struct rsp_que *rsp , uint16_t *mb ) { uint16_t handle_cnt ; uint16_t cnt ; uint16_t mbx ; uint32_t handles[5U] ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; struct device_reg_24xx *reg24 ; struct device_reg_82xx *reg82 ; uint32_t rscn_entry ; uint32_t host_pid ; unsigned long flags ; unsigned short tmp ; unsigned short tmp___0 ; unsigned short tmp___1 ; unsigned short tmp___2 ; unsigned short tmp___3 ; unsigned short tmp___4 ; int tmp___5 ; char const *tmp___6 ; unsigned short tmp___7 ; unsigned short tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; char const *tmp___17 ; bool tmp___18 ; int tmp___19 ; int tmp___20 ; int tmp___21 ; raw_spinlock_t *tmp___22 ; int tmp___23 ; { ha = vha->hw; reg = & (ha->iobase)->isp; reg24 = & (ha->iobase)->isp24; reg82 = & (ha->iobase)->isp82; handle_cnt = 0U; if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { goto skip_rio; } else { } switch ((int )*mb) { case 32800: handles[0] = (unsigned int )(((int )*(mb + 2UL) << 16) | (int )*(mb + 1UL)); handle_cnt = 1U; goto ldv_60986; case 32817: handles[0] = (uint32_t )*(mb + 1UL); handle_cnt = 1U; *mb = 32800U; goto ldv_60986; case 32818: handles[0] = (uint32_t )*(mb + 1UL); handles[1] = (uint32_t )*(mb + 2UL); handle_cnt = 2U; *mb = 32800U; goto ldv_60986; case 32819: handles[0] = (uint32_t )*(mb + 1UL); handles[1] = (uint32_t )*(mb + 2UL); handles[2] = (uint32_t )*(mb + 3UL); handle_cnt = 3U; *mb = 32800U; goto ldv_60986; case 32820: handles[0] = (uint32_t )*(mb + 1UL); handles[1] = (uint32_t )*(mb + 2UL); handles[2] = (uint32_t )*(mb + 3UL); tmp = 
readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 6U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 6U); handles[3] = (unsigned int )tmp; handle_cnt = 4U; *mb = 32800U; goto ldv_60986; case 32821: handles[0] = (uint32_t )*(mb + 1UL); handles[1] = (uint32_t )*(mb + 2UL); handles[2] = (uint32_t )*(mb + 3UL); tmp___0 = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 6U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 6U); handles[3] = (unsigned int )tmp___0; tmp___1 = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 7U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 7U); handles[4] = (unsigned int )tmp___1; handle_cnt = 5U; *mb = 32800U; goto ldv_60986; case 32834: handles[0] = (unsigned int )(((int )*(mb + 2UL) << 16) | (int )*(mb + 1UL)); tmp___2 = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 7U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 7U); tmp___3 = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 6U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 6U); handles[1] = (unsigned int )((int )tmp___2 << 16) | (unsigned int )tmp___3; handle_cnt = 2U; *mb = 32800U; goto ldv_60986; default: ; goto ldv_60986; } ldv_60986: ; skip_rio: ; switch ((int )*mb) { case 32800: ; if (*((unsigned long *)vha + 19UL) == 0UL) { goto ldv_60995; } else { } cnt = 0U; goto ldv_60997; ldv_60996: qla2x00_process_completed_request(vha, rsp->req, handles[(int )cnt]); cnt = (uint16_t )((int )cnt + 1); ldv_60997: ; if ((int )cnt < (int )handle_cnt) { goto ldv_60996; } else { } goto ldv_60995; case 32769: ql_dbg(33554432U, vha, 20482, "Asynchronous RESET.\n"); set_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_60995; case 32770: ; if ((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { tmp___4 = readw((void const volatile *)(& reg24->mailbox7)); mbx = tmp___4; } else { mbx = 0U; } ql_log(1U, vha, 20483, "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx7=%xh.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL), (int )mbx); (*((ha->isp_ops)->fw_dump))(vha, 1); if ((ha->device_type & 134217728U) != 0U) { if ((unsigned int )*(mb + 1UL) == 0U && (unsigned int )*(mb + 2UL) == 0U) { ql_log(0U, vha, 20484, "Unrecoverable Hardware Error: adapter marked OFFLINE!\n"); vha->flags.online = 0U; vha->device_flags = vha->device_flags | 32U; } else { if (((int )mbx & 8) != 0 && *((unsigned long *)ha + 2UL) != 0UL) { set_bit(19L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } } else if ((unsigned int )*(mb + 1UL) == 0U) { ql_log(0U, vha, 20485, "Unrecoverable Hardware Error: adapter marked OFFLINE!\n"); vha->flags.online = 0U; vha->device_flags = vha->device_flags | 32U; } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } goto ldv_60995; case 32771: ql_log(1U, vha, 20486, "ISP Request Transfer Error (%x).\n", (int )*(mb + 1UL)); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_60995; case 32772: ql_log(1U, vha, 20487, "ISP Response Transfer Error.\n"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_60995; case 32773: ql_dbg(33554432U, vha, 
20488, "Asynchronous WAKEUP_THRES.\n"); goto ldv_60995; case 32784: ql_dbg(33554432U, vha, 20489, "LIP occurred (%x).\n", (int )*(mb + 1UL)); tmp___5 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___5 != 2) { atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); qla2x00_mark_all_devices_lost(vha, 1); } else { } if ((unsigned int )vha->vp_idx != 0U) { atomic_set(& vha->vp_state, 2); fc_vport_set_state(vha->fc_vport, 9); } else { } set_bit(9L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(12L, (unsigned long volatile *)(& vha->dpc_flags)); vha->flags.management_server_logged_in = 0U; qla2x00_post_aen_work(vha, 1, (u32 )*(mb + 1UL)); goto ldv_60995; case 32785: ; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { ha->link_data_rate = 0U; } else { ha->link_data_rate = *(mb + 1UL); } tmp___6 = qla2x00_get_link_speed_str(ha, (int )ha->link_data_rate); ql_dbg(33554432U, vha, 20490, "LOOP UP detected (%s Gbps).\n", tmp___6); vha->flags.management_server_logged_in = 0U; qla2x00_post_aen_work(vha, 2, (u32 )ha->link_data_rate); goto ldv_60995; case 32786: ; if ((ha->device_type & 8192U) != 0U || (ha->device_type & 65536U) != 0U) { tmp___7 = readw((void const volatile *)(& reg24->mailbox4)); mbx = tmp___7; } else { mbx = 0U; } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { tmp___8 = readw((void const volatile *)(& reg82->mailbox_out) + 4U); mbx = tmp___8; } else { mbx = mbx; } ql_dbg(33554432U, vha, 20491, "LOOP DOWN detected (%x %x %x %x).\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL), (int )mbx); tmp___9 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___9 != 2) { atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); vha->device_flags = vha->device_flags | 2U; qla2x00_mark_all_devices_lost(vha, 1); } else { } if ((unsigned int )vha->vp_idx != 0U) { atomic_set(& vha->vp_state, 2); fc_vport_set_state(vha->fc_vport, 9); } else { } vha->flags.management_server_logged_in = 0U; ha->link_data_rate = 65535U; qla2x00_post_aen_work(vha, 3, 0U); goto ldv_60995; case 32787: ql_dbg(33554432U, vha, 20492, "LIP reset occurred (%x).\n", (int )*(mb + 1UL)); tmp___10 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___10 != 2) { atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); qla2x00_mark_all_devices_lost(vha, 1); } else { } if ((unsigned int )vha->vp_idx != 0U) { atomic_set(& vha->vp_state, 2); fc_vport_set_state(vha->fc_vport, 9); } else { } set_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); ha->operating_mode = 0U; vha->flags.management_server_logged_in = 0U; qla2x00_post_aen_work(vha, 4, (u32 )*(mb + 1UL)); goto ldv_60995; case 32816: ; if ((int )ha->device_type & 1) { goto ldv_60995; } else { } if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { ql_dbg(33554432U, vha, 20493, "DCBX Completed -- %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); if (ha->notify_dcbx_comp != 0) { complete(& ha->dcbx_comp); } else { } } else { ql_dbg(33554432U, vha, 20494, "Asynchronous P2P MODE received.\n"); } tmp___12 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___12 != 2) { atomic_set(& vha->loop_state, 2); tmp___11 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___11 == 0) { atomic_set(& vha->loop_down_timer, 255); } else { } qla2x00_mark_all_devices_lost(vha, 1); } else { } if ((unsigned 
int )vha->vp_idx != 0U) { atomic_set(& vha->vp_state, 2); fc_vport_set_state(vha->fc_vport, 9); } else { } tmp___13 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___13 == 0) { set_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } set_bit(9L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(12L, (unsigned long volatile *)(& vha->dpc_flags)); ha->flags.gpsc_supported = 1U; vha->flags.management_server_logged_in = 0U; goto ldv_60995; case 32822: ; if ((int )ha->device_type & 1) { goto ldv_60995; } else { } ql_dbg(33554432U, vha, 20495, "Configuration change detected: value=%x.\n", (int )*(mb + 1UL)); tmp___15 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___15 != 2) { atomic_set(& vha->loop_state, 2); tmp___14 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___14 == 0) { atomic_set(& vha->loop_down_timer, 255); } else { } qla2x00_mark_all_devices_lost(vha, 1); } else { } if ((unsigned int )vha->vp_idx != 0U) { atomic_set(& vha->vp_state, 2); fc_vport_set_state(vha->fc_vport, 9); } else { } set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_60995; case 32788: ; if ((((((((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) || (ha->device_type & 8192U) != 0U) || (ha->device_type & 16384U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) || (ha->device_type & 262144U) != 0U) && (((unsigned int )*(mb + 1UL) == 65535U && ((int )*(mb + 3UL) & 255) != 255) || (unsigned int )*(mb + 1UL) != 65535U)) && (int )vha->vp_idx != ((int )*(mb + 3UL) & 255)) { goto ldv_60995; } else { } if ((unsigned int )*(mb + 1UL) == 65535U && (unsigned int )*(mb + 2UL) == 7U) { ql_dbg(33554432U, vha, 20496, "Port unavailable %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); ql_log(1U, vha, 20574, "Link is offline.\n"); tmp___16 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___16 != 2) { atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); vha->device_flags = vha->device_flags | 2U; qla2x00_mark_all_devices_lost(vha, 1); } else { } if ((unsigned int )vha->vp_idx != 0U) { atomic_set(& vha->vp_state, 2); fc_vport_set_state(vha->fc_vport, 9); qla2x00_mark_all_devices_lost(vha, 1); } else { } vha->flags.management_server_logged_in = 0U; ha->link_data_rate = 65535U; goto ldv_60995; } else { } atomic_set(& vha->loop_down_timer, 0); if ((unsigned int )*(mb + 1UL) != 65535U || ((unsigned int )*(mb + 2UL) != 6U && (unsigned int )*(mb + 2UL) != 4U)) { ql_dbg(33554432U, vha, 20497, "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); qlt_async_event((int )*mb, vha, mb); goto ldv_60995; } else { } ql_dbg(33554432U, vha, 20498, "Port database changed %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); tmp___17 = qla2x00_get_link_speed_str(ha, (int )ha->link_data_rate); ql_log(1U, vha, 20575, "Link is operational (%s Gbps).\n", tmp___17); atomic_set(& vha->loop_state, 3); qla2x00_mark_all_devices_lost(vha, 1); if ((unsigned int )vha->vp_idx == 0U) { tmp___18 = qla_ini_mode_enabled(vha); if (tmp___18) { tmp___19 = 0; } else { tmp___19 = 1; } if (tmp___19) { set_bit(21L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } } else { } set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(6L, 
(unsigned long volatile *)(& vha->dpc_flags)); qlt_async_event((int )*mb, vha, mb); goto ldv_60995; case 32789: ; if ((unsigned int )vha->vp_idx != 0U) { tmp___20 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->vp_flags)); if (tmp___20 != 0) { goto ldv_60995; } else { } } else { } if (*((unsigned long *)ha + 2UL) != 0UL && (int )vha->vp_idx != ((int )*(mb + 3UL) & 255)) { goto ldv_60995; } else { } ql_dbg(33554432U, vha, 20499, "RSCN database changed -- %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); rscn_entry = (uint32_t )((((int )*(mb + 1UL) & 255) << 16) | (int )*(mb + 2UL)); host_pid = (uint32_t )((((int )vha->d_id.b.domain << 16) | ((int )vha->d_id.b.area << 8)) | (int )vha->d_id.b.al_pa); if (rscn_entry == host_pid) { ql_dbg(33554432U, vha, 20500, "Ignoring RSCN update to local host port ID (%06x).\n", host_pid); goto ldv_60995; } else { } rscn_entry = (uint32_t )((((int )*(mb + 1UL) & 1023) << 16) | (int )*(mb + 2UL)); tmp___21 = qla2x00_is_a_vp_did(vha, rscn_entry); if (tmp___21 != 0) { goto ldv_60995; } else { } atomic_set(& vha->loop_down_timer, 0); vha->flags.management_server_logged_in = 0U; set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); set_bit(7L, (unsigned long volatile *)(& vha->dpc_flags)); qla2x00_post_aen_work(vha, 5, rscn_entry); goto ldv_60995; case 32832: ql_dbg(33554432U, vha, 20501, "[R|Z]IO update completion.\n"); if ((ha->device_type & 134217728U) != 0U) { qla24xx_process_response_queue(vha, rsp); } else { qla2x00_process_response_queue(rsp); } goto ldv_60995; case 32840: ql_dbg(33554432U, vha, 20502, "Discard RND Frame -- %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); goto ldv_60995; case 32808: ql_dbg(33554432U, vha, 20503, "Trace Notification -- %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL)); goto ldv_60995; case 32783: ql_dbg(33554432U, vha, 20504, "ISP84XX Alert Notification -- %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); tmp___22 = spinlock_check(& (ha->cs84xx)->access_lock); flags = _raw_spin_lock_irqsave(tmp___22); switch ((int )*(mb + 1UL)) { case 1: ql_log(2U, vha, 20505, "Alert 84XX: panic recovery %04x %04x.\n", (int )*(mb + 2UL), (int )*(mb + 3UL)); goto ldv_61020; case 2: (ha->cs84xx)->op_fw_version = (uint32_t )(((int )*(mb + 3UL) << 16) | (int )*(mb + 2UL)); ql_log(2U, vha, 20506, "Alert 84XX: firmware version %x.\n", (ha->cs84xx)->op_fw_version); goto ldv_61020; case 3: (ha->cs84xx)->diag_fw_version = (uint32_t )(((int )*(mb + 3UL) << 16) | (int )*(mb + 2UL)); ql_log(2U, vha, 20507, "Alert 84XX: diagnostic firmware version %x.\n", (ha->cs84xx)->diag_fw_version); goto ldv_61020; case 4: (ha->cs84xx)->diag_fw_version = (uint32_t )(((int )*(mb + 3UL) << 16) | (int )*(mb + 2UL)); (ha->cs84xx)->fw_update = 1U; ql_log(2U, vha, 20508, "Alert 84XX: gold firmware version %x.\n", (ha->cs84xx)->gold_fw_version); goto ldv_61020; default: ql_log(1U, vha, 20509, "Alert 84xx: Invalid Alert %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); } ldv_61020: spin_unlock_irqrestore(& (ha->cs84xx)->access_lock, flags); goto ldv_60995; case 32790: ql_dbg(33554432U, vha, 20510, "DCBX Started -- %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); goto ldv_60995; case 32818: ql_dbg(33554432U, vha, 20511, "DCBX Parameters Updated -- %04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); goto ldv_60995; case 32817: ql_dbg(33554432U, vha, 20512, "FCF Configuration Error -- 
%04x %04x %04x.\n", (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); goto ldv_60995; case 33025: ; if (((vha->hw)->device_type & 65536U) != 0U || (ha->device_type & 262144U) != 0U) { *(mb + 4UL) = readw((void const volatile *)(& reg24->mailbox4)); if ((((int )*(mb + 2UL) & 32767) == 288 || ((int )*(mb + 2UL) & 32767) == 290) && ((int )*(mb + 4UL) & 14) != 0) { set_bit(20L, (unsigned long volatile *)(& vha->dpc_flags)); tmp___23 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___23 == 2) { atomic_set(& vha->loop_down_timer, 255); } else { } qla2xxx_wake_dpc(vha); } else { } } else { } case 33024: ; if (ha->notify_lb_portup_comp != 0) { complete(& ha->lb_portup_comp); } else { } case 33026: ; if ((((vha->hw)->device_type & 8192U) != 0U || ((vha->hw)->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { qla81xx_idc_event(vha, (int )*mb, (int )*(mb + 1UL)); } else { } goto ldv_60995; case 33280: *(mb + 4UL) = readw((void const volatile *)(& reg24->mailbox4)); *(mb + 5UL) = readw((void const volatile *)(& reg24->mailbox5)); *(mb + 6UL) = readw((void const volatile *)(& reg24->mailbox6)); *(mb + 7UL) = readw((void const volatile *)(& reg24->mailbox7)); qla83xx_handle_8200_aen(vha, mb); goto ldv_60995; default: ql_dbg(33554432U, vha, 20567, "Unknown AEN:%04x %04x %04x %04x\n", (int )*mb, (int )*(mb + 1UL), (int )*(mb + 2UL), (int )*(mb + 3UL)); } ldv_60995: qlt_async_event((int )*mb, vha, mb); if ((unsigned int )vha->vp_idx == 0U && (unsigned int )ha->num_vhosts != 0U) { qla2x00_alert_all_vps(rsp, mb); } else { } return; } } void qla2x00_process_completed_request(struct scsi_qla_host *vha , struct req_que *req , uint32_t index ) { srb_t *sp ; struct qla_hw_data *ha ; { ha = vha->hw; if ((uint32_t )req->num_outstanding_cmds <= index) { ql_log(1U, vha, 12308, "Invalid SCSI command index (%x).\n", index); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } return; } else { } sp = *(req->outstanding_cmds + (unsigned long )index); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { *(req->outstanding_cmds + (unsigned long )index) = (srb_t *)0; (*(sp->done))((void *)ha, (void *)sp, 0); } else { ql_log(1U, vha, 12310, "Invalid SCSI SRB.\n"); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } } return; } } srb_t *qla2x00_get_sp_from_handle(scsi_qla_host_t *vha , char const *func , struct req_que *req , void *iocb ) { struct qla_hw_data *ha ; sts_entry_t *pkt ; srb_t *sp ; uint16_t index ; { ha = vha->hw; pkt = (sts_entry_t *)iocb; sp = (srb_t *)0; index = (unsigned short )pkt->handle; if ((int )req->num_outstanding_cmds <= (int )index) { ql_log(1U, vha, 20529, "Invalid command index (%x).\n", (int )index); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } goto done; } else { } sp = *(req->outstanding_cmds + (unsigned long )index); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_log(1U, vha, 20530, "Invalid completion handle (%x) -- timed-out.\n", (int )index); return (sp); } else { } if (sp->handle != (uint32_t )index) { ql_log(1U, vha, 20531, "SRB handle (%x) mismatch %x.\n", sp->handle, 
(int )index); return ((srb_t *)0); } else { } *(req->outstanding_cmds + (unsigned long )index) = (srb_t *)0; done: ; return (sp); } } static void qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha , struct req_que *req , struct mbx_entry *mbx ) { char func[9U] ; char const *type ; fc_port_t *fcport ; srb_t *sp ; struct srb_iocb *lio ; uint16_t *data ; uint16_t status ; { func[0] = 'M'; func[1] = 'B'; func[2] = 'X'; func[3] = '-'; func[4] = 'I'; func[5] = 'O'; func[6] = 'C'; func[7] = 'B'; func[8] = '\000'; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)mbx); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } lio = & sp->u.iocb_cmd; type = (char const *)sp->name; fcport = sp->fcport; data = (uint16_t *)(& lio->u.logio.data); *data = 16389U; *(data + 1UL) = (unsigned int )lio->u.logio.flags & 1U; if ((unsigned int )mbx->entry_status != 0U) { ql_dbg(33554432U, vha, 20547, "Async-%s error entry - hdl=%x portid=%02x%02x%02x entry-status=%x status=%x state-flag=%x status-flags=%x.\n", type, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int )mbx->entry_status, (int )mbx->status, (int )mbx->state_flags, (int )mbx->status_flags); ql_dump_buffer(33685504U, vha, 20521, (uint8_t *)mbx, 64U); goto logio_done; } else { } status = mbx->status; if (((unsigned int )status == 48U && (unsigned int )sp->type == 1U) && (unsigned int )mbx->mb0 == 16384U) { status = 0U; } else { } if ((unsigned int )status == 0U && (unsigned int )mbx->mb0 == 16384U) { ql_dbg(33554432U, vha, 20549, "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", type, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int )mbx->mb1); *data = 16384U; if ((unsigned int )sp->type == 1U) { fcport->port_type = 5; if ((int )mbx->mb1 & 1) { fcport->port_type = 4; } else if (((int )mbx->mb1 & 2) != 0) { fcport->flags = fcport->flags | 4U; } else { } } else { } goto logio_done; } else { } *data = mbx->mb0; switch ((int )*data) { case 16391: *(data + 1UL) = mbx->mb1; goto ldv_61065; case 16392: ; goto ldv_61065; default: *data = 16389U; goto ldv_61065; } ldv_61065: ql_log(1U, vha, 20550, "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int )status, (int )mbx->mb0, (int )mbx->mb1, (int )mbx->mb2, (int )mbx->mb6, (int )mbx->mb7); logio_done: (*(sp->done))((void *)vha, (void *)sp, 0); return; } } static void qla2x00_ct_entry(scsi_qla_host_t *vha , struct req_que *req , sts_entry_t *pkt , int iocb_type ) { char func[8U] ; char const *type ; srb_t *sp ; struct fc_bsg_job *bsg_job ; uint16_t comp_status ; int res ; { func[0] = 'C'; func[1] = 'T'; func[2] = '_'; func[3] = 'I'; func[4] = 'O'; func[5] = 'C'; func[6] = 'B'; func[7] = '\000'; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)pkt); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } bsg_job = sp->u.bsg_job; type = "ct pass-through"; comp_status = pkt->comp_status; (bsg_job->reply)->reply_data.ctels_reply.status = 0U; bsg_job->reply_len = 16U; if ((unsigned int )comp_status != 0U) { if ((unsigned int )comp_status == 21U) { res = 0; (bsg_job->reply)->reply_payload_rcv_len = (uint32_t )pkt->rsp_info_len; ql_log(1U, vha, 20552, "CT pass-through-%s error comp_status-status=0x%x total_byte = 0x%x.\n", type, (int )comp_status, 
(bsg_job->reply)->reply_payload_rcv_len); } else { ql_log(1U, vha, 20553, "CT pass-through-%s error comp_status-status=0x%x.\n", type, (int )comp_status); res = 458752; (bsg_job->reply)->reply_payload_rcv_len = 0U; } ql_dump_buffer(33685504U, vha, 20533, (uint8_t *)pkt, 64U); } else { res = 0; (bsg_job->reply)->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; bsg_job->reply_len = 0U; } (*(sp->done))((void *)vha, (void *)sp, res); return; } } static void qla24xx_els_ct_entry(scsi_qla_host_t *vha , struct req_que *req , struct sts_entry_24xx *pkt , int iocb_type ) { char func[12U] ; char const *type ; srb_t *sp ; struct fc_bsg_job *bsg_job ; uint16_t comp_status ; uint32_t fw_status[3U] ; uint8_t *fw_sts_ptr ; int res ; size_t __len ; void *__ret ; size_t __len___0 ; void *__ret___0 ; { func[0] = 'E'; func[1] = 'L'; func[2] = 'S'; func[3] = '_'; func[4] = 'C'; func[5] = 'T'; func[6] = '_'; func[7] = 'I'; func[8] = 'O'; func[9] = 'C'; func[10] = 'B'; func[11] = '\000'; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)pkt); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } bsg_job = sp->u.bsg_job; type = (char const *)0; switch ((int )sp->type) { case 3: ; case 4: type = "els"; goto ldv_61096; case 5: type = "ct pass-through"; goto ldv_61096; default: ql_dbg(8388608U, vha, 20542, "Unrecognized SRB: (%p) type=%d.\n", sp, (int )sp->type); return; } ldv_61096: fw_status[0] = (uint32_t )pkt->comp_status; comp_status = (uint16_t )fw_status[0]; fw_status[1] = (uint32_t )((unsigned short )((struct els_sts_entry_24xx *)pkt)->error_subcode_1); fw_status[2] = (uint32_t )((unsigned short )((struct els_sts_entry_24xx *)pkt)->error_subcode_2); (bsg_job->reply)->reply_data.ctels_reply.status = 0U; bsg_job->reply_len = 28U; if ((unsigned int )comp_status != 0U) { if ((unsigned int )comp_status == 21U) { res = 0; (bsg_job->reply)->reply_payload_rcv_len = (uint32_t )((unsigned short )((struct els_sts_entry_24xx *)pkt)->total_byte_count); ql_dbg(8388608U, vha, 20543, "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", type, sp->handle, (int )comp_status, fw_status[1], fw_status[2], (int )((unsigned short )((struct els_sts_entry_24xx *)pkt)->total_byte_count)); fw_sts_ptr = (uint8_t *)(bsg_job->req)->sense + 16UL; __len = 12UL; if (__len > 63UL) { __ret = __memcpy((void *)fw_sts_ptr, (void const *)(& fw_status), __len); } else { __ret = __builtin_memcpy((void *)fw_sts_ptr, (void const *)(& fw_status), __len); } } else { ql_dbg(8388608U, vha, 20544, "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x error subcode 1=0x%x error subcode 2=0x%x.\n", type, sp->handle, (int )comp_status, (int )((unsigned short )((struct els_sts_entry_24xx *)pkt)->error_subcode_1), (int )((unsigned short )((struct els_sts_entry_24xx *)pkt)->error_subcode_2)); res = 458752; (bsg_job->reply)->reply_payload_rcv_len = 0U; fw_sts_ptr = (uint8_t *)(bsg_job->req)->sense + 16UL; __len___0 = 12UL; if (__len___0 > 63UL) { __ret___0 = __memcpy((void *)fw_sts_ptr, (void const *)(& fw_status), __len___0); } else { __ret___0 = __builtin_memcpy((void *)fw_sts_ptr, (void const *)(& fw_status), __len___0); } } ql_dump_buffer(8519680U, vha, 20566, (uint8_t *)pkt, 64U); } else { res = 0; (bsg_job->reply)->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; bsg_job->reply_len = 0U; } (*(sp->done))((void *)vha, (void *)sp, res); return; } } static void qla24xx_logio_entry(scsi_qla_host_t *vha , struct 
req_que *req , struct logio_entry_24xx *logio ) { char func[11U] ; char const *type ; fc_port_t *fcport ; srb_t *sp ; struct srb_iocb *lio ; uint16_t *data ; uint32_t iop[2U] ; { func[0] = 'L'; func[1] = 'O'; func[2] = 'G'; func[3] = 'I'; func[4] = 'O'; func[5] = '-'; func[6] = 'I'; func[7] = 'O'; func[8] = 'C'; func[9] = 'B'; func[10] = '\000'; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)logio); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } lio = & sp->u.iocb_cmd; type = (char const *)sp->name; fcport = sp->fcport; data = (uint16_t *)(& lio->u.logio.data); *data = 16389U; *(data + 1UL) = (unsigned int )lio->u.logio.flags & 1U; if ((unsigned int )logio->entry_status != 0U) { ql_log(1U, fcport->vha, 20532, "Async-%s error entry - hdl=%xportid=%02x%02x%02x entry-status=%x.\n", type, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int )logio->entry_status); ql_dump_buffer(33685504U, vha, 20557, (uint8_t *)logio, 64U); goto logio_done; } else { } if ((unsigned int )logio->comp_status == 0U) { ql_dbg(33554432U, fcport->vha, 20534, "Async-%s complete - hdl=%x portid=%02x%02x%02x iop0=%x.\n", type, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, logio->io_parameter[0]); *data = 16384U; if ((unsigned int )sp->type != 1U) { goto logio_done; } else { } iop[0] = logio->io_parameter[0]; if ((iop[0] & 16U) != 0U) { fcport->port_type = 5; if ((iop[0] & 256U) != 0U) { fcport->flags = fcport->flags | 4U; } else { } } else if ((iop[0] & 32U) != 0U) { fcport->port_type = 4; } else { } if ((iop[0] & 128U) != 0U) { fcport->flags = fcport->flags | 16U; } else { } if (logio->io_parameter[7] != 0U || logio->io_parameter[8] != 0U) { fcport->supported_classes = fcport->supported_classes | 4U; } else { } if (logio->io_parameter[9] != 0U || logio->io_parameter[10] != 0U) { fcport->supported_classes = fcport->supported_classes | 8U; } else { } goto logio_done; } else { } iop[0] = logio->io_parameter[0]; iop[1] = logio->io_parameter[1]; switch (iop[0]) { case 26U: *data = 16391U; *(data + 1UL) = (unsigned short )iop[1]; goto ldv_61119; case 27U: *data = 16392U; goto ldv_61119; default: *data = 16389U; goto ldv_61119; } ldv_61119: ql_dbg(33554432U, fcport->vha, 20535, "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x iop0=%x iop1=%x.\n", type, sp->handle, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int )logio->comp_status, logio->io_parameter[0], logio->io_parameter[1]); logio_done: (*(sp->done))((void *)vha, (void *)sp, 0); return; } } static void qla24xx_tm_iocb_entry(scsi_qla_host_t *vha , struct req_que *req , struct tsk_mgmt_entry *tsk ) { char func[9U] ; char const *type ; fc_port_t *fcport ; srb_t *sp ; struct srb_iocb *iocb ; struct sts_entry_24xx *sts ; int error ; { func[0] = 'T'; func[1] = 'M'; func[2] = 'F'; func[3] = '-'; func[4] = 'I'; func[5] = 'O'; func[6] = 'C'; func[7] = 'B'; func[8] = '\000'; sts = (struct sts_entry_24xx *)tsk; error = 1; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)tsk); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } iocb = & sp->u.iocb_cmd; type = (char const *)sp->name; fcport = sp->fcport; if ((unsigned int )sts->entry_status != 0U) { ql_log(1U, fcport->vha, 20536, "Async-%s error - hdl=%x entry-status(%x).\n", type, sp->handle, (int )sts->entry_status); } else if ((unsigned int )sts->comp_status != 0U) { ql_log(1U, 
fcport->vha, 20537, "Async-%s error - hdl=%x completion status(%x).\n", type, sp->handle, (int )sts->comp_status); } else if (((int )sts->scsi_status & 256) == 0) { ql_log(1U, fcport->vha, 20538, "Async-%s error - hdl=%x no response info(%x).\n", type, sp->handle, (int )sts->scsi_status); } else if (sts->rsp_data_len <= 3U) { ql_log(1U, fcport->vha, 20539, "Async-%s error - hdl=%x not enough response(%d).\n", type, sp->handle, sts->rsp_data_len); } else if ((unsigned int )sts->data[3] != 0U) { ql_log(1U, fcport->vha, 20540, "Async-%s error - hdl=%x response(%x).\n", type, sp->handle, (int )sts->data[3]); } else { error = 0; } if (error != 0) { iocb->u.tmf.data = (uint32_t )error; ql_dump_buffer(33685504U, vha, 20565, (uint8_t *)sts, 64U); } else { } (*(sp->done))((void *)vha, (void *)sp, 0); return; } } void qla2x00_process_response_queue(struct rsp_que *rsp ) { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; sts_entry_t *pkt ; uint16_t handle_cnt ; uint16_t cnt ; void *tmp ; { ha = rsp->hw; reg = & (ha->iobase)->isp; tmp = pci_get_drvdata(ha->pdev); vha = (struct scsi_qla_host *)tmp; if (*((unsigned long *)vha + 19UL) == 0UL) { return; } else { } goto ldv_61143; ldv_61158: pkt = (sts_entry_t *)rsp->ring_ptr; rsp->ring_index = (uint16_t )((int )rsp->ring_index + 1); if ((int )rsp->ring_index == (int )rsp->length) { rsp->ring_index = 0U; rsp->ring_ptr = rsp->ring; } else { rsp->ring_ptr = rsp->ring_ptr + 1; } if ((unsigned int )pkt->entry_status != 0U) { qla2x00_error_entry(vha, rsp, pkt); ((response_t *)pkt)->signature = 3735936685U; __asm__ volatile ("sfence": : : "memory"); goto ldv_61143; } else { } switch ((int )pkt->entry_type) { case 3: qla2x00_status_entry(vha, rsp, (void *)pkt); goto ldv_61145; case 33: handle_cnt = (uint16_t )((sts21_entry_t *)pkt)->handle_count; cnt = 0U; goto ldv_61148; ldv_61147: qla2x00_process_completed_request(vha, rsp->req, ((sts21_entry_t *)pkt)->handle[(int )cnt]); cnt = (uint16_t )((int )cnt + 1); ldv_61148: ; if ((int )cnt < (int )handle_cnt) { goto ldv_61147; } else { } goto ldv_61145; case 34: handle_cnt = (uint16_t )((sts22_entry_t *)pkt)->handle_count; cnt = 0U; goto ldv_61152; ldv_61151: qla2x00_process_completed_request(vha, rsp->req, (uint32_t )((sts22_entry_t *)pkt)->handle[(int )cnt]); cnt = (uint16_t )((int )cnt + 1); ldv_61152: ; if ((int )cnt < (int )handle_cnt) { goto ldv_61151; } else { } goto ldv_61145; case 16: qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); goto ldv_61145; case 57: qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt); goto ldv_61145; case 41: qla2x00_ct_entry(vha, rsp->req, pkt, 41); goto ldv_61145; default: ql_log(1U, vha, 20554, "Received unknown response pkt type %x entry status=%x.\n", (int )pkt->entry_type, (int )pkt->entry_status); goto ldv_61145; } ldv_61145: ((response_t *)pkt)->signature = 3735936685U; __asm__ volatile ("sfence": : : "memory"); ldv_61143: ; if ((rsp->ring_ptr)->signature != 3735936685U) { goto ldv_61158; } else { } writew((int )rsp->ring_index, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->u.isp2100.mailbox5 : & reg->u.isp2300.rsp_q_out)); return; } } __inline static void qla2x00_handle_sense(srb_t *sp , uint8_t *sense_data , uint32_t par_sense_len , uint32_t sense_len , struct rsp_que *rsp , int res ) { struct scsi_qla_host *vha ; struct scsi_cmnd *cp ; uint32_t track_sense_len ; size_t __len ; void *__ret ; { vha = (sp->fcport)->vha; cp = sp->u.scmd.cmd; if (sense_len > 95U) { sense_len = 96U; } else { } sp->u.scmd.request_sense_length = sense_len; sp->u.scmd.request_sense_ptr = cp->sense_buffer; track_sense_len = sense_len; if (sense_len > par_sense_len) { sense_len = par_sense_len; } else { } __len = (size_t )sense_len; __ret = __builtin_memcpy((void *)cp->sense_buffer, (void const *)sense_data, __len); sp->u.scmd.request_sense_ptr = cp->sense_buffer + (unsigned long )sense_len; track_sense_len = track_sense_len - sense_len; sp->u.scmd.request_sense_length = track_sense_len; if (track_sense_len != 0U) { rsp->status_srb = sp; cp->result = res; } else { } if (sense_len != 0U) { ql_dbg(134348800U, vha, 12316, "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n", ((sp->fcport)->vha)->host_no, (cp->device)->id, (cp->device)->lun, cp); ql_dump_buffer(134348800U, vha, 12331, cp->sense_buffer, sense_len); } else { } return; } } __inline static int qla2x00_handle_dif_error(srb_t *sp , struct sts_entry_24xx *sts24 ) { struct scsi_qla_host *vha ; struct scsi_cmnd *cmd ; uint8_t *ap ; uint8_t *ep ; uint32_t e_ref_tag ; uint32_t a_ref_tag ; uint16_t e_app_tag ; uint16_t a_app_tag ; uint16_t e_guard ; uint16_t a_guard ; sector_t tmp ; uint32_t blocks_done ; uint32_t resid ; sector_t lba_s ; sector_t tmp___0 ; unsigned int tmp___1 ; uint32_t i ; uint32_t j ; uint32_t k ; uint32_t num_ent ; struct scatterlist *sg ; struct sd_dif_tuple *spt ; unsigned int tmp___2 ; struct page *tmp___3 ; void *tmp___4 ; unsigned char tmp___5 ; unsigned int tmp___6 ; unsigned char tmp___7 ; { vha = (sp->fcport)->vha; cmd = sp->u.scmd.cmd; ap = (uint8_t *)(& sts24->data) + 12UL; ep = (uint8_t *)(& sts24->data) + 20UL; a_guard = *((uint16_t *)ap + 2U); a_app_tag = *((uint16_t *)ap); a_ref_tag = *((uint32_t *)ap + 4U); e_guard = *((uint16_t *)ep + 2U); e_app_tag = *((uint16_t *)ep); e_ref_tag = *((uint32_t *)ep + 4U); ql_dbg(134217728U, vha, 12323, "iocb(s) %p Returned STATUS.\n", sts24); tmp = scsi_get_lba(cmd); ql_dbg(134217728U, vha, 12324, "DIF OLD_ERROR in cmd 0x%x lba 0x%llx act ref tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", (int )*(cmd->cmnd), (unsigned long long )tmp, a_ref_tag, e_ref_tag, (int )a_app_tag, (int )e_app_tag, (int )a_guard, (int )e_guard); if ((unsigned int )a_app_tag == 65535U) { tmp___7 = scsi_get_prot_type(cmd); if ((unsigned int )tmp___7 != 3U || a_ref_tag == 4294967295U) { tmp___0 = scsi_get_lba(cmd); lba_s = tmp___0; blocks_done = (e_ref_tag - (uint32_t )lba_s) + 1U; tmp___1 = scsi_bufflen(cmd); resid = tmp___1 - (cmd->device)->sector_size * blocks_done; scsi_set_resid(cmd, (int )resid); cmd->result = 0; tmp___6 = scsi_prot_sg_count(cmd); if (tmp___6 != 0U) { j = 0U; k = 0U; i = 0U; sg = scsi_prot_sglist(cmd); goto ldv_61204; ldv_61203: num_ent = sg->dma_length / 8U; if (k + num_ent < blocks_done) { k = k + num_ent; goto ldv_61201; } else { } j = (blocks_done - k) - 1U; k = blocks_done; goto ldv_61202; ldv_61201: i = i + 1U; sg = sg_next(sg); ldv_61204: tmp___2 = scsi_prot_sg_count(cmd); if (tmp___2 > i) { goto ldv_61203; } else { } ldv_61202: ; if (k != blocks_done) { ql_log(1U, vha, 12335, "unexpected tag values 
tag:lba=%x:%llx)\n", e_ref_tag, (unsigned long long )lba_s); return (1); } else { } tmp___3 = sg_page(sg); tmp___4 = lowmem_page_address((struct page const *)tmp___3); spt = (struct sd_dif_tuple *)tmp___4 + (unsigned long )sg->offset; spt = spt + (unsigned long )j; spt->app_tag = 65535U; tmp___5 = scsi_get_prot_type(cmd); if ((unsigned int )tmp___5 == 3U) { spt->ref_tag = 4294967295U; } else { } } else { } return (0); } else { } } else { } if ((int )e_guard != (int )a_guard) { scsi_build_sense_buffer(1, cmd->sense_buffer, 5, 16, 1); set_driver_byte(cmd, 8); set_host_byte(cmd, 5); cmd->result = cmd->result | 4; return (1); } else { } if (e_ref_tag != a_ref_tag) { scsi_build_sense_buffer(1, cmd->sense_buffer, 5, 16, 3); set_driver_byte(cmd, 8); set_host_byte(cmd, 5); cmd->result = cmd->result | 4; return (1); } else { } if ((int )e_app_tag != (int )a_app_tag) { scsi_build_sense_buffer(1, cmd->sense_buffer, 5, 16, 2); set_driver_byte(cmd, 8); set_host_byte(cmd, 5); cmd->result = cmd->result | 4; return (1); } else { } return (1); } } static void qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha , void *pkt , struct req_que *req , uint32_t index ) { struct qla_hw_data *ha ; srb_t *sp ; uint16_t comp_status ; uint16_t scsi_status ; uint16_t thread_id ; uint32_t rval ; struct fc_bsg_job *bsg_job ; sts_entry_t *sts ; struct sts_entry_24xx *sts24 ; { ha = vha->hw; rval = 0U; bsg_job = (struct fc_bsg_job *)0; sts = (sts_entry_t *)pkt; sts24 = (struct sts_entry_24xx *)pkt; if ((uint32_t )req->num_outstanding_cmds <= index) { ql_log(1U, vha, 28847, "Invalid SCSI completion handle 0x%x.\n", index); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); return; } else { } sp = *(req->outstanding_cmds + (unsigned long )index); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { *(req->outstanding_cmds + (unsigned long )index) = (srb_t *)0; bsg_job = sp->u.bsg_job; } else { ql_log(1U, vha, 28848, "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", (int )req->id, index); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); return; } if ((ha->device_type & 134217728U) != 0U) { comp_status = sts24->comp_status; scsi_status = (unsigned int )sts24->scsi_status & 4095U; } else { comp_status = sts->comp_status; scsi_status = (unsigned int )sts->scsi_status & 4095U; } thread_id = (uint16_t )(bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; switch ((int )comp_status) { case 0: ; if ((unsigned int )scsi_status == 0U) { (bsg_job->reply)->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; vha->qla_stats.input_bytes = vha->qla_stats.input_bytes + (uint64_t )(bsg_job->reply)->reply_payload_rcv_len; vha->qla_stats.input_requests = vha->qla_stats.input_requests + 1ULL; rval = 0U; } else { } goto done; case 7: ql_dbg(8388608U, vha, 28849, "Command completed with date overrun thread_id=%d\n", (int )thread_id); rval = 7U; goto ldv_61223; case 21: ql_dbg(8388608U, vha, 28850, "Command completed with date underrun thread_id=%d\n", (int )thread_id); rval = 8U; goto ldv_61223; case 1792: ql_dbg(8388608U, vha, 28851, "Command completed with read data overrun thread_id=%d\n", (int )thread_id); rval = 7U; goto ldv_61223; case 1799: ql_dbg(8388608U, vha, 28852, "Command completed with read and write data overrun thread_id=%d\n", (int )thread_id); rval = 7U; goto ldv_61223; case 1813: ql_dbg(8388608U, vha, 28853, "Command completed with read data over and write data underrun thread_id=%d\n", (int )thread_id); rval = 7U; goto ldv_61223; case 5376: ql_dbg(8388608U, vha, 28854, "Command completed 
with read data data underrun thread_id=%d\n", (int )thread_id); rval = 8U; goto ldv_61223; case 5383: ql_dbg(8388608U, vha, 28855, "Command completed with read data under and write data overrun thread_id=%d\n", (int )thread_id); rval = 8U; goto ldv_61223; case 5397: ql_dbg(8388608U, vha, 28856, "Command completed with read and write data underrun thread_id=%d\n", (int )thread_id); rval = 8U; goto ldv_61223; case 512: ql_dbg(8388608U, vha, 28857, "Command completed with data DMA error thread_id=%d\n", (int )thread_id); rval = 29U; goto ldv_61223; case 6: ql_dbg(8388608U, vha, 28858, "Command completed with timeout thread_id=%d\n", (int )thread_id); rval = 30U; goto ldv_61223; default: ql_dbg(8388608U, vha, 28859, "Command completed with completion status=0x%x thread_id=%d\n", (int )comp_status, (int )thread_id); rval = 1U; goto ldv_61223; } ldv_61223: (bsg_job->reply)->reply_payload_rcv_len = 0U; done: (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = rval; bsg_job->reply_len = 16U; (*(sp->done))((void *)vha, (void *)sp, 0); return; } } static void qla2x00_status_entry(scsi_qla_host_t *vha , struct rsp_que *rsp , void *pkt ) { srb_t *sp ; fc_port_t *fcport ; struct scsi_cmnd *cp ; sts_entry_t *sts ; struct sts_entry_24xx *sts24 ; uint16_t comp_status ; uint16_t scsi_status ; uint16_t ox_id ; uint8_t lscsi_status ; int32_t resid ; uint32_t sense_len ; uint32_t par_sense_len ; uint32_t rsp_info_len ; uint32_t resid_len ; uint32_t fw_resid_len ; uint8_t *rsp_info ; uint8_t *sense_data ; struct qla_hw_data *ha ; uint32_t handle ; uint16_t que ; struct req_que *req ; int logit ; int res ; uint16_t state_flags ; long tmp ; long tmp___0 ; unsigned int tmp___1 ; unsigned int tmp___2 ; unsigned int tmp___3 ; unsigned int tmp___4 ; unsigned int tmp___5 ; unsigned int tmp___6 ; int tmp___7 ; int tmp___8 ; unsigned int tmp___9 ; { ha = vha->hw; logit = 1; res = 0; state_flags = 0U; sts = (sts_entry_t *)pkt; sts24 = (struct sts_entry_24xx *)pkt; if ((ha->device_type & 134217728U) != 0U) { comp_status = sts24->comp_status; scsi_status = (unsigned int )sts24->scsi_status & 4095U; state_flags = sts24->state_flags; } else { comp_status = sts->comp_status; scsi_status = (unsigned int )sts->scsi_status & 4095U; } handle = (unsigned int )((unsigned short )sts->handle); que = (unsigned short )(sts->handle >> 16); req = *(ha->req_q_map + (unsigned long )que); if ((uint32_t )req->num_outstanding_cmds > handle) { sp = *(req->outstanding_cmds + (unsigned long )handle); } else { sp = (srb_t *)0; } if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_dbg(134217728U, vha, 12311, "Invalid status handle (0x%x).\n", sts->handle); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } qla2xxx_wake_dpc(vha); return; } else { } tmp = ldv__builtin_expect(((int )state_flags & 2) != 0, 0L); if (tmp != 0L) { tmp___0 = ldv__builtin_expect((unsigned int )sp->type == 9U, 0L); if (tmp___0 != 0L) { qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); return; } else { } } else { } if ((unsigned int )comp_status == 0U && (unsigned int )scsi_status == 0U) { qla2x00_do_host_ramp_up(vha); qla2x00_process_completed_request(vha, req, handle); return; } else { } *(req->outstanding_cmds + (unsigned long )handle) = (srb_t *)0; cp = sp->u.scmd.cmd; if ((unsigned long )cp == (unsigned long )((struct scsi_cmnd *)0)) { ql_dbg(134217728U, vha, 12312, "Command already returned 
(0x%x/%p).\n", sts->handle, sp); return; } else { } lscsi_status = (unsigned int )((uint8_t )scsi_status) & 254U; fcport = sp->fcport; ox_id = 0U; fw_resid_len = 0U; resid_len = fw_resid_len; rsp_info_len = resid_len; par_sense_len = rsp_info_len; sense_len = par_sense_len; if ((ha->device_type & 134217728U) != 0U) { if (((int )scsi_status & 512) != 0) { sense_len = sts24->sense_len; } else { } if (((int )scsi_status & 256) != 0) { rsp_info_len = sts24->rsp_data_len; } else { } if (((int )scsi_status & 3072) != 0) { resid_len = sts24->rsp_residual_count; } else { } if ((unsigned int )comp_status == 21U) { fw_resid_len = sts24->residual_len; } else { } rsp_info = (uint8_t *)(& sts24->data); sense_data = (uint8_t *)(& sts24->data); host_to_fcp_swap((uint8_t *)(& sts24->data), 28U); ox_id = sts24->ox_id; par_sense_len = 28U; } else { if (((int )scsi_status & 512) != 0) { sense_len = (uint32_t )sts->req_sense_length; } else { } if (((int )scsi_status & 256) != 0) { rsp_info_len = (uint32_t )sts->rsp_info_len; } else { } resid_len = sts->residual_length; rsp_info = (uint8_t *)(& sts->rsp_info); sense_data = (uint8_t *)(& sts->req_sense_data); par_sense_len = 32U; } if (((int )scsi_status & 256) != 0) { if ((ha->device_type & 134217728U) != 0U) { sense_data = sense_data + (unsigned long )rsp_info_len; par_sense_len = par_sense_len - rsp_info_len; } else { } if (rsp_info_len > 3U && (unsigned int )*(rsp_info + 3UL) != 0U) { ql_dbg(134217728U, fcport->vha, 12313, "FCP I/O protocol failure (0x%x/0x%x).\n", rsp_info_len, (int )*(rsp_info + 3UL)); res = 131072; goto out; } else { } } else { } if (((ha->device_type & 134217728U) != 0U && (unsigned int )comp_status == 0U) && ((int )scsi_status & 1024) != 0) { comp_status = 7U; } else { } switch ((int )comp_status) { case 0: ; case 28: ; if ((unsigned int )scsi_status == 0U) { res = 0; goto ldv_61266; } else { } if (((int )scsi_status & 3072) != 0) { resid = (int32_t )resid_len; scsi_set_resid(cp, resid); if ((unsigned int )lscsi_status == 0U) { tmp___2 = scsi_bufflen(cp); if (tmp___2 - (unsigned int )resid < cp->underflow) { tmp___1 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12314, "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", resid, tmp___1); res = 458752; goto ldv_61266; } else { } } else { } } else { } res = (int )lscsi_status; if ((unsigned int )lscsi_status == 40U) { ql_dbg(134217728U, fcport->vha, 12315, "QUEUE FULL detected.\n"); goto ldv_61266; } else { } logit = 0; if ((unsigned int )lscsi_status != 2U) { goto ldv_61266; } else { } memset((void *)cp->sense_buffer, 0, 96UL); if (((int )scsi_status & 512) == 0) { goto ldv_61266; } else { } qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, rsp, res); goto ldv_61266; case 21: resid = (int32_t )((ha->device_type & 134217728U) != 0U ? 
fw_resid_len : resid_len); scsi_set_resid(cp, resid); if (((int )scsi_status & 2048) != 0) { if ((ha->device_type & 134217728U) != 0U && fw_resid_len != resid_len) { tmp___3 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12317, "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, tmp___3); res = (int )lscsi_status | 458752; goto check_scsi_status; } else { } if ((unsigned int )lscsi_status == 0U) { tmp___5 = scsi_bufflen(cp); if (tmp___5 - (unsigned int )resid < cp->underflow) { tmp___4 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12318, "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", resid, tmp___4); res = 458752; goto ldv_61266; } else { } } else { } } else if ((unsigned int )lscsi_status != 40U && (unsigned int )lscsi_status != 8U) { tmp___6 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12319, "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, tmp___6); res = (int )lscsi_status | 458752; goto check_scsi_status; } else { ql_dbg(134217728U, fcport->vha, 12336, "scsi_status: 0x%x, lscsi_status: 0x%x\n", (int )scsi_status, (int )lscsi_status); } res = (int )lscsi_status; logit = 0; check_scsi_status: ; if ((unsigned int )lscsi_status != 0U) { if ((unsigned int )lscsi_status == 40U) { ql_dbg(134217728U, fcport->vha, 12320, "QUEUE FULL detected.\n"); logit = 1; goto ldv_61266; } else { } if ((unsigned int )lscsi_status != 2U) { goto ldv_61266; } else { } memset((void *)cp->sense_buffer, 0, 96UL); if (((int )scsi_status & 512) == 0) { goto ldv_61266; } else { } qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, rsp, res); } else { } goto ldv_61266; case 41: ; case 42: ; case 43: ; case 1: ; case 40: ; case 6: ; case 4: res = 917504; if ((unsigned int )comp_status == 6U) { if ((ha->device_type & 134217728U) != 0U) { goto ldv_61266; } else if (((int )sts->status_flags & 8192) == 0) { goto ldv_61266; } else { } } else { } tmp___7 = atomic_read((atomic_t const *)(& fcport->state)); ql_dbg(134217728U, fcport->vha, 12321, "Port to be marked lost on fcport=%02x%02x%02x, current port state= %s.\n", (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, port_state_str___1[tmp___7]); tmp___8 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___8 == 4) { qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); } else { } goto ldv_61266; case 5: res = 524288; goto ldv_61266; case 12: logit = qla2x00_handle_dif_error(sp, sts24); res = cp->result; goto ldv_61266; case 3: res = 458752; if (((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) || ((((int )ha->fw_attributes_h << 16) | (int )ha->fw_attributes) & 4194304) == 0) { goto ldv_61266; } else { } if (((int )state_flags & 16) != 0) { if ((unsigned long )(cp->request)->rq_disk != (unsigned long )((struct gendisk *)0)) { dev_printk("\f", (struct device const *)(& (cp->device)->sdev_gendev), "[%s] Unsupported device \'%s\' found.\n", (char *)(& ((cp->request)->rq_disk)->disk_name), (cp->device)->vendor); } else { dev_printk("\f", (struct device const *)(& (cp->device)->sdev_gendev), "Unsupported device \'%s\' found.\n", (cp->device)->vendor); } } else { } goto ldv_61266; default: res = 458752; goto ldv_61266; } ldv_61266: ; out: ; if (logit != 0) { tmp___9 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12322, "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", (int )comp_status, (int )scsi_status, res, vha->host_no, (cp->device)->id, (cp->device)->lun, 
(int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa, (int )ox_id, cp->cmnd, tmp___9, rsp_info_len, resid_len, fw_resid_len); } else { } if (res == 0) { qla2x00_do_host_ramp_up(vha); } else { } if ((unsigned long )rsp->status_srb == (unsigned long )((srb_t *)0)) { (*(sp->done))((void *)ha, (void *)sp, res); } else { } return; } } static void qla2x00_status_cont_entry(struct rsp_que *rsp , sts_cont_entry_t *pkt ) { uint8_t sense_sz ; struct qla_hw_data *ha ; struct scsi_qla_host *vha ; void *tmp ; srb_t *sp ; struct scsi_cmnd *cp ; uint32_t sense_len ; uint8_t *sense_ptr ; size_t __len ; void *__ret ; { sense_sz = 0U; ha = rsp->hw; tmp = pci_get_drvdata(ha->pdev); vha = (struct scsi_qla_host *)tmp; sp = rsp->status_srb; if ((unsigned long )sp == (unsigned long )((srb_t *)0) || sp->u.scmd.request_sense_length == 0U) { return; } else { } sense_len = sp->u.scmd.request_sense_length; sense_ptr = sp->u.scmd.request_sense_ptr; cp = sp->u.scmd.cmd; if ((unsigned long )cp == (unsigned long )((struct scsi_cmnd *)0)) { ql_log(1U, vha, 12325, "cmd is NULL: already returned to OS (sp=%p).\n", sp); rsp->status_srb = (srb_t *)0; return; } else { } if (sense_len > 60U) { sense_sz = 60U; } else { sense_sz = (uint8_t )sense_len; } if ((ha->device_type & 134217728U) != 0U) { host_to_fcp_swap((uint8_t *)(& pkt->data), 60U); } else { } __len = (size_t )sense_sz; __ret = __builtin_memcpy((void *)sense_ptr, (void const *)(& pkt->data), __len); ql_dump_buffer(134348800U, vha, 12332, sense_ptr, (uint32_t )sense_sz); sense_len = sense_len - (uint32_t )sense_sz; sense_ptr = sense_ptr + (unsigned long )sense_sz; sp->u.scmd.request_sense_ptr = sense_ptr; sp->u.scmd.request_sense_length = sense_len; if (sense_len == 0U) { rsp->status_srb = (srb_t *)0; (*(sp->done))((void *)ha, (void *)sp, cp->result); } else { } return; } } static void qla2x00_error_entry(scsi_qla_host_t *vha , struct rsp_que *rsp , sts_entry_t *pkt ) { srb_t *sp ; struct qla_hw_data *ha ; char func[11U] ; uint16_t que ; struct req_que *req ; int res ; { ha = vha->hw; func[0] = 'E'; func[1] = 'R'; func[2] = 'R'; func[3] = 'O'; func[4] = 'R'; func[5] = '-'; func[6] = 'I'; func[7] = 'O'; func[8] = 'C'; func[9] = 'B'; func[10] = '\000'; que = (unsigned short )(pkt->handle >> 16); req = (struct req_que *)0; res = 458752; ql_dbg(33554432U, vha, 20522, "type of error status in response: 0x%x\n", (int )pkt->entry_status); if ((int )((unsigned short )ha->max_req_queues) <= (int )que || (unsigned long )*(ha->req_q_map + (unsigned long )que) == (unsigned long )((struct req_que *)0)) { goto fatal; } else { } req = *(ha->req_q_map + (unsigned long )que); if (((int )pkt->entry_status & 2) != 0) { res = 131072; } else { } sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)pkt); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { (*(sp->done))((void *)ha, (void *)sp, res); return; } else { } fatal: ql_log(1U, vha, 20528, "Error entry - invalid handle/queue.\n"); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } qla2xxx_wake_dpc(vha); return; } } static void qla24xx_mbx_completion(scsi_qla_host_t *vha , uint16_t mb0 ) { uint16_t cnt ; uint32_t mboxes ; uint16_t *wptr ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; { ha = vha->hw; reg = & (ha->iobase)->isp24; mboxes = (uint32_t )((1 << (int )ha->mbx_count) + -1); if ((unsigned long )ha->mcp == 
(unsigned long )((mbx_cmd_t *)0)) { ql_dbg(33554432U, vha, 20558, "MBX pointer OLD_ERROR.\n"); } else { mboxes = (ha->mcp)->in_mb; } ha->flags.mbox_int = 1U; ha->mailbox_out[0] = mb0; mboxes = mboxes >> 1; wptr = & reg->mailbox1; cnt = 1U; goto ldv_61316; ldv_61315: ; if ((int )mboxes & 1) { ha->mailbox_out[(int )cnt] = readw((void const volatile *)wptr); } else { } mboxes = mboxes >> 1; wptr = wptr + 1; cnt = (uint16_t )((int )cnt + 1); ldv_61316: ; if ((int )((unsigned short )ha->mbx_count) > (int )cnt) { goto ldv_61315; } else { } return; } } void qla24xx_process_response_queue(struct scsi_qla_host *vha , struct rsp_que *rsp ) { struct sts_entry_24xx *pkt ; struct qla_hw_data *ha ; struct device_reg_82xx *reg ; { ha = vha->hw; if (*((unsigned long *)vha + 19UL) == 0UL) { return; } else { } goto ldv_61324; ldv_61339: pkt = (struct sts_entry_24xx *)rsp->ring_ptr; rsp->ring_index = (uint16_t )((int )rsp->ring_index + 1); if ((int )rsp->ring_index == (int )rsp->length) { rsp->ring_index = 0U; rsp->ring_ptr = rsp->ring; } else { rsp->ring_ptr = rsp->ring_ptr + 1; } if ((unsigned int )pkt->entry_status != 0U) { qla2x00_error_entry(vha, rsp, (sts_entry_t *)pkt); qlt_24xx_process_response_error(vha, pkt); ((response_t *)pkt)->signature = 3735936685U; __asm__ volatile ("sfence": : : "memory"); goto ldv_61324; } else { } switch ((int )pkt->entry_type) { case 3: qla2x00_status_entry(vha, rsp, (void *)pkt); goto ldv_61326; case 16: qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); goto ldv_61326; case 50: qla24xx_report_id_acquisition(vha, (struct vp_rpt_id_entry_24xx *)pkt); goto ldv_61326; case 82: qla24xx_logio_entry(vha, rsp->req, (struct logio_entry_24xx *)pkt); goto ldv_61326; case 20: qla24xx_tm_iocb_entry(vha, rsp->req, (struct tsk_mgmt_entry *)pkt); goto ldv_61326; case 41: qla24xx_els_ct_entry(vha, rsp->req, pkt, 41); goto ldv_61326; case 83: qla24xx_els_ct_entry(vha, rsp->req, pkt, 83); goto ldv_61326; case 84: qlt_24xx_process_atio_queue(vha); case 85: ; case 18: ; case 14: qlt_response_pkt_all_vps(vha, (response_t *)pkt); goto ldv_61326; case 4: ; goto ldv_61326; default: ql_dbg(33554432U, vha, 20546, "Received unknown response pkt type %x entry status=%x.\n", (int )pkt->entry_type, (int )pkt->entry_status); goto ldv_61326; } ldv_61326: ((response_t *)pkt)->signature = 3735936685U; __asm__ volatile ("sfence": : : "memory"); ldv_61324: ; if ((rsp->ring_ptr)->signature != 3735936685U) { goto ldv_61339; } else { } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { reg = & (ha->iobase)->isp82; writel((unsigned int )rsp->ring_index, (void volatile *)(& reg->rsp_q_out)); } else { writel((unsigned int )rsp->ring_index, (void volatile *)rsp->rsp_q_out); } return; } } static void qla2xxx_check_risc_status(scsi_qla_host_t *vha ) { int rval ; uint32_t cnt ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; if (((ha->device_type & 2048U) == 0U && (ha->device_type & 8192U) == 0U) && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) { return; } else { } rval = 0; writel(31744U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(1U, (void volatile *)(& reg->iobase_window)); cnt = 10000U; goto ldv_61350; ldv_61349: ; if (cnt != 0U) { writel(1U, (void volatile *)(& reg->iobase_window)); __const_udelay(42950UL); } else { rval = 256; } cnt = cnt - 1U; ldv_61350: tmp = readl((void const 
volatile *)(& reg->iobase_window)); if ((tmp & 1U) == 0U && rval == 0) { goto ldv_61349; } else { } if (rval == 0) { goto next_test; } else { } rval = 0; writel(3U, (void volatile *)(& reg->iobase_window)); cnt = 100U; goto ldv_61354; ldv_61353: ; if (cnt != 0U) { writel(3U, (void volatile *)(& reg->iobase_window)); __const_udelay(42950UL); } else { rval = 256; } cnt = cnt - 1U; ldv_61354: tmp___0 = readl((void const volatile *)(& reg->iobase_window)); if ((tmp___0 & 1U) == 0U && rval == 0) { goto ldv_61353; } else { } if (rval != 0) { goto done; } else { } next_test: tmp___1 = readl((void const volatile *)(& reg->iobase_c8)); if ((tmp___1 & 8U) != 0U) { ql_log(2U, vha, 20556, "Additional code -- 0x55AA.\n"); } else { } done: writel(0U, (void volatile *)(& reg->iobase_window)); readl((void const volatile *)(& reg->iobase_window)); return; } } irqreturn_t qla24xx_intr_handler(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; int status ; unsigned long iter ; uint32_t stat ; uint32_t hccr ; uint16_t mb[8U] ; struct rsp_que *rsp ; unsigned long flags ; int tmp ; long tmp___0 ; raw_spinlock_t *tmp___1 ; void *tmp___2 ; int tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; int tmp___7 ; long tmp___8 ; unsigned long tmp___9 ; { rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20569, "%s: NULL response queue pointer.\n", "qla24xx_intr_handler"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->isp24; status = 0; tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (1); } else { } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); tmp___2 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___2; iter = 50UL; goto ldv_61388; ldv_61387: stat = readl((void const volatile *)(& reg->host_status)); if ((stat & 256U) != 0U) { tmp___3 = pci_channel_offline(ha->pdev); tmp___4 = ldv__builtin_expect(tmp___3 != 0, 0L); if (tmp___4 != 0L) { goto ldv_61375; } else { } hccr = readl((void const volatile *)(& reg->hccr)); ql_log(1U, vha, 20555, "RISC paused -- HCCR=%x, Dumping firmware.\n", hccr); qla2xxx_check_risc_status(vha); (*((ha->isp_ops)->fw_dump))(vha, 1); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_61375; } else if ((stat & 32768U) == 0U) { goto ldv_61375; } else { } switch (stat & 255U) { case 1U: ; case 2U: ; case 16U: ; case 17U: qla24xx_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; goto ldv_61380; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((void const volatile *)(& reg->mailbox1)); mb[2] = readw((void const volatile *)(& reg->mailbox2)); mb[3] = readw((void const volatile *)(& reg->mailbox3)); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_61380; case 19U: ; case 20U: qla24xx_process_response_queue(vha, rsp); goto ldv_61380; case 28U: qlt_24xx_process_atio_queue(vha); goto ldv_61380; case 29U: qlt_24xx_process_atio_queue(vha); qla24xx_process_response_queue(vha, rsp); goto ldv_61380; default: ql_dbg(33554432U, vha, 20559, "Unrecognized interrupt type (%d).\n", stat * 255U); goto ldv_61380; } ldv_61380: writel(2684354560U, (void volatile *)(& reg->hccr)); __readl((void const volatile *)(& reg->hccr)); tmp___5 = ldv__builtin_expect((ha->device_type & 32768U) != 0U, 0L); if (tmp___5 != 0L) { tmp___7 = 1; } else { tmp___6 = ldv__builtin_expect((ha->device_type & 
65536U) != 0U, 0L); if (tmp___6 != 0L) { tmp___7 = 1; } else { tmp___7 = 0; } } if (tmp___7 != 0) { tmp___8 = ldv__builtin_expect((unsigned int )(ha->pdev)->revision == 1U, 0L); if (tmp___8 != 0L) { __const_udelay(17500UL); } else { } } else { } ldv_61388: tmp___9 = iter; iter = iter - 1UL; if (tmp___9 != 0UL) { goto ldv_61387; } else { } ldv_61375: qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } static irqreturn_t qla24xx_msix_rsp_q(int irq , void *dev_id ) { struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_24xx *reg ; struct scsi_qla_host *vha ; unsigned long flags ; raw_spinlock_t *tmp ; void *tmp___0 ; { rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20570, "%s: NULL response queue pointer.\n", "qla24xx_msix_rsp_q"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->isp24; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (struct scsi_qla_host *)tmp___0; qla24xx_process_response_queue(vha, rsp); if (*((unsigned long *)ha + 2UL) == 0UL) { writel(2684354560U, (void volatile *)(& reg->hccr)); __readl((void const volatile *)(& reg->hccr)); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } static irqreturn_t qla25xx_msix_rsp_q(int irq , void *dev_id ) { struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_24xx *reg ; unsigned long flags ; raw_spinlock_t *tmp ; { rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20571, "%s: NULL response queue pointer.\n", "qla25xx_msix_rsp_q"); return (0); } else { } ha = rsp->hw; if (*((unsigned long *)ha + 2UL) == 0UL) { reg = & (ha->iobase)->isp24; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writel(2684354560U, (void volatile *)(& reg->hccr)); __readl((void const volatile *)(& reg->hccr)); spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } queue_work_on((int )rsp->id + -1, ha->wq, & rsp->q_work); return (1); } } static irqreturn_t qla24xx_msix_default(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_24xx *reg ; int status ; uint32_t stat ; uint32_t hccr ; uint16_t mb[8U] ; unsigned long flags ; raw_spinlock_t *tmp ; void *tmp___0 ; int tmp___1 ; long tmp___2 ; { rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20572, "%s: NULL response queue pointer.\n", "qla24xx_msix_default"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->isp24; status = 0; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; stat = readl((void const volatile *)(& reg->host_status)); if ((stat & 256U) != 0U) { tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { goto ldv_61431; } else { } hccr = readl((void const volatile *)(& reg->hccr)); ql_log(2U, vha, 20560, "RISC paused -- HCCR=%x, Dumping firmware.\n", hccr); qla2xxx_check_risc_status(vha); (*((ha->isp_ops)->fw_dump))(vha, 1); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_61431; } else if ((stat & 32768U) == 0U) { goto ldv_61431; } else { } switch (stat & 255U) { case 1U: ; case 2U: ; 
case 16U: ; case 17U: qla24xx_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; goto ldv_61436; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((void const volatile *)(& reg->mailbox1)); mb[2] = readw((void const volatile *)(& reg->mailbox2)); mb[3] = readw((void const volatile *)(& reg->mailbox3)); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_61436; case 19U: ; case 20U: qla24xx_process_response_queue(vha, rsp); goto ldv_61436; case 28U: qlt_24xx_process_atio_queue(vha); goto ldv_61436; case 29U: qlt_24xx_process_atio_queue(vha); qla24xx_process_response_queue(vha, rsp); goto ldv_61436; default: ql_dbg(33554432U, vha, 20561, "Unrecognized interrupt type (%d).\n", stat & 255U); goto ldv_61436; } ldv_61436: writel(2684354560U, (void volatile *)(& reg->hccr)); ldv_61431: qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } static struct qla_init_msix_entry msix_entries[3U] = { {"qla2xxx (default)", & qla24xx_msix_default}, {"qla2xxx (rsp_q)", & qla24xx_msix_rsp_q}, {"qla2xxx (multiq)", & qla25xx_msix_rsp_q}}; static struct qla_init_msix_entry qla82xx_msix_entries[2U] = { {"qla2xxx (default)", & qla82xx_msix_default}, {"qla2xxx (rsp_q)", & qla82xx_msix_rsp_q}}; static struct qla_init_msix_entry qla83xx_msix_entries[3U] = { {"qla2xxx (default)", & qla24xx_msix_default}, {"qla2xxx (rsp_q)", & qla24xx_msix_rsp_q}, {"qla2xxx (atio_q)", & qla83xx_msix_atio_q}}; static void qla24xx_disable_msix(struct qla_hw_data *ha ) { int i ; struct qla_msix_entry *qentry ; scsi_qla_host_t *vha ; void *tmp ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; i = 0; goto ldv_61456; ldv_61455: qentry = ha->msix_entries + (unsigned long )i; if (qentry->have_irq != 0) { free_irq(qentry->vector, (void *)qentry->rsp); } else { } i = i + 1; ldv_61456: ; if ((int )ha->msix_count > i) { goto ldv_61455; } else { } pci_disable_msix(ha->pdev); kfree((void const *)ha->msix_entries); ha->msix_entries = (struct qla_msix_entry *)0; ha->flags.msix_enabled = 0U; ql_dbg(1073741824U, vha, 66, "Disabled the MSI.\n"); return; } } static int qla24xx_enable_msix(struct qla_hw_data *ha , struct rsp_que *rsp ) { int i ; int ret ; struct msix_entry *entries ; struct qla_msix_entry *qentry ; scsi_qla_host_t *vha ; void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = kzalloc((unsigned long )ha->msix_count * 8UL, 208U); entries = (struct msix_entry *)tmp___0; if ((unsigned long )entries == (unsigned long )((struct msix_entry *)0)) { ql_log(1U, vha, 188, "Failed to allocate memory for msix_entry.\n"); return (-12); } else { } i = 0; goto ldv_61468; ldv_61467: (entries + (unsigned long )i)->entry = (u16 )i; i = i + 1; ldv_61468: ; if ((int )ha->msix_count > i) { goto ldv_61467; } else { } ret = pci_enable_msix(ha->pdev, entries, (int )ha->msix_count); if (ret != 0) { if (ret <= 1) { goto msix_failed; } else { } ql_log(1U, vha, 198, "MSI-X: Failed to enable support -- %d/%d\n Retry with %d vectors.\n", (int )ha->msix_count, ret, ret); ha->msix_count = (uint16_t )ret; ret = pci_enable_msix(ha->pdev, entries, (int )ha->msix_count); if (ret != 0) { msix_failed: ql_log(0U, vha, 199, "MSI-X: Failed to enable support, giving up -- %d/%d.\n", (int )ha->msix_count, ret); goto msix_out; } else { } ha->max_rsp_queues = (unsigned int )((uint8_t )ha->msix_count) + 255U; } else { } tmp___1 = kzalloc((unsigned long )ha->msix_count * 24UL, 208U); ha->msix_entries 
= (struct qla_msix_entry *)tmp___1; if ((unsigned long )ha->msix_entries == (unsigned long )((struct qla_msix_entry *)0)) { ql_log(0U, vha, 200, "Failed to allocate memory for ha->msix_entries.\n"); ret = -12; goto msix_out; } else { } ha->flags.msix_enabled = 1U; i = 0; goto ldv_61473; ldv_61472: qentry = ha->msix_entries + (unsigned long )i; qentry->vector = (entries + (unsigned long )i)->vector; qentry->entry = (entries + (unsigned long )i)->entry; qentry->have_irq = 0; qentry->rsp = (struct rsp_que *)0; i = i + 1; ldv_61473: ; if ((int )ha->msix_count > i) { goto ldv_61472; } else { } i = 0; goto ldv_61476; ldv_61475: qentry = ha->msix_entries + (unsigned long )i; if (ql2x_ini_mode != 2 && ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { ret = request_irq(qentry->vector, qla83xx_msix_entries[i].handler, 0UL, qla83xx_msix_entries[i].name, (void *)rsp); } else if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { ret = request_irq(qentry->vector, qla82xx_msix_entries[i].handler, 0UL, qla82xx_msix_entries[i].name, (void *)rsp); } else { ret = request_irq(qentry->vector, msix_entries[i].handler, 0UL, msix_entries[i].name, (void *)rsp); } if (ret != 0) { ql_log(0U, vha, 203, "MSI-X: unable to register handler -- %x/%d.\n", qentry->vector, ret); qla24xx_disable_msix(ha); ha->mqenable = 0U; goto msix_out; } else { } qentry->have_irq = 1; qentry->rsp = rsp; rsp->msix = qentry; i = i + 1; ldv_61476: ; if ((int )ha->msix_count > i) { goto ldv_61475; } else { } if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { if (((unsigned long )ha->msixbase != (unsigned long )((device_reg_t *)0) && (unsigned long )ha->mqiobase != (unsigned long )((device_reg_t *)0)) && ((unsigned int )ha->max_rsp_queues > 1U || (unsigned int )ha->max_req_queues > 1U)) { ha->mqenable = 1U; } else { } } else if ((unsigned long )ha->mqiobase != (unsigned long )((device_reg_t *)0) && ((unsigned int )ha->max_rsp_queues > 1U || (unsigned int )ha->max_req_queues > 1U)) { ha->mqenable = 1U; } else { } ql_dbg(1048576U, vha, 49157, "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", ha->mqiobase, (int )ha->max_rsp_queues, (int )ha->max_req_queues); ql_dbg(1073741824U, vha, 85, "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", ha->mqiobase, (int )ha->max_rsp_queues, (int )ha->max_req_queues); msix_out: kfree((void const *)entries); return (ret); } } int qla2x00_request_irqs(struct qla_hw_data *ha , struct rsp_que *rsp ) { int ret ; device_reg_t *reg ; scsi_qla_host_t *vha ; void *tmp ; { reg = ha->iobase; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; if ((((((ha->device_type & 256U) == 0U && (ha->device_type & 2048U) == 0U) && (ha->device_type & 4096U) == 0U) && ((((ha->device_type & 8192U) == 0U && (ha->device_type & 16384U) == 0U) && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U)) && (ha->device_type & 32768U) == 0U) && (ha->device_type & 131072U) == 0U) { goto skip_msi; } else { } if ((unsigned int )(ha->pdev)->subsystem_vendor == 4156U && (((unsigned int )(ha->pdev)->subsystem_device == 28736U || (unsigned int )(ha->pdev)->subsystem_device == 28737U) || (unsigned int )(ha->pdev)->subsystem_device == 5893U)) { ql_log(1U, vha, 52, "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", (int )(ha->pdev)->subsystem_vendor, (int )(ha->pdev)->subsystem_device); goto skip_msi; } else { } if ((ha->device_type & 256U) != 0U && (unsigned int )(ha->pdev)->revision <= 2U) { ql_log(1U, vha, 53, "MSI-X; Unsupported 
ISP2432 (0x%X, 0x%X).\n", (int )(ha->pdev)->revision, 3); goto skip_msix; } else { } ret = qla24xx_enable_msix(ha, rsp); if (ret == 0) { ql_dbg(1073741824U, vha, 54, "MSI-X: Enabled (0x%X, 0x%X).\n", (int )ha->chip_revision, (int )ha->fw_attributes); goto clear_risc_ints; } else { } ql_log(2U, vha, 55, "MSI-X Falling back-to MSI mode -%d.\n", ret); skip_msix: ; if (((((((ha->device_type & 128U) == 0U && (ha->device_type & 256U) == 0U) && (ha->device_type & 2048U) == 0U) && (ha->device_type & 4096U) == 0U) && (ha->device_type & 8192U) == 0U) && ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U)) && (ha->device_type & 131072U) == 0U) { goto skip_msi; } else { } ret = pci_enable_msi_block(ha->pdev, 1U); if (ret == 0) { ql_dbg(1073741824U, vha, 56, "MSI: Enabled.\n"); ha->flags.msi_enabled = 1U; } else { ql_log(1U, vha, 57, "MSI-X; Falling back-to INTa mode -- %d.\n", ret); } if (*((unsigned long *)ha + 2UL) == 0UL && (ha->device_type & 16384U) != 0U) { return (258); } else { } skip_msi: ret = request_irq((ha->pdev)->irq, (ha->isp_ops)->intr_handler, *((unsigned long *)ha + 2UL) != 0UL ? 0UL : 128UL, "qla2xxx", (void *)rsp); if (ret != 0) { ql_log(1U, vha, 58, "Failed to reserve interrupt %d already in use.\n", (ha->pdev)->irq); goto fail; } else if (*((unsigned long *)ha + 2UL) == 0UL) { ql_dbg(1073741824U, vha, 293, "INTa mode: Enabled.\n"); ha->flags.mr_intr_valid = 1U; } else { } clear_risc_ints: spin_lock_irq(& ha->hardware_lock); if ((ha->device_type & 134217728U) == 0U) { writew(0, (void volatile *)(& reg->isp.semaphore)); } else { } spin_unlock_irq(& ha->hardware_lock); fail: ; return (ret); } } void qla2x00_free_irqs(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct rsp_que *rsp ; { ha = vha->hw; if ((unsigned long )ha->rsp_q_map == (unsigned long )((struct rsp_que **)0) || (unsigned long )*(ha->rsp_q_map) == (unsigned long )((struct rsp_que *)0)) { return; } else { } rsp = *(ha->rsp_q_map); if (*((unsigned long *)ha + 2UL) != 0UL) { qla24xx_disable_msix(ha); } else if (*((unsigned long *)ha + 2UL) != 0UL) { free_irq((ha->pdev)->irq, (void *)rsp); pci_disable_msi(ha->pdev); } else { free_irq((ha->pdev)->irq, (void *)rsp); } return; } } int qla25xx_request_irq(struct rsp_que *rsp ) { struct qla_hw_data *ha ; struct qla_init_msix_entry *intr ; struct qla_msix_entry *msix ; scsi_qla_host_t *vha ; void *tmp ; int ret ; { ha = rsp->hw; intr = (struct qla_init_msix_entry *)(& msix_entries) + 2UL; msix = rsp->msix; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ret = request_irq(msix->vector, intr->handler, 0UL, intr->name, (void *)rsp); if (ret != 0) { ql_log(0U, vha, 230, "MSI-X: Unable to register handler -- %x/%d.\n", msix->vector, ret); return (ret); } else { } msix->have_irq = 1; msix->rsp = rsp; return (ret); } } void choose_timer_6(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_6 = 2; return; } } int reg_timer_6(struct timer_list *timer ) { { ldv_timer_list_6 = timer; ldv_timer_state_6 = 1; return (0); } } void disable_suitable_timer_6(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_6) { ldv_timer_state_6 = 0; return; } else { } return; } } void activate_pending_timer_6(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_6 == (unsigned long )timer) { if (ldv_timer_state_6 == 2 || pending_flag != 0) { ldv_timer_list_6 = timer; ldv_timer_list_6->data = data; 
ldv_timer_state_6 = 1; } else { } return; } else { } reg_timer_6(timer); ldv_timer_list_6->data = data; return; } } int ldv_del_timer_31(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_32(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } int ldv_del_timer_35(struct timer_list *ldv_func_arg1 ) ; void choose_timer_7(struct timer_list *timer ) ; int ldv_scsi_add_host_with_dma_36(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; void qla2x00_get_sym_node_name(scsi_qla_host_t *vha , uint8_t *snn ) ; void qlt_rff_id(struct scsi_qla_host *vha , struct ct_sns_req *ct_req ) ; static int qla2x00_sns_ga_nxt(scsi_qla_host_t *vha , fc_port_t *fcport ) ; static int qla2x00_sns_gid_pt(scsi_qla_host_t *vha , sw_info_t *list ) ; static int qla2x00_sns_gpn_id(scsi_qla_host_t *vha , sw_info_t *list ) ; static int qla2x00_sns_gnn_id(scsi_qla_host_t *vha , sw_info_t *list ) ; static int qla2x00_sns_rft_id(scsi_qla_host_t *vha ) ; static int qla2x00_sns_rnn_id(scsi_qla_host_t *vha ) ; void *qla2x00_prep_ms_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) { struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; { ha = vha->hw; ms_pkt = ha->ms_iocb; memset((void *)ms_pkt, 0, 64UL); ms_pkt->entry_type = 41U; ms_pkt->entry_count = 1U; if ((int )ha->device_type < 0) { ms_pkt->loop_id.extended = 128U; } else { ms_pkt->loop_id.id.standard = 128U; } ms_pkt->control_flags = 34U; ms_pkt->timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; ms_pkt->cmd_dsd_count = 1U; ms_pkt->total_dsd_count = 2U; ms_pkt->rsp_bytecount = rsp_size; ms_pkt->req_bytecount = req_size; ms_pkt->dseg_req_address[0] = (unsigned int )ha->ct_sns_dma; ms_pkt->dseg_req_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ms_pkt->dseg_req_length = ms_pkt->req_bytecount; ms_pkt->dseg_rsp_address[0] = (unsigned int )ha->ct_sns_dma; ms_pkt->dseg_rsp_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount; vha->qla_stats.control_requests = vha->qla_stats.control_requests + 1U; return ((void *)ms_pkt); } } void *qla24xx_prep_ms_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) { struct qla_hw_data *ha ; struct ct_entry_24xx *ct_pkt ; { ha = vha->hw; ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; memset((void *)ct_pkt, 0, 64UL); ct_pkt->entry_type = 41U; ct_pkt->entry_count = 1U; ct_pkt->nport_handle = 2044U; ct_pkt->timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; ct_pkt->cmd_dsd_count = 1U; ct_pkt->rsp_dsd_count = 1U; ct_pkt->rsp_byte_count = rsp_size; ct_pkt->cmd_byte_count = req_size; ct_pkt->dseg_0_address[0] = (unsigned int )ha->ct_sns_dma; ct_pkt->dseg_0_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; ct_pkt->dseg_1_address[0] = (unsigned int )ha->ct_sns_dma; ct_pkt->dseg_1_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; ct_pkt->vp_index = (uint8_t )vha->vp_idx; vha->qla_stats.control_requests = vha->qla_stats.control_requests + 1U; return ((void *)ct_pkt); } } __inline static struct ct_sns_req 
*qla2x00_prep_ct_req(struct ct_sns_pkt *p , uint16_t cmd , uint16_t rsp_size ) { __u16 tmp ; __u16 tmp___0 ; { memset((void *)p, 0, 8208UL); p->p.req.header.revision = 1U; p->p.req.header.gs_type = 252U; p->p.req.header.gs_subtype = 2U; tmp = __fswab16((int )cmd); p->p.req.command = tmp; tmp___0 = __fswab16((int )((__u16 )(((int )rsp_size + -16) / 4))); p->p.req.max_rsp_size = tmp___0; return (& p->p.req); } } static int qla2x00_chk_ms_status(scsi_qla_host_t *vha , ms_iocb_entry_t *ms_pkt , struct ct_sns_rsp *ct_rsp , char const *routine ) { int rval ; uint16_t comp_status ; struct qla_hw_data *ha ; { ha = vha->hw; rval = 258; if ((unsigned int )ms_pkt->entry_status != 0U) { ql_dbg(268435456U, vha, 8241, "%s failed, error status (%x) on port_id: %02x%02x%02x.\n", routine, (int )ms_pkt->entry_status, (int )vha->d_id.b.domain, (int )vha->d_id.b.area, (int )vha->d_id.b.al_pa); } else { if ((ha->device_type & 134217728U) != 0U) { comp_status = ((struct ct_entry_24xx *)ms_pkt)->comp_status; } else { comp_status = ms_pkt->status; } switch ((int )comp_status) { case 0: ; case 21: ; case 7: ; if ((unsigned int )ct_rsp->header.response != 640U) { ql_dbg(268566528U, vha, 8311, "%s failed rejected request on port_id: %02x%02x%02x.\n", routine, (int )vha->d_id.b.domain, (int )vha->d_id.b.area, (int )vha->d_id.b.al_pa); ql_dump_buffer(268566528U, vha, 8312, (uint8_t *)(& ct_rsp->header), 16U); rval = 1; } else { rval = 0; } goto ldv_60765; default: ql_dbg(268435456U, vha, 8243, "%s failed, completion status (%x) on port_id: %02x%02x%02x.\n", routine, (int )comp_status, (int )vha->d_id.b.domain, (int )vha->d_id.b.area, (int )vha->d_id.b.al_pa); goto ldv_60765; } ldv_60765: ; } return (rval); } } int qla2x00_ga_nxt(scsi_qla_host_t *vha , fc_port_t *fcport ) { int rval ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; struct qla_hw_data *ha ; int tmp ; void *tmp___0 ; size_t __len ; void *__ret ; size_t __len___0 ; void *__ret___0 ; int tmp___1 ; { ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp = qla2x00_sns_ga_nxt(vha, fcport); return (tmp); } else { } tmp___0 = (*((ha->isp_ops)->prep_ms_iocb))(vha, 20U, 636U); ms_pkt = (ms_iocb_entry_t *)tmp___0; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 256, 636); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain; ct_req->req.port_id.port_id[1] = fcport->d_id.b.area; ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8290, "GA_NXT issue IOCB failed (%d).\n", rval); } else { tmp___1 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT"); if (tmp___1 != 0) { rval = 258; } else { fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0]; fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1]; fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2]; __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& fcport->node_name), (void const *)(& ct_rsp->rsp.ga_nxt.node_name), __len); } else { __ret = __builtin_memcpy((void *)(& fcport->node_name), (void const *)(& ct_rsp->rsp.ga_nxt.node_name), __len); } __len___0 = 8UL; if (__len___0 > 63UL) { __ret___0 = __memcpy((void *)(& fcport->port_name), (void const *)(& ct_rsp->rsp.ga_nxt.port_name), __len___0); } else { __ret___0 = __builtin_memcpy((void *)(& fcport->port_name), (void const *)(& ct_rsp->rsp.ga_nxt.port_name), __len___0); } fcport->fc4_type = (int )ct_rsp->rsp.ga_nxt.fc4_types[2] & 1 ? 
8U : 0U; if ((unsigned int )ct_rsp->rsp.ga_nxt.port_type != 1U && (unsigned int )ct_rsp->rsp.ga_nxt.port_type != 2U) { fcport->d_id.b.domain = 240U; } else { } ql_dbg(268435456U, vha, 8291, "GA_NXT entry - nn %8phN pn %8phN port_id=%02x%02x%02x.\n", (uint8_t *)(& fcport->node_name), (uint8_t *)(& fcport->port_name), (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } } return (rval); } } __inline static int qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha ) { { return (((int )(vha->hw)->max_fibre_devices + 4) * 4); } } int qla2x00_gid_pt(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; uint16_t i ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; struct ct_sns_gid_pt_data *gid_data ; struct qla_hw_data *ha ; uint16_t gid_pt_rsp_size ; int tmp ; int tmp___0 ; void *tmp___1 ; int tmp___2 ; { ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp = qla2x00_sns_gid_pt(vha, list); return (tmp); } else { } gid_data = (struct ct_sns_gid_pt_data *)0; tmp___0 = qla2x00_gid_pt_rsp_size(vha); gid_pt_rsp_size = (uint16_t )tmp___0; tmp___1 = (*((ha->isp_ops)->prep_ms_iocb))(vha, 20U, (uint32_t )gid_pt_rsp_size); ms_pkt = (ms_iocb_entry_t *)tmp___1; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 417, (int )gid_pt_rsp_size); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.gid_pt.port_type = 127U; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8277, "GID_PT issue IOCB failed (%d).\n", rval); } else { tmp___2 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT"); if (tmp___2 != 0) { rval = 258; } else { i = 0U; goto ldv_60799; ldv_60798: gid_data = (struct ct_sns_gid_pt_data *)(& ct_rsp->rsp.gid_pt.entries) + (unsigned long )i; (list + (unsigned long )i)->d_id.b.domain = gid_data->port_id[0]; (list + (unsigned long )i)->d_id.b.area = gid_data->port_id[1]; (list + (unsigned long )i)->d_id.b.al_pa = gid_data->port_id[2]; memset((void *)(& (list + (unsigned long )i)->fabric_port_name), 0, 8UL); (list + (unsigned long )i)->fp_speed = 65535U; if ((int )((signed char )gid_data->control_byte) < 0) { (list + (unsigned long )i)->d_id.b.rsvd_1 = gid_data->control_byte; goto ldv_60797; } else { } i = (uint16_t )((int )i + 1); ldv_60799: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_60798; } else { } ldv_60797: ; if ((int )ha->max_fibre_devices == (int )i) { rval = 258; } else { } } } return (rval); } } int qla2x00_gpn_id(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; uint16_t i ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; struct qla_hw_data *ha ; int tmp ; void *tmp___0 ; size_t __len ; void *__ret ; int tmp___1 ; { rval = 0; ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp = qla2x00_sns_gpn_id(vha, list); return (tmp); } else { } i = 0U; goto ldv_60815; ldv_60814: tmp___0 = (*((ha->isp_ops)->prep_ms_iocb))(vha, 20U, 24U); ms_pkt = (ms_iocb_entry_t *)tmp___0; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 274, 24); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.port_id.port_id[0] = (list + (unsigned long )i)->d_id.b.domain; ct_req->req.port_id.port_id[1] = (list + (unsigned long )i)->d_id.b.area; ct_req->req.port_id.port_id[2] = (list + (unsigned long )i)->d_id.b.al_pa; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8278, "GPN_ID issue IOCB failed (%d).\n", rval); goto ldv_60810; } else { tmp___1 = 
qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GPN_ID"); if (tmp___1 != 0) { rval = 258; goto ldv_60810; } else { __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& (list + (unsigned long )i)->port_name), (void const *)(& ct_rsp->rsp.gpn_id.port_name), __len); } else { __ret = __builtin_memcpy((void *)(& (list + (unsigned long )i)->port_name), (void const *)(& ct_rsp->rsp.gpn_id.port_name), __len); } } } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_60810; } else { } i = (uint16_t )((int )i + 1); ldv_60815: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_60814; } else { } ldv_60810: ; return (rval); } } int qla2x00_gnn_id(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; uint16_t i ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; int tmp ; void *tmp___0 ; size_t __len ; void *__ret ; int tmp___1 ; { rval = 0; ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp = qla2x00_sns_gnn_id(vha, list); return (tmp); } else { } i = 0U; goto ldv_60831; ldv_60830: tmp___0 = (*((ha->isp_ops)->prep_ms_iocb))(vha, 20U, 24U); ms_pkt = (ms_iocb_entry_t *)tmp___0; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 275, 24); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.port_id.port_id[0] = (list + (unsigned long )i)->d_id.b.domain; ct_req->req.port_id.port_id[1] = (list + (unsigned long )i)->d_id.b.area; ct_req->req.port_id.port_id[2] = (list + (unsigned long )i)->d_id.b.al_pa; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8279, "GNN_ID issue IOCB failed (%d).\n", rval); goto ldv_60826; } else { tmp___1 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GNN_ID"); if (tmp___1 != 0) { rval = 258; goto ldv_60826; } else { __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& (list + (unsigned long )i)->node_name), (void const *)(& ct_rsp->rsp.gnn_id.node_name), __len); } else { __ret = __builtin_memcpy((void *)(& (list + (unsigned long )i)->node_name), (void const *)(& ct_rsp->rsp.gnn_id.node_name), __len); } ql_dbg(268435456U, vha, 8280, "GID_PT entry - nn %8phN pn %8phN portid=%02x%02x%02x.\n", (uint8_t *)(& (list + (unsigned long )i)->node_name), (uint8_t *)(& (list + (unsigned long )i)->port_name), (int )(list + (unsigned long )i)->d_id.b.domain, (int )(list + (unsigned long )i)->d_id.b.area, (int )(list + (unsigned long )i)->d_id.b.al_pa); } } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_60826; } else { } i = (uint16_t )((int )i + 1); ldv_60831: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_60830; } else { } ldv_60826: ; return (rval); } } int qla2x00_rft_id(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; int tmp ; void *tmp___0 ; int tmp___1 ; { ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp = qla2x00_sns_rft_id(vha); return (tmp); } else { } tmp___0 = (*((ha->isp_ops)->prep_ms_iocb))(vha, 52U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp___0; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 535, 16); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain; ct_req->req.rft_id.port_id[1] = vha->d_id.b.area; ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa; ct_req->req.rft_id.fc4_types[2] = 1U; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8259, "RFT_ID 
issue IOCB failed (%d).\n", rval); } else { tmp___1 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID"); if (tmp___1 != 0) { rval = 258; } else { ql_dbg(268435456U, vha, 8260, "RFT_ID exiting normally.\n"); } } return (rval); } } int qla2x00_rff_id(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *tmp ; int tmp___0 ; { ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { ql_dbg(268435456U, vha, 8262, "RFF_ID call not supported on ISP2100/ISP2200.\n"); return (0); } else { } tmp = (*((ha->isp_ops)->prep_ms_iocb))(vha, 24U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 543, 16); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain; ct_req->req.rff_id.port_id[1] = vha->d_id.b.area; ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa; qlt_rff_id(vha, ct_req); ct_req->req.rff_id.fc4_type = 8U; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8263, "RFF_ID issue IOCB failed (%d).\n", rval); } else { tmp___0 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID"); if (tmp___0 != 0) { rval = 258; } else { ql_dbg(268435456U, vha, 8264, "RFF_ID exiting normally.\n"); } } return (rval); } } int qla2x00_rnn_id(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; int tmp ; void *tmp___0 ; size_t __len ; void *__ret ; int tmp___1 ; { ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { tmp = qla2x00_sns_rnn_id(vha); return (tmp); } else { } tmp___0 = (*((ha->isp_ops)->prep_ms_iocb))(vha, 28U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp___0; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 531, 16); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain; ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area; ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa; __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& ct_req->req.rnn_id.node_name), (void const *)(& vha->node_name), __len); } else { __ret = __builtin_memcpy((void *)(& ct_req->req.rnn_id.node_name), (void const *)(& vha->node_name), __len); } rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8269, "RNN_ID issue IOCB failed (%d).\n", rval); } else { tmp___1 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID"); if (tmp___1 != 0) { rval = 258; } else { ql_dbg(268435456U, vha, 8270, "RNN_ID exiting normally.\n"); } } return (rval); } } void qla2x00_get_sym_node_name(scsi_qla_host_t *vha , uint8_t *snn ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((ha->device_type & 131072U) != 0U) { sprintf((char *)snn, "%s FW:v%s DVR:v%s", (uint8_t *)(& ha->model_number), (uint8_t *)(& ha->mr.fw_version), (char *)(& qla2x00_version_str)); } else { sprintf((char *)snn, "%s FW:v%d.%02d.%02d DVR:v%s", (uint8_t *)(& ha->model_number), (int )ha->fw_major_version, (int )ha->fw_minor_version, (int )ha->fw_subminor_version, (char *)(& qla2x00_version_str)); } return; } } int qla2x00_rsnn_nn(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *tmp ; size_t __len ; void *__ret ; size_t tmp___0 ; int tmp___1 ; { ha = vha->hw; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { ql_dbg(268435456U, vha, 8272, "RSNN_ID 
call unsupported on ISP2100/ISP2200.\n"); return (0); } else { } tmp = (*((ha->isp_ops)->prep_ms_iocb))(vha, 0U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 569, 16); ct_rsp = & (ha->ct_sns)->p.rsp; __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& ct_req->req.rsnn_nn.node_name), (void const *)(& vha->node_name), __len); } else { __ret = __builtin_memcpy((void *)(& ct_req->req.rsnn_nn.node_name), (void const *)(& vha->node_name), __len); } qla2x00_get_sym_node_name(vha, (uint8_t *)(& ct_req->req.rsnn_nn.sym_node_name)); tmp___0 = strlen((char const *)(& ct_req->req.rsnn_nn.sym_node_name)); ct_req->req.rsnn_nn.name_len = (unsigned char )tmp___0; ms_pkt->req_bytecount = (unsigned int )((int )ct_req->req.rsnn_nn.name_len + 25); ms_pkt->dseg_req_length = ms_pkt->req_bytecount; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8273, "RSNN_NN issue IOCB failed (%d).\n", rval); } else { tmp___1 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN"); if (tmp___1 != 0) { rval = 258; } else { ql_dbg(268435456U, vha, 8274, "RSNN_NN exiting normally.\n"); } } return (rval); } } __inline static struct sns_cmd_pkt *qla2x00_prep_sns_cmd(scsi_qla_host_t *vha , uint16_t cmd , uint16_t scmd_len , uint16_t data_size ) { uint16_t wc ; struct sns_cmd_pkt *sns_cmd ; struct qla_hw_data *ha ; { ha = vha->hw; sns_cmd = ha->sns_cmd; memset((void *)sns_cmd, 0, 2064UL); wc = (uint16_t )((unsigned int )data_size / 2U); sns_cmd->p.cmd.buffer_length = wc; sns_cmd->p.cmd.buffer_address[0] = (unsigned int )ha->sns_cmd_dma; sns_cmd->p.cmd.buffer_address[1] = (unsigned int )(ha->sns_cmd_dma >> 32ULL); sns_cmd->p.cmd.subcommand_length = scmd_len; sns_cmd->p.cmd.subcommand = cmd; wc = (uint16_t )(((int )data_size + -16) / 4); sns_cmd->p.cmd.size = wc; vha->qla_stats.control_requests = vha->qla_stats.control_requests + 1U; return (sns_cmd); } } static int qla2x00_sns_ga_nxt(scsi_qla_host_t *vha , fc_port_t *fcport ) { int rval ; struct qla_hw_data *ha ; struct sns_cmd_pkt *sns_cmd ; size_t __len ; void *__ret ; size_t __len___0 ; void *__ret___0 ; { rval = 0; ha = vha->hw; sns_cmd = qla2x00_prep_sns_cmd(vha, 256, 6, 636); sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa; sns_cmd->p.cmd.param[1] = fcport->d_id.b.area; sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain; rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 14, 2064UL); if (rval != 0) { ql_dbg(268435456U, vha, 8287, "GA_NXT Send SNS failed (%d).\n", rval); } else if ((unsigned int )sns_cmd->p.gan_data[8] != 128U || (unsigned int )sns_cmd->p.gan_data[9] != 2U) { ql_dbg(268566528U, vha, 8324, "GA_NXT failed, rejected request ga_nxt_rsp:\n"); ql_dump_buffer(268566528U, vha, 8308, (uint8_t *)(& sns_cmd->p.gan_data), 16U); rval = 258; } else { fcport->d_id.b.domain = sns_cmd->p.gan_data[17]; fcport->d_id.b.area = sns_cmd->p.gan_data[18]; fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19]; __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& fcport->node_name), (void const *)(& sns_cmd->p.gan_data) + 284U, __len); } else { __ret = __builtin_memcpy((void *)(& fcport->node_name), (void const *)(& sns_cmd->p.gan_data) + 284U, __len); } __len___0 = 8UL; if (__len___0 > 63UL) { __ret___0 = __memcpy((void *)(& fcport->port_name), (void const *)(& sns_cmd->p.gan_data) + 20U, __len___0); } else { __ret___0 = __builtin_memcpy((void *)(& fcport->port_name), (void const *)(& sns_cmd->p.gan_data) + 20U, __len___0); } if ((unsigned int )sns_cmd->p.gan_data[16] != 1U && 
(unsigned int )sns_cmd->p.gan_data[16] != 2U) { fcport->d_id.b.domain = 240U; } else { } ql_dbg(268435456U, vha, 8289, "GA_NXT entry - nn %8phN pn %8phN port_id=%02x%02x%02x.\n", (uint8_t *)(& fcport->node_name), (uint8_t *)(& fcport->port_name), (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } return (rval); } } static int qla2x00_sns_gid_pt(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; struct qla_hw_data *ha ; uint16_t i ; uint8_t *entry ; struct sns_cmd_pkt *sns_cmd ; uint16_t gid_pt_sns_data_size ; int tmp ; { ha = vha->hw; tmp = qla2x00_gid_pt_rsp_size(vha); gid_pt_sns_data_size = (uint16_t )tmp; sns_cmd = qla2x00_prep_sns_cmd(vha, 417, 6, (int )gid_pt_sns_data_size); sns_cmd->p.cmd.param[0] = 127U; rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 14, 2064UL); if (rval != 0) { ql_dbg(268435456U, vha, 8301, "GID_PT Send SNS failed (%d).\n", rval); } else if ((unsigned int )sns_cmd->p.gid_data[8] != 128U || (unsigned int )sns_cmd->p.gid_data[9] != 2U) { ql_dbg(268435456U, vha, 8239, "GID_PT failed, rejected request, gid_rsp:\n"); ql_dump_buffer(268566528U, vha, 8321, (uint8_t *)(& sns_cmd->p.gid_data), 16U); rval = 258; } else { i = 0U; goto ldv_60909; ldv_60908: entry = (uint8_t *)(& sns_cmd->p.gid_data) + ((unsigned long )((int )i * 4) + 16UL); (list + (unsigned long )i)->d_id.b.domain = *(entry + 1UL); (list + (unsigned long )i)->d_id.b.area = *(entry + 2UL); (list + (unsigned long )i)->d_id.b.al_pa = *(entry + 3UL); if ((int )((signed char )*entry) < 0) { (list + (unsigned long )i)->d_id.b.rsvd_1 = *entry; goto ldv_60907; } else { } i = (uint16_t )((int )i + 1); ldv_60909: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_60908; } else { } ldv_60907: ; if ((int )ha->max_fibre_devices == (int )i) { rval = 258; } else { } } return (rval); } } static int qla2x00_sns_gpn_id(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; struct qla_hw_data *ha ; uint16_t i ; struct sns_cmd_pkt *sns_cmd ; size_t __len ; void *__ret ; { rval = 0; ha = vha->hw; i = 0U; goto ldv_60923; ldv_60922: sns_cmd = qla2x00_prep_sns_cmd(vha, 274, 6, 24); sns_cmd->p.cmd.param[0] = (list + (unsigned long )i)->d_id.b.al_pa; sns_cmd->p.cmd.param[1] = (list + (unsigned long )i)->d_id.b.area; sns_cmd->p.cmd.param[2] = (list + (unsigned long )i)->d_id.b.domain; rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 14, 2064UL); if (rval != 0) { ql_dbg(268435456U, vha, 8242, "GPN_ID Send SNS failed (%d).\n", rval); } else if ((unsigned int )sns_cmd->p.gpn_data[8] != 128U || (unsigned int )sns_cmd->p.gpn_data[9] != 2U) { ql_dbg(268566528U, vha, 8318, "GPN_ID failed, rejected request, gpn_rsp:\n"); ql_dump_buffer(268566528U, vha, 8319, (uint8_t *)(& sns_cmd->p.gpn_data), 16U); rval = 258; } else { __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& (list + (unsigned long )i)->port_name), (void const *)(& sns_cmd->p.gpn_data) + 16U, __len); } else { __ret = __builtin_memcpy((void *)(& (list + (unsigned long )i)->port_name), (void const *)(& sns_cmd->p.gpn_data) + 16U, __len); } } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_60921; } else { } i = (uint16_t )((int )i + 1); ldv_60923: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_60922; } else { } ldv_60921: ; return (rval); } } static int qla2x00_sns_gnn_id(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; struct qla_hw_data *ha ; uint16_t i ; struct sns_cmd_pkt *sns_cmd ; size_t __len ; void *__ret ; { rval = 0; ha = vha->hw; i = 0U; goto ldv_60937; ldv_60936: sns_cmd = 
qla2x00_prep_sns_cmd(vha, 275, 6, 24); sns_cmd->p.cmd.param[0] = (list + (unsigned long )i)->d_id.b.al_pa; sns_cmd->p.cmd.param[1] = (list + (unsigned long )i)->d_id.b.area; sns_cmd->p.cmd.param[2] = (list + (unsigned long )i)->d_id.b.domain; rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 14, 2064UL); if (rval != 0) { ql_dbg(268435456U, vha, 8255, "GNN_ID Send SNS failed (%d).\n", rval); } else if ((unsigned int )sns_cmd->p.gnn_data[8] != 128U || (unsigned int )sns_cmd->p.gnn_data[9] != 2U) { ql_dbg(268566528U, vha, 8322, "GNN_ID failed, rejected request, gnn_rsp:\n"); ql_dump_buffer(268566528U, vha, 8314, (uint8_t *)(& sns_cmd->p.gnn_data), 16U); rval = 258; } else { __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& (list + (unsigned long )i)->node_name), (void const *)(& sns_cmd->p.gnn_data) + 16U, __len); } else { __ret = __builtin_memcpy((void *)(& (list + (unsigned long )i)->node_name), (void const *)(& sns_cmd->p.gnn_data) + 16U, __len); } ql_dbg(268435456U, vha, 8302, "GID_PT entry - nn %8phN pn %8phN port_id=%02x%02x%02x.\n", (uint8_t *)(& (list + (unsigned long )i)->node_name), (uint8_t *)(& (list + (unsigned long )i)->port_name), (int )(list + (unsigned long )i)->d_id.b.domain, (int )(list + (unsigned long )i)->d_id.b.area, (int )(list + (unsigned long )i)->d_id.b.al_pa); } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_60935; } else { } i = (uint16_t )((int )i + 1); ldv_60937: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_60936; } else { } ldv_60935: ; return (rval); } } static int qla2x00_sns_rft_id(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; struct sns_cmd_pkt *sns_cmd ; { ha = vha->hw; sns_cmd = qla2x00_prep_sns_cmd(vha, 535, 22, 16); sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa; sns_cmd->p.cmd.param[1] = vha->d_id.b.area; sns_cmd->p.cmd.param[2] = vha->d_id.b.domain; sns_cmd->p.cmd.param[5] = 1U; rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 30, 2064UL); if (rval != 0) { ql_dbg(268435456U, vha, 8288, "RFT_ID Send SNS failed (%d).\n", rval); } else if ((unsigned int )sns_cmd->p.rft_data[8] != 128U || (unsigned int )sns_cmd->p.rft_data[9] != 2U) { ql_dbg(268566528U, vha, 8323, "RFT_ID failed, rejected request rft_rsp:\n"); ql_dump_buffer(268566528U, vha, 8320, (uint8_t *)(& sns_cmd->p.rft_data), 16U); rval = 258; } else { ql_dbg(268435456U, vha, 8307, "RFT_ID exiting normally.\n"); } return (rval); } } static int qla2x00_sns_rnn_id(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; struct sns_cmd_pkt *sns_cmd ; { ha = vha->hw; sns_cmd = qla2x00_prep_sns_cmd(vha, 531, 10, 16); sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa; sns_cmd->p.cmd.param[1] = vha->d_id.b.area; sns_cmd->p.cmd.param[2] = vha->d_id.b.domain; sns_cmd->p.cmd.param[4] = vha->node_name[7]; sns_cmd->p.cmd.param[5] = vha->node_name[6]; sns_cmd->p.cmd.param[6] = vha->node_name[5]; sns_cmd->p.cmd.param[7] = vha->node_name[4]; sns_cmd->p.cmd.param[8] = vha->node_name[3]; sns_cmd->p.cmd.param[9] = vha->node_name[2]; sns_cmd->p.cmd.param[10] = vha->node_name[1]; sns_cmd->p.cmd.param[11] = vha->node_name[0]; rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, 18, 2064UL); if (rval != 0) { ql_dbg(268435456U, vha, 8266, "RNN_ID Send SNS failed (%d).\n", rval); } else if ((unsigned int )sns_cmd->p.rnn_data[8] != 128U || (unsigned int )sns_cmd->p.rnn_data[9] != 2U) { ql_dbg(268566528U, vha, 8315, "RNN_ID failed, rejected request, rnn_rsp:\n"); ql_dump_buffer(268566528U, vha, 8316, (uint8_t *)(& sns_cmd->p.rnn_data), 16U); rval = 258; } else { 
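/* RNN_ID was accepted by the SNS server (0x80 0x02 in the response header), so report normal completion below. */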
ql_dbg(268435456U, vha, 8268, "RNN_ID exiting normally.\n"); } return (rval); } } static int qla2x00_mgmt_svr_login(scsi_qla_host_t *vha ) { int ret ; int rval ; uint16_t mb[32U] ; struct qla_hw_data *ha ; { ha = vha->hw; ret = 0; if (*((unsigned long *)vha + 19UL) != 0UL) { return (ret); } else { } rval = (*((ha->isp_ops)->fabric_login))(vha, (int )vha->mgmt_svr_loop_id, 255, 255, 250, (uint16_t *)(& mb), 2); if (rval != 0 || (unsigned int )mb[0] != 16384U) { if (rval == 259) { ql_dbg(268435456U, vha, 8325, "Failed management_server login: loopid=%x rval=%d\n", (int )vha->mgmt_svr_loop_id, rval); } else { ql_dbg(268435456U, vha, 8228, "Failed management_server login: loopid=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", (int )vha->mgmt_svr_loop_id, (int )mb[0], (int )mb[1], (int )mb[2], (int )mb[6], (int )mb[7]); } ret = 258; } else { vha->flags.management_server_logged_in = 1U; } return (ret); } } void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) { ms_iocb_entry_t *ms_pkt ; struct qla_hw_data *ha ; { ha = vha->hw; ms_pkt = ha->ms_iocb; memset((void *)ms_pkt, 0, 64UL); ms_pkt->entry_type = 41U; ms_pkt->entry_count = 1U; if ((int )ha->device_type < 0) { ms_pkt->loop_id.extended = vha->mgmt_svr_loop_id; } else { ms_pkt->loop_id.id.standard = (unsigned char )vha->mgmt_svr_loop_id; } ms_pkt->control_flags = 34U; ms_pkt->timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; ms_pkt->cmd_dsd_count = 1U; ms_pkt->total_dsd_count = 2U; ms_pkt->rsp_bytecount = rsp_size; ms_pkt->req_bytecount = req_size; ms_pkt->dseg_req_address[0] = (unsigned int )ha->ct_sns_dma; ms_pkt->dseg_req_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ms_pkt->dseg_req_length = ms_pkt->req_bytecount; ms_pkt->dseg_rsp_address[0] = (unsigned int )ha->ct_sns_dma; ms_pkt->dseg_rsp_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount; return ((void *)ms_pkt); } } void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) { struct ct_entry_24xx *ct_pkt ; struct qla_hw_data *ha ; { ha = vha->hw; ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; memset((void *)ct_pkt, 0, 64UL); ct_pkt->entry_type = 41U; ct_pkt->entry_count = 1U; ct_pkt->nport_handle = vha->mgmt_svr_loop_id; ct_pkt->timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; ct_pkt->cmd_dsd_count = 1U; ct_pkt->rsp_dsd_count = 1U; ct_pkt->rsp_byte_count = rsp_size; ct_pkt->cmd_byte_count = req_size; ct_pkt->dseg_0_address[0] = (unsigned int )ha->ct_sns_dma; ct_pkt->dseg_0_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; ct_pkt->dseg_1_address[0] = (unsigned int )ha->ct_sns_dma; ct_pkt->dseg_1_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; ct_pkt->vp_index = (uint8_t )vha->vp_idx; return ((void *)ct_pkt); } } __inline static ms_iocb_entry_t *qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha , uint32_t req_size ) { struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_entry_24xx *ct_pkt ; { ha = vha->hw; ms_pkt = ha->ms_iocb; ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; if ((ha->device_type & 134217728U) != 0U) { ct_pkt->cmd_byte_count = req_size; ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; } else { ms_pkt->req_bytecount = req_size; ms_pkt->dseg_req_length = ms_pkt->req_bytecount; } return (ms_pkt); } } __inline static struct ct_sns_req *qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p , uint16_t cmd , uint16_t rsp_size ) { __u16 tmp ; 
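/* qla2x00_prep_ct_fdmi_req(): same CT_IU prologue as the name-server variant, but addressed to the management service (GS type 0xFA) with the FDMI HBA-management subtype 0x10. */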
__u16 tmp___0 ; { memset((void *)p, 0, 8208UL); p->p.req.header.revision = 1U; p->p.req.header.gs_type = 250U; p->p.req.header.gs_subtype = 16U; tmp = __fswab16((int )cmd); p->p.req.command = tmp; tmp___0 = __fswab16((int )((__u16 )(((int )rsp_size + -16) / 4))); p->p.req.max_rsp_size = tmp___0; return (& p->p.req); } } static int qla2x00_fdmi_rhba(scsi_qla_host_t *vha ) { int rval ; int alen ; uint32_t size ; uint32_t sn ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; uint8_t *entries ; struct ct_fdmi_hba_attr *eiter ; struct qla_hw_data *ha ; void *tmp ; size_t __len ; void *__ret ; size_t __len___0 ; void *__ret___0 ; size_t __len___1 ; void *__ret___1 ; size_t tmp___0 ; __u16 tmp___1 ; size_t tmp___2 ; __u16 tmp___3 ; size_t tmp___4 ; __u16 tmp___5 ; size_t tmp___6 ; __u16 tmp___7 ; size_t tmp___8 ; __u16 tmp___9 ; size_t tmp___10 ; __u16 tmp___11 ; size_t tmp___12 ; __u16 tmp___13 ; size_t tmp___14 ; __u16 tmp___15 ; int tmp___16 ; { ha = vha->hw; tmp = (*((ha->isp_ops)->prep_ms_fdmi_iocb))(vha, 0U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, 512, 16); ct_rsp = & (ha->ct_sns)->p.rsp; __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& ct_req->req.rhba.hba_identifier), (void const *)(& vha->port_name), __len); } else { __ret = __builtin_memcpy((void *)(& ct_req->req.rhba.hba_identifier), (void const *)(& vha->port_name), __len); } ct_req->req.rhba.entry_count = 16777216U; __len___0 = 8UL; if (__len___0 > 63UL) { __ret___0 = __memcpy((void *)(& ct_req->req.rhba.port_name), (void const *)(& vha->port_name), __len___0); } else { __ret___0 = __builtin_memcpy((void *)(& ct_req->req.rhba.port_name), (void const *)(& vha->port_name), __len___0); } size = 24U; ct_req->req.rhba.attrs.count = 150994944U; entries = (uint8_t *)(& ct_req->req.rhba.hba_identifier); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 256U; eiter->len = 3072U; __len___1 = 8UL; if (__len___1 > 63UL) { __ret___1 = __memcpy((void *)(& eiter->a.node_name), (void const *)(& vha->node_name), __len___1); } else { __ret___1 = __builtin_memcpy((void *)(& eiter->a.node_name), (void const *)(& vha->node_name), __len___1); } size = size + 12U; ql_dbg(268435456U, vha, 8229, "NodeName = %8phN.\n", (uint8_t *)(& eiter->a.node_name)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 512U; tmp___0 = strlen("QLogic Corporation"); alen = (int )tmp___0; strncpy((char *)(& eiter->a.manufacturer), "QLogic Corporation", (__kernel_size_t )(alen + 1)); alen = ((alen & 3) != 0 ? 4 - (alen & 3) : 4) + alen; tmp___1 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___1; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8230, "Manufacturer = %s.\n", (uint8_t *)(& eiter->a.manufacturer)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 768U; sn = (uint32_t )(((((int )ha->serial0 & 31) << 16) | ((int )ha->serial2 << 8)) | (int )ha->serial1); sprintf((char *)(& eiter->a.serial_num), "%c%05d", sn / 100000U + 65U, sn % 100000U); tmp___2 = strlen((char const *)(& eiter->a.serial_num)); alen = (int )tmp___2; alen = ((alen & 3) != 0 ? 4 - (alen & 3) : 4) + alen; tmp___3 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___3; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8231, "Serial no. 
= %s.\n", (uint8_t *)(& eiter->a.serial_num)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 1024U; strcpy((char *)(& eiter->a.model), (char const *)(& ha->model_number)); tmp___4 = strlen((char const *)(& eiter->a.model)); alen = (int )tmp___4; alen = ((alen & 3) != 0 ? 4 - (alen & 3) : 4) + alen; tmp___5 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___5; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8232, "Model Name = %s.\n", (uint8_t *)(& eiter->a.model)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 1280U; strncpy((char *)(& eiter->a.model_desc), (char const *)(& ha->model_desc), 80UL); tmp___6 = strlen((char const *)(& eiter->a.model_desc)); alen = (int )tmp___6; alen = ((alen & 3) != 0 ? 4 - (alen & 3) : 4) + alen; tmp___7 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___7; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8233, "Model Desc = %s.\n", (uint8_t *)(& eiter->a.model_desc)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 1536U; strcpy((char *)(& eiter->a.hw_version), (char const *)(& ha->adapter_id)); tmp___8 = strlen((char const *)(& eiter->a.hw_version)); alen = (int )tmp___8; alen = ((alen & 3) != 0 ? 4 - (alen & 3) : 4) + alen; tmp___9 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___9; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8234, "Hardware ver = %s.\n", (uint8_t *)(& eiter->a.hw_version)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 1792U; strcpy((char *)(& eiter->a.driver_version), (char const *)(& qla2x00_version_str)); tmp___10 = strlen((char const *)(& eiter->a.driver_version)); alen = (int )tmp___10; alen = ((alen & 3) != 0 ? 4 - (alen & 3) : 4) + alen; tmp___11 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___11; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8235, "Driver ver = %s.\n", (uint8_t *)(& eiter->a.driver_version)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 2048U; strcpy((char *)(& eiter->a.orom_version), "0.00"); tmp___12 = strlen((char const *)(& eiter->a.orom_version)); alen = (int )tmp___12; alen = ((alen & 3) != 0 ? 4 - (alen & 3) : 4) + alen; tmp___13 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___13; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8236, "Optrom vers = %s.\n", (uint8_t *)(& eiter->a.orom_version)); eiter = (struct ct_fdmi_hba_attr *)entries + (unsigned long )size; eiter->type = 2304U; (*((ha->isp_ops)->fw_version_str))(vha, (char *)(& eiter->a.fw_version)); tmp___14 = strlen((char const *)(& eiter->a.fw_version)); alen = (int )tmp___14; alen = ((alen & 3) != 0 ? 
4 - (alen & 3) : 4) + alen; tmp___15 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___15; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8237, "Firmware vers = %s.\n", (uint8_t *)(& eiter->a.fw_version)); qla2x00_update_ms_fdmi_iocb(vha, size + 16U); ql_dbg(268435456U, vha, 8238, "RHBA identifier = %8phN size=%d.\n", (uint8_t *)(& ct_req->req.rhba.hba_identifier), size); ql_dump_buffer(268566528U, vha, 8310, entries, size); rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8240, "RHBA issue IOCB failed (%d).\n", rval); } else { tmp___16 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA"); if (tmp___16 != 0) { rval = 258; if ((unsigned int )ct_rsp->header.reason_code == 9U && (unsigned int )ct_rsp->header.explanation_code == 16U) { ql_dbg(268435456U, vha, 8244, "HBA already registered.\n"); rval = 265; } else { } } else { ql_dbg(268435456U, vha, 8245, "RHBA exiting normally.\n"); } } return (rval); } } static int qla2x00_fdmi_dhba(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *tmp ; size_t __len ; void *__ret ; int tmp___0 ; { ha = vha->hw; tmp = (*((ha->isp_ops)->prep_ms_fdmi_iocb))(vha, 24U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, 768, 16); ct_rsp = & (ha->ct_sns)->p.rsp; __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& ct_req->req.dhba.port_name), (void const *)(& vha->port_name), __len); } else { __ret = __builtin_memcpy((void *)(& ct_req->req.dhba.port_name), (void const *)(& vha->port_name), __len); } ql_dbg(268435456U, vha, 8246, "DHBA portname = %8phN.\n", (uint8_t *)(& ct_req->req.dhba.port_name)); rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8247, "DHBA issue IOCB failed (%d).\n", rval); } else { tmp___0 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA"); if (tmp___0 != 0) { rval = 258; } else { ql_dbg(268435456U, vha, 8248, "DHBA exiting normally.\n"); } } return (rval); } } static int qla2x00_fdmi_rpa(scsi_qla_host_t *vha ) { int rval ; int alen ; uint32_t size ; uint32_t max_frame_size ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; uint8_t *entries ; struct ct_fdmi_port_attr *eiter ; struct init_cb_24xx *icb24 ; void *tmp ; size_t __len ; void *__ret ; __u32 tmp___0 ; size_t tmp___1 ; __u16 tmp___2 ; size_t tmp___3 ; __u16 tmp___4 ; size_t tmp___5 ; int tmp___6 ; { ha = vha->hw; icb24 = (struct init_cb_24xx *)ha->init_cb; tmp = (*((ha->isp_ops)->prep_ms_fdmi_iocb))(vha, 0U, 16U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, 529, 16); ct_rsp = & (ha->ct_sns)->p.rsp; __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& ct_req->req.rpa.port_name), (void const *)(& vha->port_name), __len); } else { __ret = __builtin_memcpy((void *)(& ct_req->req.rpa.port_name), (void const *)(& vha->port_name), __len); } size = 12U; ct_req->req.rpa.attrs.count = 83886080U; entries = (uint8_t *)(& ct_req->req.rpa.port_name); eiter = (struct ct_fdmi_port_attr *)entries + (unsigned long )size; eiter->type = 256U; eiter->len = 9216U; eiter->a.fc4_types[2] = 1U; size = size + 36U; ql_dbg(268435456U, vha, 8249, "FC4_TYPES=%02x %02x.\n", (int )eiter->a.fc4_types[2], (int )eiter->a.fc4_types[1]); eiter = (struct ct_fdmi_port_attr *)entries + (unsigned long )size; 
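/* Supported-speed port attribute: the speed mask assigned below is selected from the ISP family bits in ha->device_type, using constants that are already laid out in CT wire (big-endian) order. */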
eiter->type = 512U; eiter->len = 2048U; if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { eiter->a.sup_speed = 67108864U; } else if ((ha->device_type & 2048U) != 0U) { eiter->a.sup_speed = 452984832U; } else if ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) { eiter->a.sup_speed = 184549376U; } else if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { eiter->a.sup_speed = 50331648U; } else { eiter->a.sup_speed = 16777216U; } size = size + 8U; ql_dbg(268435456U, vha, 8250, "Supported_Speed=%x.\n", eiter->a.sup_speed); eiter = (struct ct_fdmi_port_attr *)entries + (unsigned long )size; eiter->type = 768U; eiter->len = 2048U; switch ((int )ha->link_data_rate) { case 0: eiter->a.cur_speed = 16777216U; goto ldv_61034; case 1: eiter->a.cur_speed = 33554432U; goto ldv_61034; case 3: eiter->a.cur_speed = 134217728U; goto ldv_61034; case 4: eiter->a.cur_speed = 268435456U; goto ldv_61034; case 19: eiter->a.cur_speed = 67108864U; goto ldv_61034; case 5: eiter->a.cur_speed = 536870912U; goto ldv_61034; default: eiter->a.cur_speed = 8388608U; goto ldv_61034; } ldv_61034: size = size + 8U; ql_dbg(268435456U, vha, 8251, "Current_Speed=%x.\n", eiter->a.cur_speed); eiter = (struct ct_fdmi_port_attr *)entries + (unsigned long )size; eiter->type = 1024U; eiter->len = 2048U; max_frame_size = (uint32_t )((ha->device_type & 134217728U) != 0U ? icb24->frame_payload_size : (ha->init_cb)->frame_payload_size); tmp___0 = __fswab32(max_frame_size); eiter->a.max_frame_size = tmp___0; size = size + 8U; ql_dbg(268435456U, vha, 8252, "Max_Frame_Size=%x.\n", eiter->a.max_frame_size); eiter = (struct ct_fdmi_port_attr *)entries + (unsigned long )size; eiter->type = 1280U; tmp___1 = strlen("qla2xxx"); alen = (int )tmp___1; strncpy((char *)(& eiter->a.os_dev_name), "qla2xxx", (__kernel_size_t )(alen + 1)); alen = ((alen & 3) != 0 ? 4 - (alen & 3) : 4) + alen; tmp___2 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___2; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8267, "OS_Device_Name=%s.\n", (uint8_t *)(& eiter->a.os_dev_name)); tmp___5 = strlen((char const *)(& ((struct fc_host_attrs *)(vha->host)->shost_data)->system_hostname)); if (tmp___5 != 0UL) { ct_req->req.rpa.attrs.count = 100663296U; eiter = (struct ct_fdmi_port_attr *)entries + (unsigned long )size; eiter->type = 1536U; snprintf((char *)(& eiter->a.host_name), 32UL, "%s", (char *)(& ((struct fc_host_attrs *)(vha->host)->shost_data)->system_hostname)); tmp___3 = strlen((char const *)(& eiter->a.host_name)); alen = (int )tmp___3; alen = ((alen & 3) != 0 ? 
4 - (alen & 3) : 4) + alen; tmp___4 = __fswab16((int )((unsigned int )((__u16 )alen) + 4U)); eiter->len = tmp___4; size = ((uint32_t )alen + size) + 4U; ql_dbg(268435456U, vha, 8253, "HostName=%s.\n", (uint8_t *)(& eiter->a.host_name)); } else { } qla2x00_update_ms_fdmi_iocb(vha, size + 16U); ql_dbg(268435456U, vha, 8254, "RPA portname= %8phN size=%d.\n", (uint8_t *)(& ct_req->req.rpa.port_name), size); ql_dump_buffer(268566528U, vha, 8313, entries, size); rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8256, "RPA issue IOCB failed (%d).\n", rval); } else { tmp___6 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA"); if (tmp___6 != 0) { rval = 258; } else { ql_dbg(268435456U, vha, 8257, "RPA exiting normally.\n"); } } return (rval); } } int qla2x00_fdmi_register(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; { ha = vha->hw; if (((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) || (ha->device_type & 131072U) != 0U) { return (258); } else { } rval = qla2x00_mgmt_svr_login(vha); if (rval != 0) { return (rval); } else { } rval = qla2x00_fdmi_rhba(vha); if (rval != 0) { if (rval != 265) { return (rval); } else { } rval = qla2x00_fdmi_dhba(vha); if (rval != 0) { return (rval); } else { } rval = qla2x00_fdmi_rhba(vha); if (rval != 0) { return (rval); } else { } } else { } rval = qla2x00_fdmi_rpa(vha); return (rval); } } int qla2x00_gfpn_id(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; uint16_t i ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *tmp ; size_t __len ; void *__ret ; int tmp___0 ; { rval = 0; ha = vha->hw; if ((ha->device_type & 67108864U) == 0U) { return (258); } else { } i = 0U; goto ldv_61061; ldv_61060: tmp = (*((ha->isp_ops)->prep_ms_iocb))(vha, 20U, 24U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 284, 24); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.port_id.port_id[0] = (list + (unsigned long )i)->d_id.b.domain; ct_req->req.port_id.port_id[1] = (list + (unsigned long )i)->d_id.b.area; ct_req->req.port_id.port_id[2] = (list + (unsigned long )i)->d_id.b.al_pa; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8227, "GFPN_ID issue IOCB failed (%d).\n", rval); goto ldv_61056; } else { tmp___0 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GFPN_ID"); if (tmp___0 != 0) { rval = 258; goto ldv_61056; } else { __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& (list + (unsigned long )i)->fabric_port_name), (void const *)(& ct_rsp->rsp.gfpn_id.port_name), __len); } else { __ret = __builtin_memcpy((void *)(& (list + (unsigned long )i)->fabric_port_name), (void const *)(& ct_rsp->rsp.gfpn_id.port_name), __len); } } } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_61056; } else { } i = (uint16_t )((int )i + 1); ldv_61061: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_61060; } else { } ldv_61056: ; return (rval); } } __inline static void *qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha , uint32_t req_size , uint32_t rsp_size ) { struct ct_entry_24xx *ct_pkt ; struct qla_hw_data *ha ; { ha = vha->hw; ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; memset((void *)ct_pkt, 0, 64UL); ct_pkt->entry_type = 41U; ct_pkt->entry_count = 1U; ct_pkt->nport_handle = vha->mgmt_svr_loop_id; ct_pkt->timeout = ((unsigned int )ha->r_a_tov / 10U) * 2U; ct_pkt->cmd_dsd_count = 1U; ct_pkt->rsp_dsd_count = 1U; 
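/* Finish the CT pass-through IOCB: byte counts plus one request and one response data segment, both pointing at the shared ct_sns DMA buffer. */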
ct_pkt->rsp_byte_count = rsp_size; ct_pkt->cmd_byte_count = req_size; ct_pkt->dseg_0_address[0] = (unsigned int )ha->ct_sns_dma; ct_pkt->dseg_0_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; ct_pkt->dseg_1_address[0] = (unsigned int )ha->ct_sns_dma; ct_pkt->dseg_1_address[1] = (unsigned int )(ha->ct_sns_dma >> 32ULL); ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; ct_pkt->vp_index = (uint8_t )vha->vp_idx; return ((void *)ct_pkt); } } __inline static struct ct_sns_req *qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p , uint16_t cmd , uint16_t rsp_size ) { __u16 tmp ; __u16 tmp___0 ; { memset((void *)p, 0, 8208UL); p->p.req.header.revision = 1U; p->p.req.header.gs_type = 250U; p->p.req.header.gs_subtype = 1U; tmp = __fswab16((int )cmd); p->p.req.command = tmp; tmp___0 = __fswab16((int )((__u16 )(((int )rsp_size + -16) / 4))); p->p.req.max_rsp_size = tmp___0; return (& p->p.req); } } int qla2x00_gpsc(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; uint16_t i ; struct qla_hw_data *ha ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; void *tmp ; size_t __len ; void *__ret ; __u16 tmp___0 ; __u16 tmp___1 ; __u16 tmp___2 ; { ha = vha->hw; if ((ha->device_type & 67108864U) == 0U) { return (258); } else { } if (*((unsigned long *)ha + 2UL) == 0UL) { return (258); } else { } rval = qla2x00_mgmt_svr_login(vha); if (rval != 0) { return (rval); } else { } i = 0U; goto ldv_61096; ldv_61095: tmp = qla24xx_prep_ms_fm_iocb(vha, 24U, 20U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, 295, 20); ct_rsp = & (ha->ct_sns)->p.rsp; __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& ct_req->req.gpsc.port_name), (void const *)(& (list + (unsigned long )i)->fabric_port_name), __len); } else { __ret = __builtin_memcpy((void *)(& ct_req->req.gpsc.port_name), (void const *)(& (list + (unsigned long )i)->fabric_port_name), __len); } rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8281, "GPSC issue IOCB failed (%d).\n", rval); } else { rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GPSC"); if (rval != 0) { if (rval == 1 && ((unsigned int )ct_rsp->header.reason_code == 1U || (unsigned int )ct_rsp->header.reason_code == 11U)) { ql_dbg(268435456U, vha, 8282, "GPSC command unsupported, disabling query.\n"); ha->flags.gpsc_supported = 0U; rval = 258; goto ldv_61087; } else { } rval = 258; } else { tmp___0 = __fswab16((int )ct_rsp->rsp.gpsc.speed); switch ((int )tmp___0) { case 32768: (list + (unsigned long )i)->fp_speed = 0U; goto ldv_61089; case 16384: (list + (unsigned long )i)->fp_speed = 1U; goto ldv_61089; case 8192: (list + (unsigned long )i)->fp_speed = 3U; goto ldv_61089; case 4096: (list + (unsigned long )i)->fp_speed = 19U; goto ldv_61089; case 2048: (list + (unsigned long )i)->fp_speed = 4U; goto ldv_61089; case 1024: (list + (unsigned long )i)->fp_speed = 5U; goto ldv_61089; } ldv_61089: tmp___1 = __fswab16((int )ct_rsp->rsp.gpsc.speed); tmp___2 = __fswab16((int )ct_rsp->rsp.gpsc.speeds); ql_dbg(268435456U, vha, 8283, "GPSC ext entry - fpn %8phN speeds=%04x speed=%04x.\n", (uint8_t *)(& (list + (unsigned long )i)->fabric_port_name), (int )tmp___2, (int )tmp___1); } } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_61087; } else { } i = (uint16_t )((int )i + 1); ldv_61096: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_61095; } else { } ldv_61087: ; return (rval); } } void 
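/* qla2x00_gff_id(): issue GFF_ID for each discovered port (skipped unless bit 27, the FWI-2 interface flag, is set in ha->device_type) and derive fc4_type from the returned FCP SCSI feature bits; entries default to 0xFF when the query is not performed. */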
qla2x00_gff_id(scsi_qla_host_t *vha , sw_info_t *list ) { int rval ; uint16_t i ; ms_iocb_entry_t *ms_pkt ; struct ct_sns_req *ct_req ; struct ct_sns_rsp *ct_rsp ; struct qla_hw_data *ha ; uint8_t fcp_scsi_features ; void *tmp ; int tmp___0 ; { ha = vha->hw; fcp_scsi_features = 0U; i = 0U; goto ldv_61111; ldv_61110: (list + (unsigned long )i)->fc4_type = 255U; if ((ha->device_type & 134217728U) == 0U) { goto ldv_61108; } else { } tmp = (*((ha->isp_ops)->prep_ms_iocb))(vha, 20U, 144U); ms_pkt = (ms_iocb_entry_t *)tmp; ct_req = qla2x00_prep_ct_req(ha->ct_sns, 287, 144); ct_rsp = & (ha->ct_sns)->p.rsp; ct_req->req.port_id.port_id[0] = (list + (unsigned long )i)->d_id.b.domain; ct_req->req.port_id.port_id[1] = (list + (unsigned long )i)->d_id.b.area; ct_req->req.port_id.port_id[2] = (list + (unsigned long )i)->d_id.b.al_pa; rval = qla2x00_issue_iocb(vha, (void *)ha->ms_iocb, ha->ms_iocb_dma, 64UL); if (rval != 0) { ql_dbg(268435456U, vha, 8284, "GFF_ID issue IOCB failed (%d).\n", rval); } else { tmp___0 = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GFF_ID"); if (tmp___0 != 0) { ql_dbg(268435456U, vha, 8285, "GFF_ID IOCB status had a failure status code.\n"); } else { fcp_scsi_features = ct_rsp->rsp.gff_id.fc4_features[7]; fcp_scsi_features = (unsigned int )fcp_scsi_features & 15U; if ((unsigned int )fcp_scsi_features != 0U) { (list + (unsigned long )i)->fc4_type = 8U; } else { (list + (unsigned long )i)->fc4_type = 0U; } } } if ((unsigned int )(list + (unsigned long )i)->d_id.b.rsvd_1 != 0U) { goto ldv_61109; } else { } ldv_61108: i = (uint16_t )((int )i + 1); ldv_61111: ; if ((int )ha->max_fibre_devices > (int )i) { goto ldv_61110; } else { } ldv_61109: ; return; } } int reg_timer_7(struct timer_list *timer ) { { ldv_timer_list_7 = timer; ldv_timer_state_7 = 1; return (0); } } void disable_suitable_timer_7(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_7) { ldv_timer_state_7 = 0; return; } else { } return; } } void activate_pending_timer_7(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_7 == (unsigned long )timer) { if (ldv_timer_state_7 == 2 || pending_flag != 0) { ldv_timer_list_7 = timer; ldv_timer_list_7->data = data; ldv_timer_state_7 = 1; } else { } return; } else { } reg_timer_7(timer); ldv_timer_list_7->data = data; return; } } void choose_timer_7(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_7 = 2; return; } } int ldv_del_timer_35(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_36(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } void ldv__builtin_va_end(__builtin_va_list ) ; void ldv__builtin_va_start(__builtin_va_list ) ; extern int printk(char const * , ...) 
; int ldv_del_timer_39(struct timer_list *ldv_func_arg1 ) ; void disable_suitable_timer_8(struct timer_list *timer ) ; void activate_pending_timer_8(struct timer_list *timer , unsigned long data , int pending_flag ) ; void choose_timer_8(struct timer_list *timer ) ; int reg_timer_8(struct timer_list *timer ) ; int ldv_scsi_add_host_with_dma_40(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; void qla2xxx_dump_post_process(scsi_qla_host_t *vha , int rval ) ; static uint32_t ql_dbg_offset = 2048U; __inline static void qla2xxx_prep_dump(struct qla_hw_data *ha , struct qla2xxx_fw_dump *fw_dump ) { __u32 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; __u32 tmp___5 ; __u32 tmp___6 ; { tmp = __fswab32((__u32 )ha->fw_major_version); fw_dump->fw_major_version = tmp; tmp___0 = __fswab32((__u32 )ha->fw_minor_version); fw_dump->fw_minor_version = tmp___0; tmp___1 = __fswab32((__u32 )ha->fw_subminor_version); fw_dump->fw_subminor_version = tmp___1; tmp___2 = __fswab32((__u32 )ha->fw_attributes); fw_dump->fw_attributes = tmp___2; tmp___3 = __fswab32((__u32 )(ha->pdev)->vendor); fw_dump->vendor = tmp___3; tmp___4 = __fswab32((__u32 )(ha->pdev)->device); fw_dump->device = tmp___4; tmp___5 = __fswab32((__u32 )(ha->pdev)->subsystem_vendor); fw_dump->subsystem_vendor = tmp___5; tmp___6 = __fswab32((__u32 )(ha->pdev)->subsystem_device); fw_dump->subsystem_device = tmp___6; return; } } __inline static void *qla2xxx_copy_queues(struct qla_hw_data *ha , void *ptr ) { struct req_que *req ; struct rsp_que *rsp ; size_t __len ; void *__ret ; size_t __len___0 ; void *__ret___0 ; { req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); __len = (unsigned long )req->length * 64UL; __ret = __builtin_memcpy(ptr, (void const *)req->ring, __len); ptr = ptr + (unsigned long )req->length * 64UL; __len___0 = (unsigned long )rsp->length * 64UL; __ret___0 = __builtin_memcpy(ptr, (void const *)rsp->ring, __len___0); return (ptr + (unsigned long )rsp->length * 64UL); } } static int qla24xx_dump_ram(struct qla_hw_data *ha , uint32_t addr , uint32_t *ram , uint32_t ram_dwords , void **nxt ) { int rval ; uint32_t cnt ; uint32_t stat ; uint32_t timer ; uint32_t dwords ; uint32_t idx ; uint16_t mb0 ; struct device_reg_24xx *reg ; dma_addr_t dump_dma ; uint32_t *dump ; int tmp ; __u32 tmp___0 ; int tmp___1 ; { reg = & (ha->iobase)->isp24; dump_dma = ha->gid_list_dma; dump = (uint32_t *)ha->gid_list; rval = 0; mb0 = 0U; writew(12, (void volatile *)(& reg->mailbox0)); clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); tmp = qla2x00_gid_list_size(ha); dwords = (uint32_t )(tmp / 4); cnt = 0U; goto ldv_43494; ldv_43493: ; if (cnt + dwords > ram_dwords) { dwords = ram_dwords - cnt; } else { } writew((int )((unsigned short )addr), (void volatile *)(& reg->mailbox1)); writew((int )((unsigned short )(addr >> 16)), (void volatile *)(& reg->mailbox8)); writew((int )((unsigned short )((unsigned int )dump_dma >> 16)), (void volatile *)(& reg->mailbox2)); writew((int )((unsigned short )dump_dma), (void volatile *)(& reg->mailbox3)); writew((int )((unsigned short )((unsigned int )(dump_dma >> 32ULL) >> 16)), (void volatile *)(& reg->mailbox6)); writew((int )((unsigned short )(dump_dma >> 32ULL)), (void volatile *)(& reg->mailbox7)); writew((int )((unsigned short )(dwords >> 16)), (void volatile *)(& reg->mailbox4)); writew((int )((unsigned short )dwords), (void volatile *)(& reg->mailbox5)); writel(1342177280U, (void volatile *)(& reg->hccr)); timer = 6000000U; goto ldv_43489; 
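/* Poll the host status register, within the timer budget set above, for a mailbox-completion interrupt and acknowledge it through the HCCR register before collecting the dumped words. */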
ldv_43488: stat = readl((void const volatile *)(& reg->host_status)); if ((stat & 32768U) != 0U) { stat = stat & 255U; if (((stat == 1U || stat == 2U) || stat == 16U) || stat == 17U) { set_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); mb0 = readw((void const volatile *)(& reg->mailbox0)); writel(2684354560U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); goto ldv_43487; } else { } writel(2684354560U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); } else { } __const_udelay(21475UL); timer = timer - 1U; ldv_43489: ; if (timer != 0U) { goto ldv_43488; } else { } ldv_43487: tmp___1 = test_and_clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (tmp___1 != 0) { rval = (int )mb0 & 16383; idx = 0U; goto ldv_43491; ldv_43490: tmp___0 = __fswab32(*(dump + (unsigned long )idx)); *(ram + (unsigned long )(cnt + idx)) = tmp___0; idx = idx + 1U; ldv_43491: ; if (idx < dwords) { goto ldv_43490; } else { } } else { rval = 258; } cnt = cnt + dwords; addr = addr + dwords; ldv_43494: ; if (cnt < ram_dwords && rval == 0) { goto ldv_43493; } else { } *nxt = rval == 0 ? (void *)ram + (unsigned long )cnt : (void *)0; return (rval); } } static int qla24xx_dump_memory(struct qla_hw_data *ha , uint32_t *code_ram , uint32_t cram_size , void **nxt ) { int rval ; int tmp ; { rval = qla24xx_dump_ram(ha, 131072U, code_ram, cram_size / 4U, nxt); if (rval != 0) { return (rval); } else { } tmp = qla24xx_dump_ram(ha, 1048576U, (uint32_t *)*nxt, ha->fw_memory_size - 1048575U, nxt); return (tmp); } } static uint32_t *qla24xx_read_window(struct device_reg_24xx *reg , uint32_t iobase , uint32_t count , uint32_t *buf ) { uint32_t *dmp_reg ; uint32_t *tmp ; uint32_t *tmp___0 ; unsigned int tmp___1 ; __u32 tmp___2 ; uint32_t tmp___3 ; { writel(iobase, (void volatile *)(& reg->iobase_addr)); dmp_reg = & reg->iobase_window; goto ldv_43511; ldv_43510: tmp = buf; buf = buf + 1; tmp___0 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___1 = readl((void const volatile *)tmp___0); tmp___2 = __fswab32(tmp___1); *tmp = tmp___2; ldv_43511: tmp___3 = count; count = count - 1U; if (tmp___3 != 0U) { goto ldv_43510; } else { } return (buf); } } __inline static int qla24xx_pause_risc(struct device_reg_24xx *reg ) { int rval ; uint32_t cnt ; unsigned int tmp ; { rval = 0; writel(805306368U, (void volatile *)(& reg->hccr)); cnt = 30000U; goto ldv_43519; ldv_43518: ; if (cnt != 0U) { __const_udelay(429500UL); } else { rval = 256; } cnt = cnt - 1U; ldv_43519: tmp = readl((void const volatile *)(& reg->host_status)); if ((tmp & 256U) == 0U && rval == 0) { goto ldv_43518; } else { } return (rval); } } static int qla24xx_soft_reset(struct qla_hw_data *ha ) { int rval ; uint32_t cnt ; uint16_t mb0 ; uint16_t wd ; struct device_reg_24xx *reg ; unsigned int tmp ; unsigned short tmp___0 ; unsigned short tmp___1 ; unsigned int tmp___2 ; unsigned short tmp___3 ; { rval = 0; reg = & (ha->iobase)->isp24; writel(65584U, (void volatile *)(& reg->ctrl_status)); cnt = 0U; goto ldv_43531; ldv_43530: tmp = readl((void const volatile *)(& reg->ctrl_status)); if ((tmp & 131072U) == 0U) { goto ldv_43529; } else { } __const_udelay(42950UL); cnt = cnt + 1U; ldv_43531: ; if (cnt <= 29999U) { goto ldv_43530; } else { } ldv_43529: writel(65585U, (void volatile *)(& reg->ctrl_status)); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & wd); __const_udelay(429500UL); tmp___0 = readw((void const volatile *)(& reg->mailbox0)); mb0 = tmp___0; cnt = 10000U; goto ldv_43533; ldv_43532: 
__const_udelay(21475UL); tmp___1 = readw((void const volatile *)(& reg->mailbox0)); mb0 = tmp___1; __asm__ volatile ("": : : "memory"); cnt = cnt - 1U; ldv_43533: ; if (cnt != 0U && (unsigned int )mb0 != 0U) { goto ldv_43532; } else { } cnt = 0U; goto ldv_43537; ldv_43536: tmp___2 = readl((void const volatile *)(& reg->ctrl_status)); if ((tmp___2 & 1U) == 0U) { goto ldv_43535; } else { } __const_udelay(42950UL); cnt = cnt + 1U; ldv_43537: ; if (cnt <= 29999U) { goto ldv_43536; } else { } ldv_43535: writel(536870912U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); cnt = 30000U; goto ldv_43539; ldv_43538: ; if (cnt != 0U) { __const_udelay(429500UL); } else { rval = 256; } cnt = cnt - 1U; ldv_43539: tmp___3 = readw((void const volatile *)(& reg->mailbox0)); if ((unsigned int )tmp___3 != 0U && rval == 0) { goto ldv_43538; } else { } return (rval); } } static int qla2xxx_dump_ram(struct qla_hw_data *ha , uint32_t addr , uint16_t *ram , uint32_t ram_words , void **nxt ) { int rval ; uint32_t cnt ; uint32_t stat ; uint32_t timer ; uint32_t words ; uint32_t idx ; uint16_t mb0 ; struct device_reg_2xxx *reg ; dma_addr_t dump_dma ; uint16_t *dump ; int tmp ; __u16 tmp___0 ; int tmp___1 ; { reg = & (ha->iobase)->isp; dump_dma = ha->gid_list_dma; dump = (uint16_t *)ha->gid_list; rval = 0; mb0 = 0U; writew(12, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); tmp = qla2x00_gid_list_size(ha); words = (uint32_t )(tmp / 2); cnt = 0U; goto ldv_43565; ldv_43564: ; if (cnt + words > ram_words) { words = ram_words - cnt; } else { } writew((int )((unsigned short )addr), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void volatile *)(& reg->u.isp2100.mailbox0) + 1U : (void volatile *)(& reg->u.isp2300.mailbox0) + 1U); writew((int )((unsigned short )(addr >> 16)), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void volatile *)(& reg->u_end.isp2200.mailbox8) : (void volatile *)(& reg->u.isp2300.mailbox0) + 8U); writew((int )((unsigned short )((unsigned int )dump_dma >> 16)), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void volatile *)(& reg->u.isp2100.mailbox0) + 2U : (void volatile *)(& reg->u.isp2300.mailbox0) + 2U); writew((int )((unsigned short )dump_dma), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void volatile *)(& reg->u.isp2100.mailbox0) + 3U : (void volatile *)(& reg->u.isp2300.mailbox0) + 3U); writew((int )((unsigned short )((unsigned int )(dump_dma >> 32ULL) >> 16)), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void volatile *)(& reg->u.isp2100.mailbox0) + 6U : (void volatile *)(& reg->u.isp2300.mailbox0) + 6U); writew((int )((unsigned short )(dump_dma >> 32ULL)), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void volatile *)(& reg->u.isp2100.mailbox0) + 7U : (void volatile *)(& reg->u.isp2300.mailbox0) + 7U); writew((int )((unsigned short )words), (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
(void volatile *)(& reg->u.isp2100.mailbox0) + 4U : (void volatile *)(& reg->u.isp2300.mailbox0) + 4U); writew(20480, (void volatile *)(& reg->hccr)); timer = 6000000U; goto ldv_43560; ldv_43559: stat = readl((void const volatile *)(& reg->u.isp2300.host_status)); if ((stat & 32768U) != 0U) { stat = stat & 255U; if (stat == 1U || stat == 2U) { set_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); mb0 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); writew(0, (void volatile *)(& reg->semaphore)); writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); goto ldv_43558; } else if (stat == 16U || stat == 17U) { set_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); mb0 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); goto ldv_43558; } else { } writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); } else { } __const_udelay(21475UL); timer = timer - 1U; ldv_43560: ; if (timer != 0U) { goto ldv_43559; } else { } ldv_43558: tmp___1 = test_and_clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (tmp___1 != 0) { rval = (int )mb0 & 16383; idx = 0U; goto ldv_43562; ldv_43561: tmp___0 = __fswab16((int )*(dump + (unsigned long )idx)); *(ram + (unsigned long )(cnt + idx)) = tmp___0; idx = idx + 1U; ldv_43562: ; if (idx < words) { goto ldv_43561; } else { } } else { rval = 258; } cnt = cnt + words; addr = addr + words; ldv_43565: ; if (cnt < ram_words && rval == 0) { goto ldv_43564; } else { } *nxt = rval == 0 ? 
(void *)ram + (unsigned long )cnt : (void *)0; return (rval); } } __inline static void qla2xxx_read_window(struct device_reg_2xxx *reg , uint32_t count , uint16_t *buf ) { uint16_t *dmp_reg ; uint16_t *tmp ; uint16_t *tmp___0 ; unsigned short tmp___1 ; __u16 tmp___2 ; uint32_t tmp___3 ; { dmp_reg = & reg->u.isp2300.fb_cmd; goto ldv_43574; ldv_43573: tmp = buf; buf = buf + 1; tmp___0 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___1 = readw((void const volatile *)tmp___0); tmp___2 = __fswab16((int )tmp___1); *tmp = tmp___2; ldv_43574: tmp___3 = count; count = count - 1U; if (tmp___3 != 0U) { goto ldv_43573; } else { } return; } } __inline static void *qla24xx_copy_eft(struct qla_hw_data *ha , void *ptr ) { size_t __len ; __u32 tmp ; void *__ret ; __u32 tmp___0 ; { if ((unsigned long )ha->eft == (unsigned long )((void *)0)) { return (ptr); } else { } tmp = __fswab32((ha->fw_dump)->eft_size); __len = (size_t )tmp; __ret = __builtin_memcpy(ptr, (void const *)ha->eft, __len); tmp___0 = __fswab32((ha->fw_dump)->eft_size); return (ptr + (unsigned long )tmp___0); } } __inline static void *qla25xx_copy_fce(struct qla_hw_data *ha , void *ptr , uint32_t **last_chain ) { uint32_t cnt ; uint32_t *iter_reg ; struct qla2xxx_fce_chain *fcec ; __u32 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; uint32_t *tmp___3 ; __u32 tmp___4 ; size_t __len ; __u32 tmp___5 ; void *__ret ; __u32 tmp___6 ; { fcec = (struct qla2xxx_fce_chain *)ptr; if ((unsigned long )ha->fce == (unsigned long )((void *)0)) { return (ptr); } else { } *last_chain = & fcec->type; fcec->type = 4042981247U; tmp = __fswab32(ha->fce_bufs * 1024U + 52U); fcec->chain_size = tmp; tmp___0 = __fswab32(ha->fce_bufs * 1024U); fcec->size = tmp___0; tmp___1 = __fswab32((unsigned int )ha->fce_dma); fcec->addr_l = tmp___1; tmp___2 = __fswab32((unsigned int )(ha->fce_dma >> 32ULL)); fcec->addr_h = tmp___2; iter_reg = (uint32_t *)(& fcec->eregs); cnt = 0U; goto ldv_43592; ldv_43591: tmp___3 = iter_reg; iter_reg = iter_reg + 1; tmp___4 = __fswab32((__u32 )ha->fce_mb[cnt]); *tmp___3 = tmp___4; cnt = cnt + 1U; ldv_43592: ; if (cnt <= 7U) { goto ldv_43591; } else { } tmp___5 = __fswab32(fcec->size); __len = (size_t )tmp___5; __ret = __builtin_memcpy((void *)iter_reg, (void const *)ha->fce, __len); tmp___6 = __fswab32(fcec->size); return ((void *)iter_reg + (unsigned long )tmp___6); } } __inline static void *qla2xxx_copy_atioqueues(struct qla_hw_data *ha , void *ptr , uint32_t **last_chain ) { struct qla2xxx_mqueue_chain *q ; struct qla2xxx_mqueue_header *qh ; uint32_t num_queues ; int que ; struct __anonstruct_aq_308 aq ; struct __anonstruct_aqp_309 *aqp ; __u32 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; size_t __len ; void *__ret ; { if ((unsigned long )ha->tgt.atio_ring == (unsigned long )((struct atio *)0)) { return (ptr); } else { } num_queues = 1U; aqp = & aq; aqp->length = (int )ha->tgt.atio_q_length; aqp->ring = (void *)ha->tgt.atio_ring; que = 0; goto ldv_43615; ldv_43614: q = (struct qla2xxx_mqueue_chain *)ptr; *last_chain = & q->type; q->type = 4076535679U; tmp = __fswab32((__u32 )((unsigned long )aqp->length) * 64U + 20U); q->chain_size = tmp; ptr = ptr + 8UL; qh = (struct qla2xxx_mqueue_header *)ptr; qh->queue = 50331648U; tmp___0 = __fswab32((__u32 )que); qh->number = tmp___0; tmp___1 = __fswab32((__u32 )((unsigned long )aqp->length) * 64U); qh->size = tmp___1; ptr = ptr + 12UL; __len = (unsigned long )aqp->length * 64UL; __ret = __builtin_memcpy(ptr, (void const *)aqp->ring, __len); ptr = ptr + (unsigned long )aqp->length * 64UL; que = que + 1; 
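/* Annotation (qla2xxx_copy_atioqueues): for each target-mode ATIO queue (a single queue in
   this path), the loop around ldv_43614/ldv_43615 appends a queue chain entry (type and
   chain_size), a queue header (queue id, number, size), and the raw ring contents to the
   dump image, advancing ptr past each piece. */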
ldv_43615: ; if ((uint32_t )que < num_queues) { goto ldv_43614; } else { } return (ptr); } } __inline static void *qla25xx_copy_mqueues(struct qla_hw_data *ha , void *ptr , uint32_t **last_chain ) { struct qla2xxx_mqueue_chain *q ; struct qla2xxx_mqueue_header *qh ; struct req_que *req ; struct rsp_que *rsp ; int que ; __u32 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; size_t __len ; void *__ret ; __u32 tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; size_t __len___0 ; void *__ret___0 ; { if ((unsigned int )ha->mqenable == 0U) { return (ptr); } else { } que = 1; goto ldv_43632; ldv_43631: req = *(ha->req_q_map + (unsigned long )que); if ((unsigned long )req == (unsigned long )((struct req_que *)0)) { goto ldv_43627; } else { } q = (struct qla2xxx_mqueue_chain *)ptr; *last_chain = & q->type; q->type = 4076535679U; tmp = __fswab32((__u32 )req->length * 64U + 20U); q->chain_size = tmp; ptr = ptr + 8UL; qh = (struct qla2xxx_mqueue_header *)ptr; qh->queue = 16777216U; tmp___0 = __fswab32((__u32 )que); qh->number = tmp___0; tmp___1 = __fswab32((__u32 )req->length * 64U); qh->size = tmp___1; ptr = ptr + 12UL; __len = (unsigned long )req->length * 64UL; __ret = __builtin_memcpy(ptr, (void const *)req->ring, __len); ptr = ptr + (unsigned long )req->length * 64UL; que = que + 1; ldv_43632: ; if ((int )ha->max_req_queues > que) { goto ldv_43631; } else { } ldv_43627: que = 1; goto ldv_43638; ldv_43637: rsp = *(ha->rsp_q_map + (unsigned long )que); if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { goto ldv_43633; } else { } q = (struct qla2xxx_mqueue_chain *)ptr; *last_chain = & q->type; q->type = 4076535679U; tmp___2 = __fswab32((__u32 )rsp->length * 64U + 20U); q->chain_size = tmp___2; ptr = ptr + 8UL; qh = (struct qla2xxx_mqueue_header *)ptr; qh->queue = 33554432U; tmp___3 = __fswab32((__u32 )que); qh->number = tmp___3; tmp___4 = __fswab32((__u32 )rsp->length * 64U); qh->size = tmp___4; ptr = ptr + 12UL; __len___0 = (unsigned long )rsp->length * 64UL; __ret___0 = __builtin_memcpy(ptr, (void const *)rsp->ring, __len___0); ptr = ptr + (unsigned long )rsp->length * 64UL; que = que + 1; ldv_43638: ; if ((int )ha->max_rsp_queues > que) { goto ldv_43637; } else { } ldv_43633: ; return (ptr); } } __inline static void *qla25xx_copy_mq(struct qla_hw_data *ha , void *ptr , uint32_t **last_chain ) { uint32_t cnt ; uint32_t que_idx ; uint8_t que_cnt ; struct qla2xxx_mq_chain *mq ; device_reg_t *reg ; __u32 tmp ; unsigned int tmp___0 ; __u32 tmp___1 ; unsigned int tmp___2 ; __u32 tmp___3 ; unsigned int tmp___4 ; __u32 tmp___5 ; unsigned int tmp___6 ; __u32 tmp___7 ; { mq = (struct qla2xxx_mq_chain *)ptr; if ((unsigned int )ha->mqenable == 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { return (ptr); } else { } mq = (struct qla2xxx_mq_chain *)ptr; *last_chain = & mq->type; mq->type = 4059758463U; mq->chain_size = 201457664U; que_cnt = (uint8_t )((int )ha->max_req_queues > (int )ha->max_rsp_queues ? ha->max_req_queues : ha->max_rsp_queues); tmp = __fswab32((__u32 )que_cnt); mq->count = tmp; cnt = 0U; goto ldv_43650; ldv_43649: reg = (unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) ? 
ha->mqiobase + (unsigned long )(cnt * 4096U) : ha->iobase; que_idx = cnt * 4U; tmp___0 = readl((void const volatile *)(& reg->isp25mq.req_q_in)); tmp___1 = __fswab32(tmp___0); mq->qregs[que_idx] = tmp___1; tmp___2 = readl((void const volatile *)(& reg->isp25mq.req_q_out)); tmp___3 = __fswab32(tmp___2); mq->qregs[que_idx + 1U] = tmp___3; tmp___4 = readl((void const volatile *)(& reg->isp25mq.rsp_q_in)); tmp___5 = __fswab32(tmp___4); mq->qregs[que_idx + 2U] = tmp___5; tmp___6 = readl((void const volatile *)(& reg->isp25mq.rsp_q_out)); tmp___7 = __fswab32(tmp___6); mq->qregs[que_idx + 3U] = tmp___7; cnt = cnt + 1U; ldv_43650: ; if ((uint32_t )que_cnt > cnt) { goto ldv_43649; } else { } return (ptr + 524UL); } } void qla2xxx_dump_post_process(scsi_qla_host_t *vha , int rval ) { struct qla_hw_data *ha ; { ha = vha->hw; if (rval != 0) { ql_log(1U, vha, 53248, "Failed to dump firmware (%x).\n", rval); ha->fw_dumped = 0; } else { ql_log(2U, vha, 53249, "Firmware dump saved to temp buffer (%ld/%p).\n", vha->host_no, ha->fw_dump); ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, 0U); } return; } } void qla2300_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { int rval ; uint32_t cnt ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; uint16_t *dmp_reg ; unsigned long flags ; struct qla2300_fw_dump *fw ; void *nxt ; struct scsi_qla_host *base_vha ; void *tmp ; raw_spinlock_t *tmp___0 ; unsigned short tmp___1 ; __u16 tmp___2 ; unsigned short tmp___3 ; uint16_t *tmp___4 ; unsigned short tmp___5 ; __u16 tmp___6 ; uint16_t *tmp___7 ; unsigned short tmp___8 ; __u16 tmp___9 ; uint16_t *tmp___10 ; unsigned short tmp___11 ; __u16 tmp___12 ; uint16_t *tmp___13 ; unsigned short tmp___14 ; __u16 tmp___15 ; unsigned short tmp___16 ; unsigned short tmp___17 ; { ha = vha->hw; reg = & (ha->iobase)->isp; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; flags = 0UL; if (hardware_locked == 0) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 53250, "No buffer available for dump.\n"); goto qla2300_fw_dump_failed; } else { } if (ha->fw_dumped != 0) { ql_log(1U, vha, 53251, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto qla2300_fw_dump_failed; } else { } fw = & (ha->fw_dump)->isp.isp23; qla2xxx_prep_dump(ha, ha->fw_dump); rval = 0; tmp___1 = readw((void const volatile *)(& reg->hccr)); tmp___2 = __fswab16((int )tmp___1); fw->hccr = tmp___2; writew(8192, (void volatile *)(& reg->hccr)); if ((ha->device_type & 4U) != 0U) { cnt = 30000U; goto ldv_43675; ldv_43674: ; if (cnt != 0U) { __const_udelay(429500UL); } else { rval = 256; } cnt = cnt - 1U; ldv_43675: tmp___3 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___3 & 32) == 0 && rval == 0) { goto ldv_43674; } else { } } else { readw((void const volatile *)(& reg->hccr)); __const_udelay(42950UL); } if (rval == 0) { dmp_reg = & reg->flash_address; cnt = 0U; goto ldv_43678; ldv_43677: tmp___4 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___5 = readw((void const volatile *)tmp___4); tmp___6 = __fswab16((int )tmp___5); fw->pbiu_reg[cnt] = tmp___6; cnt = cnt + 1U; ldv_43678: ; if (cnt <= 7U) { goto ldv_43677; } else { } dmp_reg = & reg->u.isp2300.req_q_in; cnt = 0U; goto ldv_43681; ldv_43680: tmp___7 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___8 = readw((void const volatile *)tmp___7); tmp___9 = __fswab16((int )tmp___8); fw->risc_host_reg[cnt] = tmp___9; 
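/* Annotation (qla2300_fw_dump register capture): the loops immediately above and below read
   the PBIU, RISC host interface and mailbox registers (byte-swapped via __fswab16) before
   walking the DMA, RISC GP and frame-buffer register windows through ctrl_status/pcr
   selections and qla2xxx_read_window(). */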
cnt = cnt + 1U; ldv_43681: ; if (cnt <= 7U) { goto ldv_43680; } else { } dmp_reg = & reg->u.isp2300.mailbox0; cnt = 0U; goto ldv_43684; ldv_43683: tmp___10 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___11 = readw((void const volatile *)tmp___10); tmp___12 = __fswab16((int )tmp___11); fw->mailbox_reg[cnt] = tmp___12; cnt = cnt + 1U; ldv_43684: ; if (cnt <= 31U) { goto ldv_43683; } else { } writew(64, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 32U, (uint16_t *)(& fw->resp_dma_reg)); writew(80, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 48U, (uint16_t *)(& fw->dma_reg)); writew(0, (void volatile *)(& reg->ctrl_status)); dmp_reg = & reg->risc_hw; cnt = 0U; goto ldv_43687; ldv_43686: tmp___13 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___14 = readw((void const volatile *)tmp___13); tmp___15 = __fswab16((int )tmp___14); fw->risc_hdw_reg[cnt] = tmp___15; cnt = cnt + 1U; ldv_43687: ; if (cnt <= 15U) { goto ldv_43686; } else { } writew(8192, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp0_reg)); writew(8704, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp1_reg)); writew(9216, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp2_reg)); writew(9728, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp3_reg)); writew(10240, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp4_reg)); writew(10752, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp5_reg)); writew(11264, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp6_reg)); writew(11776, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp7_reg)); writew(16, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 64U, (uint16_t *)(& fw->frame_buf_hdw_reg)); writew(32, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 64U, (uint16_t *)(& fw->fpm_b0_reg)); writew(48, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 64U, (uint16_t *)(& fw->fpm_b1_reg)); writew(1, (void volatile *)(& reg->ctrl_status)); cnt = 0U; goto ldv_43691; ldv_43690: tmp___16 = readw((void const volatile *)(& reg->ctrl_status)); if (((int )tmp___16 & 1) == 0) { goto ldv_43689; } else { } __const_udelay(42950UL); cnt = cnt + 1U; ldv_43691: ; if (cnt <= 29999U) { goto ldv_43690; } else { } ldv_43689: ; } else { } if ((ha->device_type & 4U) == 0U) { cnt = 30000U; goto ldv_43693; ldv_43692: ; if (cnt != 0U) { __const_udelay(429500UL); } else { rval = 256; } cnt = cnt - 1U; ldv_43693: tmp___17 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
& reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); if ((unsigned int )tmp___17 != 0U && rval == 0) { goto ldv_43692; } else { } } else { } if (rval == 0) { rval = qla2xxx_dump_ram(ha, 2048U, (uint16_t *)(& fw->risc_ram), 63488U, & nxt); } else { } if (rval == 0) { rval = qla2xxx_dump_ram(ha, 65536U, (uint16_t *)(& fw->stack_ram), 4096U, & nxt); } else { } if (rval == 0) { rval = qla2xxx_dump_ram(ha, 69632U, (uint16_t *)(& fw->data_ram), ha->fw_memory_size - 69631U, & nxt); } else { } if (rval == 0) { qla2xxx_copy_queues(ha, nxt); } else { } qla2xxx_dump_post_process(base_vha, rval); qla2300_fw_dump_failed: ; if (hardware_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } return; } } void qla2100_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { int rval ; uint32_t cnt ; uint32_t timer ; uint16_t risc_address ; uint16_t mb0 ; uint16_t mb2 ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; uint16_t *dmp_reg ; unsigned long flags ; struct qla2100_fw_dump *fw ; struct scsi_qla_host *base_vha ; void *tmp ; raw_spinlock_t *tmp___0 ; unsigned short tmp___1 ; __u16 tmp___2 ; unsigned short tmp___3 ; uint16_t *tmp___4 ; unsigned short tmp___5 ; __u16 tmp___6 ; uint16_t *tmp___7 ; unsigned short tmp___8 ; __u16 tmp___9 ; uint16_t *tmp___10 ; unsigned short tmp___11 ; __u16 tmp___12 ; uint16_t *tmp___13 ; unsigned short tmp___14 ; __u16 tmp___15 ; unsigned short tmp___16 ; unsigned short tmp___17 ; unsigned short tmp___18 ; unsigned short tmp___19 ; unsigned short tmp___20 ; __u16 tmp___21 ; int tmp___22 ; { ha = vha->hw; reg = & (ha->iobase)->isp; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; risc_address = 0U; mb2 = 0U; mb0 = mb2; flags = 0UL; if (hardware_locked == 0) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 53252, "No buffer available for dump.\n"); goto qla2100_fw_dump_failed; } else { } if (ha->fw_dumped != 0) { ql_log(1U, vha, 53253, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto qla2100_fw_dump_failed; } else { } fw = & (ha->fw_dump)->isp.isp21; qla2xxx_prep_dump(ha, ha->fw_dump); rval = 0; tmp___1 = readw((void const volatile *)(& reg->hccr)); tmp___2 = __fswab16((int )tmp___1); fw->hccr = tmp___2; writew(8192, (void volatile *)(& reg->hccr)); cnt = 30000U; goto ldv_43716; ldv_43715: ; if (cnt != 0U) { __const_udelay(429500UL); } else { rval = 256; } cnt = cnt - 1U; ldv_43716: tmp___3 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___3 & 32) == 0 && rval == 0) { goto ldv_43715; } else { } if (rval == 0) { dmp_reg = & reg->flash_address; cnt = 0U; goto ldv_43719; ldv_43718: tmp___4 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___5 = readw((void const volatile *)tmp___4); tmp___6 = __fswab16((int )tmp___5); fw->pbiu_reg[cnt] = tmp___6; cnt = cnt + 1U; ldv_43719: ; if (cnt <= 7U) { goto ldv_43718; } else { } dmp_reg = & reg->u.isp2100.mailbox0; cnt = 0U; goto ldv_43722; ldv_43721: ; if (cnt == 8U) { dmp_reg = & reg->u_end.isp2200.mailbox8; } else { } tmp___7 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___8 = readw((void const volatile *)tmp___7); tmp___9 = __fswab16((int )tmp___8); fw->mailbox_reg[cnt] = tmp___9; cnt = cnt + 1U; ldv_43722: ; if ((uint32_t )ha->mbx_count > cnt) { goto ldv_43721; } else { } dmp_reg = (uint16_t *)(& reg->u.isp2100.unused_2); cnt = 0U; goto ldv_43725; ldv_43724: tmp___10 = dmp_reg; dmp_reg = dmp_reg + 
1; tmp___11 = readw((void const volatile *)tmp___10); tmp___12 = __fswab16((int )tmp___11); fw->dma_reg[cnt] = tmp___12; cnt = cnt + 1U; ldv_43725: ; if (cnt <= 47U) { goto ldv_43724; } else { } writew(0, (void volatile *)(& reg->ctrl_status)); dmp_reg = & reg->risc_hw; cnt = 0U; goto ldv_43728; ldv_43727: tmp___13 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___14 = readw((void const volatile *)tmp___13); tmp___15 = __fswab16((int )tmp___14); fw->risc_hdw_reg[cnt] = tmp___15; cnt = cnt + 1U; ldv_43728: ; if (cnt <= 15U) { goto ldv_43727; } else { } writew(8192, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp0_reg)); writew(8448, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp1_reg)); writew(8704, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp2_reg)); writew(8960, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp3_reg)); writew(9216, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp4_reg)); writew(9472, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp5_reg)); writew(9728, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp6_reg)); writew(9984, (void volatile *)(& reg->pcr)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->risc_gp7_reg)); writew(16, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 16U, (uint16_t *)(& fw->frame_buf_hdw_reg)); writew(32, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 64U, (uint16_t *)(& fw->fpm_b0_reg)); writew(48, (void volatile *)(& reg->ctrl_status)); qla2xxx_read_window(reg, 64U, (uint16_t *)(& fw->fpm_b1_reg)); writew(1, (void volatile *)(& reg->ctrl_status)); } else { } cnt = 30000U; goto ldv_43731; ldv_43730: ; if (cnt != 0U) { __const_udelay(429500UL); } else { rval = 256; } cnt = cnt - 1U; ldv_43731: tmp___16 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); if ((unsigned int )tmp___16 != 0U && rval == 0) { goto ldv_43730; } else { } if (rval == 0) { if ((ha->device_type & 2U) != 0U) { goto _L; } else if ((int )ha->device_type & 1) { tmp___18 = readw((void const volatile *)(& reg->mctr)); if (((int )tmp___18 & 3) != 0) { _L: /* CIL Label */ writew(8192, (void volatile *)(& reg->hccr)); cnt = 30000U; goto ldv_43734; ldv_43733: ; if (cnt != 0U) { __const_udelay(429500UL); } else { rval = 256; } cnt = cnt - 1U; ldv_43734: tmp___17 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___17 & 32) == 0 && rval == 0) { goto ldv_43733; } else { } if (rval == 0) { if ((int )ha->device_type & 1) { writew(241, (void volatile *)(& reg->mctr)); } else { writew(242, (void volatile *)(& reg->mctr)); } readw((void const volatile *)(& reg->mctr)); writew(12288, (void volatile *)(& reg->hccr)); } else { } } else { } } else { } } else { } if (rval == 0) { risc_address = 4096U; writew(5, (void volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); } else { } cnt = 0U; goto ldv_43740; ldv_43739: writew((int )risc_address, (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? 
(void volatile *)(& reg->u.isp2100.mailbox0) + 1U : (void volatile *)(& reg->u.isp2300.mailbox0) + 1U); writew(20480, (void volatile *)(& reg->hccr)); timer = 6000000U; goto ldv_43738; ldv_43737: tmp___20 = readw((void const volatile *)(& reg->istatus)); if (((int )tmp___20 & 8) != 0) { tmp___19 = readw((void const volatile *)(& reg->semaphore)); if ((int )tmp___19 & 1) { set_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); mb0 = readw((void const volatile *)((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0)); mb2 = readw((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? (void const volatile *)(& reg->u.isp2100.mailbox0) + 2U : (void const volatile *)(& reg->u.isp2300.mailbox0) + 2U); writew(0, (void volatile *)(& reg->semaphore)); writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); goto ldv_43736; } else { } writew(28672, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); } else { } __const_udelay(21475UL); timer = timer - 1U; ldv_43738: ; if (timer != 0U) { goto ldv_43737; } else { } ldv_43736: tmp___22 = test_and_clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (tmp___22 != 0) { rval = (int )mb0 & 16383; tmp___21 = __fswab16((int )mb2); fw->risc_ram[cnt] = tmp___21; } else { rval = 258; } cnt = cnt + 1U; risc_address = (uint16_t )((int )risc_address + 1); ldv_43740: ; if (cnt <= 61439U && rval == 0) { goto ldv_43739; } else { } if (rval == 0) { qla2xxx_copy_queues(ha, (void *)(& fw->risc_ram) + (unsigned long )cnt); } else { } qla2xxx_dump_post_process(base_vha, rval); qla2100_fw_dump_failed: ; if (hardware_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } return; } } void qla24xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { int rval ; uint32_t cnt ; uint32_t risc_address ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; uint32_t *dmp_reg ; uint32_t *iter_reg ; uint16_t *mbx_reg ; unsigned long flags ; struct qla24xx_fw_dump *fw ; uint32_t ext_mem_cnt ; void *nxt ; void *nxt_chain ; uint32_t *last_chain ; struct scsi_qla_host *base_vha ; void *tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; __u32 tmp___2 ; uint32_t *tmp___3 ; unsigned int tmp___4 ; __u32 tmp___5 ; unsigned int tmp___6 ; __u32 tmp___7 ; unsigned int tmp___8 ; __u32 tmp___9 ; unsigned int tmp___10 ; __u32 tmp___11 ; unsigned int tmp___12 ; __u32 tmp___13 ; unsigned int tmp___14 ; __u32 tmp___15 ; unsigned int tmp___16 ; __u32 tmp___17 ; unsigned int tmp___18 ; __u32 tmp___19 ; uint16_t *tmp___20 ; unsigned short tmp___21 ; __u16 tmp___22 ; uint32_t *tmp___23 ; uint32_t *tmp___24 ; unsigned int tmp___25 ; __u32 tmp___26 ; uint32_t *tmp___27 ; uint32_t *tmp___28 ; unsigned int tmp___29 ; __u32 tmp___30 ; uint32_t *tmp___31 ; uint32_t *tmp___32 ; unsigned int tmp___33 ; __u32 tmp___34 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; last_chain = (uint32_t *)0U; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return; } else { } ext_mem_cnt = 0U; risc_address = ext_mem_cnt; flags = 0UL; if (hardware_locked == 0) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 53254, "No buffer available for dump.\n"); goto qla24xx_fw_dump_failed; } else { } if (ha->fw_dumped != 
0) { ql_log(1U, vha, 53255, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto qla24xx_fw_dump_failed; } else { } fw = & (ha->fw_dump)->isp.isp24; qla2xxx_prep_dump(ha, ha->fw_dump); tmp___1 = readl((void const volatile *)(& reg->host_status)); tmp___2 = __fswab32(tmp___1); fw->host_status = tmp___2; rval = qla24xx_pause_risc(reg); if (rval != 0) { goto qla24xx_fw_dump_failed_0; } else { } dmp_reg = & reg->flash_addr; cnt = 0U; goto ldv_43767; ldv_43766: tmp___3 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___4 = readl((void const volatile *)tmp___3); tmp___5 = __fswab32(tmp___4); fw->host_reg[cnt] = tmp___5; cnt = cnt + 1U; ldv_43767: ; if (cnt <= 31U) { goto ldv_43766; } else { } writel(0U, (void volatile *)(& reg->ictrl)); readl((void const volatile *)(& reg->ictrl)); writel(3952U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(2952790016U, (void volatile *)(& reg->iobase_select)); tmp___6 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___7 = __fswab32(tmp___6); fw->shadow_reg[0] = tmp___7; writel(2953838592U, (void volatile *)(& reg->iobase_select)); tmp___8 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___9 = __fswab32(tmp___8); fw->shadow_reg[1] = tmp___9; writel(2954887168U, (void volatile *)(& reg->iobase_select)); tmp___10 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___11 = __fswab32(tmp___10); fw->shadow_reg[2] = tmp___11; writel(2955935744U, (void volatile *)(& reg->iobase_select)); tmp___12 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___13 = __fswab32(tmp___12); fw->shadow_reg[3] = tmp___13; writel(2956984320U, (void volatile *)(& reg->iobase_select)); tmp___14 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___15 = __fswab32(tmp___14); fw->shadow_reg[4] = tmp___15; writel(2958032896U, (void volatile *)(& reg->iobase_select)); tmp___16 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___17 = __fswab32(tmp___16); fw->shadow_reg[5] = tmp___17; writel(2959081472U, (void volatile *)(& reg->iobase_select)); tmp___18 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___19 = __fswab32(tmp___18); fw->shadow_reg[6] = tmp___19; mbx_reg = & reg->mailbox0; cnt = 0U; goto ldv_43770; ldv_43769: tmp___20 = mbx_reg; mbx_reg = mbx_reg + 1; tmp___21 = readw((void const volatile *)tmp___20); tmp___22 = __fswab16((int )tmp___21); fw->mailbox_reg[cnt] = tmp___22; cnt = cnt + 1U; ldv_43770: ; if (cnt <= 31U) { goto ldv_43769; } else { } iter_reg = (uint32_t *)(& fw->xseq_gp_reg); iter_reg = qla24xx_read_window(reg, 48896U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48928U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48960U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48992U, 16U, iter_reg); qla24xx_read_window(reg, 49008U, 16U, iter_reg); qla24xx_read_window(reg, 49120U, 16U, (uint32_t *)(& fw->xseq_0_reg)); qla24xx_read_window(reg, 49136U, 16U, (uint32_t *)(& fw->xseq_1_reg)); iter_reg = (uint32_t *)(& fw->rseq_gp_reg); iter_reg = qla24xx_read_window(reg, 65280U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65296U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65312U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65328U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65344U, 16U, iter_reg); iter_reg = 
qla24xx_read_window(reg, 65360U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65376U, 16U, iter_reg); qla24xx_read_window(reg, 65392U, 16U, iter_reg); qla24xx_read_window(reg, 65488U, 16U, (uint32_t *)(& fw->rseq_0_reg)); qla24xx_read_window(reg, 65504U, 16U, (uint32_t *)(& fw->rseq_1_reg)); qla24xx_read_window(reg, 65520U, 16U, (uint32_t *)(& fw->rseq_2_reg)); qla24xx_read_window(reg, 28928U, 16U, (uint32_t *)(& fw->cmd_dma_reg)); iter_reg = (uint32_t *)(& fw->req0_dma_reg); iter_reg = qla24xx_read_window(reg, 29184U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_43773; ldv_43772: tmp___23 = iter_reg; iter_reg = iter_reg + 1; tmp___24 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___25 = readl((void const volatile *)tmp___24); tmp___26 = __fswab32(tmp___25); *tmp___23 = tmp___26; cnt = cnt + 1U; ldv_43773: ; if (cnt <= 6U) { goto ldv_43772; } else { } iter_reg = (uint32_t *)(& fw->resp0_dma_reg); iter_reg = qla24xx_read_window(reg, 29440U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_43776; ldv_43775: tmp___27 = iter_reg; iter_reg = iter_reg + 1; tmp___28 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___29 = readl((void const volatile *)tmp___28); tmp___30 = __fswab32(tmp___29); *tmp___27 = tmp___30; cnt = cnt + 1U; ldv_43776: ; if (cnt <= 6U) { goto ldv_43775; } else { } iter_reg = (uint32_t *)(& fw->req1_dma_reg); iter_reg = qla24xx_read_window(reg, 29696U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_43779; ldv_43778: tmp___31 = iter_reg; iter_reg = iter_reg + 1; tmp___32 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___33 = readl((void const volatile *)tmp___32); tmp___34 = __fswab32(tmp___33); *tmp___31 = tmp___34; cnt = cnt + 1U; ldv_43779: ; if (cnt <= 6U) { goto ldv_43778; } else { } iter_reg = (uint32_t *)(& fw->xmt0_dma_reg); iter_reg = qla24xx_read_window(reg, 30208U, 16U, iter_reg); qla24xx_read_window(reg, 30224U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt1_dma_reg); iter_reg = qla24xx_read_window(reg, 30240U, 16U, iter_reg); qla24xx_read_window(reg, 30256U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt2_dma_reg); iter_reg = qla24xx_read_window(reg, 30272U, 16U, iter_reg); qla24xx_read_window(reg, 30288U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt3_dma_reg); iter_reg = qla24xx_read_window(reg, 30304U, 16U, iter_reg); qla24xx_read_window(reg, 30320U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt4_dma_reg); iter_reg = qla24xx_read_window(reg, 30336U, 16U, iter_reg); qla24xx_read_window(reg, 30352U, 16U, iter_reg); qla24xx_read_window(reg, 30368U, 16U, (uint32_t *)(& fw->xmt_data_dma_reg)); iter_reg = (uint32_t *)(& fw->rcvt0_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30464U, 16U, iter_reg); qla24xx_read_window(reg, 30480U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rcvt1_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30496U, 16U, iter_reg); qla24xx_read_window(reg, 30512U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->risc_gp_reg); iter_reg = qla24xx_read_window(reg, 3840U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3856U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3872U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3888U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3904U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3920U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3936U, 16U, iter_reg); qla24xx_read_window(reg, 3952U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->lmc_reg); iter_reg = qla24xx_read_window(reg, 12288U, 16U, iter_reg); iter_reg = 
qla24xx_read_window(reg, 12304U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12320U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12336U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12352U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12368U, 16U, iter_reg); qla24xx_read_window(reg, 12384U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fpm_hdw_reg); iter_reg = qla24xx_read_window(reg, 16384U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16400U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16416U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16432U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16448U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16464U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16480U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16496U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16512U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16528U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16544U, 16U, iter_reg); qla24xx_read_window(reg, 16560U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fb_hdw_reg); iter_reg = qla24xx_read_window(reg, 24576U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24592U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24608U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24624U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24640U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24832U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24880U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24976U, 16U, iter_reg); qla24xx_read_window(reg, 25008U, 16U, iter_reg); rval = qla24xx_soft_reset(ha); if (rval != 0) { goto qla24xx_fw_dump_failed_0; } else { } rval = qla24xx_dump_memory(ha, (uint32_t *)(& fw->code_ram), 32768U, & nxt); if (rval != 0) { goto qla24xx_fw_dump_failed_0; } else { } nxt = qla2xxx_copy_queues(ha, nxt); qla24xx_copy_eft(ha, nxt); nxt_chain = (void *)ha->fw_dump + (unsigned long )ha->chain_offset; nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, & last_chain); if ((unsigned long )last_chain != (unsigned long )((uint32_t *)0U)) { (ha->fw_dump)->version = (ha->fw_dump)->version | 128U; *last_chain = *last_chain | 128U; } else { } ha->fw_dump_len = (uint32_t )((long )nxt_chain) - (uint32_t )((long )ha->fw_dump); qla24xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla24xx_fw_dump_failed: ; if (hardware_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } return; } } void qla25xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { int rval ; uint32_t cnt ; uint32_t risc_address ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; uint32_t *dmp_reg ; uint32_t *iter_reg ; uint16_t *mbx_reg ; unsigned long flags ; struct qla25xx_fw_dump *fw ; uint32_t ext_mem_cnt ; void *nxt ; void *nxt_chain ; uint32_t *last_chain ; struct scsi_qla_host *base_vha ; void *tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; __u32 tmp___2 ; uint32_t *tmp___3 ; unsigned int tmp___4 ; __u32 tmp___5 ; uint32_t *tmp___6 ; unsigned int tmp___7 ; __u32 tmp___8 ; unsigned int tmp___9 ; __u32 tmp___10 ; unsigned int tmp___11 ; __u32 tmp___12 ; uint32_t *tmp___13 ; unsigned int tmp___14 ; __u32 tmp___15 ; unsigned int tmp___16 ; __u32 tmp___17 ; unsigned int tmp___18 ; __u32 tmp___19 ; unsigned int tmp___20 ; __u32 tmp___21 ; unsigned int tmp___22 ; __u32 tmp___23 ; 
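/* Annotation (qla25xx_fw_dump locals): the unsigned int / __u32 temporaries declared here pair
   each raw readl() result with its __fswab32() byte-swapped value used to populate the
   ISP25xx dump structure. */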
unsigned int tmp___24 ; __u32 tmp___25 ; unsigned int tmp___26 ; __u32 tmp___27 ; unsigned int tmp___28 ; __u32 tmp___29 ; unsigned int tmp___30 ; __u32 tmp___31 ; unsigned int tmp___32 ; __u32 tmp___33 ; unsigned int tmp___34 ; __u32 tmp___35 ; unsigned int tmp___36 ; __u32 tmp___37 ; unsigned int tmp___38 ; __u32 tmp___39 ; uint16_t *tmp___40 ; unsigned short tmp___41 ; __u16 tmp___42 ; uint32_t *tmp___43 ; uint32_t *tmp___44 ; unsigned int tmp___45 ; __u32 tmp___46 ; uint32_t *tmp___47 ; uint32_t *tmp___48 ; unsigned int tmp___49 ; __u32 tmp___50 ; uint32_t *tmp___51 ; uint32_t *tmp___52 ; unsigned int tmp___53 ; __u32 tmp___54 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; last_chain = (uint32_t *)0U; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; ext_mem_cnt = 0U; risc_address = ext_mem_cnt; flags = 0UL; if (hardware_locked == 0) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 53256, "No buffer available for dump.\n"); goto qla25xx_fw_dump_failed; } else { } if (ha->fw_dumped != 0) { ql_log(1U, vha, 53257, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto qla25xx_fw_dump_failed; } else { } fw = & (ha->fw_dump)->isp.isp25; qla2xxx_prep_dump(ha, ha->fw_dump); (ha->fw_dump)->version = 33554432U; tmp___1 = readl((void const volatile *)(& reg->host_status)); tmp___2 = __fswab32(tmp___1); fw->host_status = tmp___2; rval = qla24xx_pause_risc(reg); if (rval != 0) { goto qla25xx_fw_dump_failed_0; } else { } iter_reg = (uint32_t *)(& fw->host_risc_reg); iter_reg = qla24xx_read_window(reg, 28672U, 16U, iter_reg); qla24xx_read_window(reg, 28688U, 16U, iter_reg); writel(31744U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(1U, (void volatile *)(& reg->iobase_window)); dmp_reg = & reg->iobase_c4; tmp___3 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___4 = readl((void const volatile *)tmp___3); tmp___5 = __fswab32(tmp___4); fw->pcie_regs[0] = tmp___5; tmp___6 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___7 = readl((void const volatile *)tmp___6); tmp___8 = __fswab32(tmp___7); fw->pcie_regs[1] = tmp___8; tmp___9 = readl((void const volatile *)dmp_reg); tmp___10 = __fswab32(tmp___9); fw->pcie_regs[2] = tmp___10; tmp___11 = readl((void const volatile *)(& reg->iobase_window)); tmp___12 = __fswab32(tmp___11); fw->pcie_regs[3] = tmp___12; writel(0U, (void volatile *)(& reg->iobase_window)); readl((void const volatile *)(& reg->iobase_window)); dmp_reg = & reg->flash_addr; cnt = 0U; goto ldv_43806; ldv_43805: tmp___13 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___14 = readl((void const volatile *)tmp___13); tmp___15 = __fswab32(tmp___14); fw->host_reg[cnt] = tmp___15; cnt = cnt + 1U; ldv_43806: ; if (cnt <= 31U) { goto ldv_43805; } else { } writel(0U, (void volatile *)(& reg->ictrl)); readl((void const volatile *)(& reg->ictrl)); writel(3952U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(2952790016U, (void volatile *)(& reg->iobase_select)); tmp___16 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___17 = __fswab32(tmp___16); fw->shadow_reg[0] = tmp___17; writel(2953838592U, (void volatile *)(& reg->iobase_select)); tmp___18 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___19 = __fswab32(tmp___18); fw->shadow_reg[1] = tmp___19; writel(2954887168U, (void volatile *)(& 
reg->iobase_select)); tmp___20 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___21 = __fswab32(tmp___20); fw->shadow_reg[2] = tmp___21; writel(2955935744U, (void volatile *)(& reg->iobase_select)); tmp___22 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___23 = __fswab32(tmp___22); fw->shadow_reg[3] = tmp___23; writel(2956984320U, (void volatile *)(& reg->iobase_select)); tmp___24 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___25 = __fswab32(tmp___24); fw->shadow_reg[4] = tmp___25; writel(2958032896U, (void volatile *)(& reg->iobase_select)); tmp___26 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___27 = __fswab32(tmp___26); fw->shadow_reg[5] = tmp___27; writel(2959081472U, (void volatile *)(& reg->iobase_select)); tmp___28 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___29 = __fswab32(tmp___28); fw->shadow_reg[6] = tmp___29; writel(2960130048U, (void volatile *)(& reg->iobase_select)); tmp___30 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___31 = __fswab32(tmp___30); fw->shadow_reg[7] = tmp___31; writel(2961178624U, (void volatile *)(& reg->iobase_select)); tmp___32 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___33 = __fswab32(tmp___32); fw->shadow_reg[8] = tmp___33; writel(2962227200U, (void volatile *)(& reg->iobase_select)); tmp___34 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___35 = __fswab32(tmp___34); fw->shadow_reg[9] = tmp___35; writel(2963275776U, (void volatile *)(& reg->iobase_select)); tmp___36 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___37 = __fswab32(tmp___36); fw->shadow_reg[10] = tmp___37; writel(16U, (void volatile *)(& reg->iobase_addr)); tmp___38 = readl((void const volatile *)(& reg->iobase_window)); tmp___39 = __fswab32(tmp___38); fw->risc_io_reg = tmp___39; mbx_reg = & reg->mailbox0; cnt = 0U; goto ldv_43809; ldv_43808: tmp___40 = mbx_reg; mbx_reg = mbx_reg + 1; tmp___41 = readw((void const volatile *)tmp___40); tmp___42 = __fswab16((int )tmp___41); fw->mailbox_reg[cnt] = tmp___42; cnt = cnt + 1U; ldv_43809: ; if (cnt <= 31U) { goto ldv_43808; } else { } iter_reg = (uint32_t *)(& fw->xseq_gp_reg); iter_reg = qla24xx_read_window(reg, 48896U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48928U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48960U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48992U, 16U, iter_reg); qla24xx_read_window(reg, 49008U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xseq_0_reg); iter_reg = qla24xx_read_window(reg, 49088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 49104U, 16U, iter_reg); qla24xx_read_window(reg, 49120U, 16U, iter_reg); qla24xx_read_window(reg, 49136U, 16U, (uint32_t *)(& fw->xseq_1_reg)); iter_reg = (uint32_t *)(& fw->rseq_gp_reg); iter_reg = qla24xx_read_window(reg, 65280U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65296U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65312U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65328U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65344U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65360U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65376U, 16U, iter_reg); qla24xx_read_window(reg, 65392U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rseq_0_reg); iter_reg = qla24xx_read_window(reg, 65472U, 16U, iter_reg); 
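/* Annotation (qla25xx_fw_dump sequencer windows): the reads that follow finish the rseq_0
   window and capture rseq_1/rseq_2, then the ASEQ group/0/1/2 windows and the command DMA
   window, before sampling the request/response queue shadow registers through iobase_q. */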
qla24xx_read_window(reg, 65488U, 16U, iter_reg); qla24xx_read_window(reg, 65504U, 16U, (uint32_t *)(& fw->rseq_1_reg)); qla24xx_read_window(reg, 65520U, 16U, (uint32_t *)(& fw->rseq_2_reg)); iter_reg = (uint32_t *)(& fw->aseq_gp_reg); iter_reg = qla24xx_read_window(reg, 45056U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45072U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45104U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45120U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45136U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45152U, 16U, iter_reg); qla24xx_read_window(reg, 45168U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->aseq_0_reg); iter_reg = qla24xx_read_window(reg, 45248U, 16U, iter_reg); qla24xx_read_window(reg, 45264U, 16U, iter_reg); qla24xx_read_window(reg, 45280U, 16U, (uint32_t *)(& fw->aseq_1_reg)); qla24xx_read_window(reg, 45296U, 16U, (uint32_t *)(& fw->aseq_2_reg)); qla24xx_read_window(reg, 28928U, 16U, (uint32_t *)(& fw->cmd_dma_reg)); iter_reg = (uint32_t *)(& fw->req0_dma_reg); iter_reg = qla24xx_read_window(reg, 29184U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_43812; ldv_43811: tmp___43 = iter_reg; iter_reg = iter_reg + 1; tmp___44 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___45 = readl((void const volatile *)tmp___44); tmp___46 = __fswab32(tmp___45); *tmp___43 = tmp___46; cnt = cnt + 1U; ldv_43812: ; if (cnt <= 6U) { goto ldv_43811; } else { } iter_reg = (uint32_t *)(& fw->resp0_dma_reg); iter_reg = qla24xx_read_window(reg, 29440U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_43815; ldv_43814: tmp___47 = iter_reg; iter_reg = iter_reg + 1; tmp___48 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___49 = readl((void const volatile *)tmp___48); tmp___50 = __fswab32(tmp___49); *tmp___47 = tmp___50; cnt = cnt + 1U; ldv_43815: ; if (cnt <= 6U) { goto ldv_43814; } else { } iter_reg = (uint32_t *)(& fw->req1_dma_reg); iter_reg = qla24xx_read_window(reg, 29696U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_43818; ldv_43817: tmp___51 = iter_reg; iter_reg = iter_reg + 1; tmp___52 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___53 = readl((void const volatile *)tmp___52); tmp___54 = __fswab32(tmp___53); *tmp___51 = tmp___54; cnt = cnt + 1U; ldv_43818: ; if (cnt <= 6U) { goto ldv_43817; } else { } iter_reg = (uint32_t *)(& fw->xmt0_dma_reg); iter_reg = qla24xx_read_window(reg, 30208U, 16U, iter_reg); qla24xx_read_window(reg, 30224U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt1_dma_reg); iter_reg = qla24xx_read_window(reg, 30240U, 16U, iter_reg); qla24xx_read_window(reg, 30256U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt2_dma_reg); iter_reg = qla24xx_read_window(reg, 30272U, 16U, iter_reg); qla24xx_read_window(reg, 30288U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt3_dma_reg); iter_reg = qla24xx_read_window(reg, 30304U, 16U, iter_reg); qla24xx_read_window(reg, 30320U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt4_dma_reg); iter_reg = qla24xx_read_window(reg, 30336U, 16U, iter_reg); qla24xx_read_window(reg, 30352U, 16U, iter_reg); qla24xx_read_window(reg, 30368U, 16U, (uint32_t *)(& fw->xmt_data_dma_reg)); iter_reg = (uint32_t *)(& fw->rcvt0_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30464U, 16U, iter_reg); qla24xx_read_window(reg, 30480U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rcvt1_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30496U, 16U, iter_reg); qla24xx_read_window(reg, 30512U, 16U, iter_reg); iter_reg = 
(uint32_t *)(& fw->risc_gp_reg); iter_reg = qla24xx_read_window(reg, 3840U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3856U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3872U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3888U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3904U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3920U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3936U, 16U, iter_reg); qla24xx_read_window(reg, 3952U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->lmc_reg); iter_reg = qla24xx_read_window(reg, 12288U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12304U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12320U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12336U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12352U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12368U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12384U, 16U, iter_reg); qla24xx_read_window(reg, 12400U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fpm_hdw_reg); iter_reg = qla24xx_read_window(reg, 16384U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16400U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16416U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16432U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16448U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16464U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16480U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16496U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16512U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16528U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16544U, 16U, iter_reg); qla24xx_read_window(reg, 16560U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fb_hdw_reg); iter_reg = qla24xx_read_window(reg, 24576U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24592U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24608U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24624U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24640U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24832U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24880U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25008U, 16U, iter_reg); qla24xx_read_window(reg, 28416U, 16U, iter_reg); nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + (unsigned long )ha->chain_offset, & last_chain); rval = qla24xx_soft_reset(ha); if (rval != 0) { goto qla25xx_fw_dump_failed_0; } else { } rval = qla24xx_dump_memory(ha, (uint32_t *)(& fw->code_ram), 32768U, & nxt); if (rval != 0) { goto qla25xx_fw_dump_failed_0; } else { } nxt = qla2xxx_copy_queues(ha, nxt); qla24xx_copy_eft(ha, nxt); nxt_chain = qla25xx_copy_fce(ha, nxt_chain, & last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, & last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, & last_chain); if ((unsigned long )last_chain != (unsigned long )((uint32_t *)0U)) { (ha->fw_dump)->version = (ha->fw_dump)->version | 128U; *last_chain = *last_chain | 128U; } else { } ha->fw_dump_len = (uint32_t )((long )nxt_chain) - (uint32_t )((long )ha->fw_dump); qla25xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla25xx_fw_dump_failed: ; if (hardware_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } return; } } void 
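/* Annotation: qla81xx_fw_dump is the ISP81xx dump entry point; as far as this excerpt shows,
   it mirrors the register-capture sequence of qla25xx_fw_dump above (pause the RISC, capture
   host/PCIe/shadow/mailbox registers, then the sequencer and DMA register windows). */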
qla81xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { int rval ; uint32_t cnt ; uint32_t risc_address ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; uint32_t *dmp_reg ; uint32_t *iter_reg ; uint16_t *mbx_reg ; unsigned long flags ; struct qla81xx_fw_dump *fw ; uint32_t ext_mem_cnt ; void *nxt ; void *nxt_chain ; uint32_t *last_chain ; struct scsi_qla_host *base_vha ; void *tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; __u32 tmp___2 ; uint32_t *tmp___3 ; unsigned int tmp___4 ; __u32 tmp___5 ; uint32_t *tmp___6 ; unsigned int tmp___7 ; __u32 tmp___8 ; unsigned int tmp___9 ; __u32 tmp___10 ; unsigned int tmp___11 ; __u32 tmp___12 ; uint32_t *tmp___13 ; unsigned int tmp___14 ; __u32 tmp___15 ; unsigned int tmp___16 ; __u32 tmp___17 ; unsigned int tmp___18 ; __u32 tmp___19 ; unsigned int tmp___20 ; __u32 tmp___21 ; unsigned int tmp___22 ; __u32 tmp___23 ; unsigned int tmp___24 ; __u32 tmp___25 ; unsigned int tmp___26 ; __u32 tmp___27 ; unsigned int tmp___28 ; __u32 tmp___29 ; unsigned int tmp___30 ; __u32 tmp___31 ; unsigned int tmp___32 ; __u32 tmp___33 ; unsigned int tmp___34 ; __u32 tmp___35 ; unsigned int tmp___36 ; __u32 tmp___37 ; unsigned int tmp___38 ; __u32 tmp___39 ; uint16_t *tmp___40 ; unsigned short tmp___41 ; __u16 tmp___42 ; uint32_t *tmp___43 ; uint32_t *tmp___44 ; unsigned int tmp___45 ; __u32 tmp___46 ; uint32_t *tmp___47 ; uint32_t *tmp___48 ; unsigned int tmp___49 ; __u32 tmp___50 ; uint32_t *tmp___51 ; uint32_t *tmp___52 ; unsigned int tmp___53 ; __u32 tmp___54 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; last_chain = (uint32_t *)0U; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; ext_mem_cnt = 0U; risc_address = ext_mem_cnt; flags = 0UL; if (hardware_locked == 0) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 53258, "No buffer available for dump.\n"); goto qla81xx_fw_dump_failed; } else { } if (ha->fw_dumped != 0) { ql_log(1U, vha, 53259, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto qla81xx_fw_dump_failed; } else { } fw = & (ha->fw_dump)->isp.isp81; qla2xxx_prep_dump(ha, ha->fw_dump); tmp___1 = readl((void const volatile *)(& reg->host_status)); tmp___2 = __fswab32(tmp___1); fw->host_status = tmp___2; rval = qla24xx_pause_risc(reg); if (rval != 0) { goto qla81xx_fw_dump_failed_0; } else { } iter_reg = (uint32_t *)(& fw->host_risc_reg); iter_reg = qla24xx_read_window(reg, 28672U, 16U, iter_reg); qla24xx_read_window(reg, 28688U, 16U, iter_reg); writel(31744U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(1U, (void volatile *)(& reg->iobase_window)); dmp_reg = & reg->iobase_c4; tmp___3 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___4 = readl((void const volatile *)tmp___3); tmp___5 = __fswab32(tmp___4); fw->pcie_regs[0] = tmp___5; tmp___6 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___7 = readl((void const volatile *)tmp___6); tmp___8 = __fswab32(tmp___7); fw->pcie_regs[1] = tmp___8; tmp___9 = readl((void const volatile *)dmp_reg); tmp___10 = __fswab32(tmp___9); fw->pcie_regs[2] = tmp___10; tmp___11 = readl((void const volatile *)(& reg->iobase_window)); tmp___12 = __fswab32(tmp___11); fw->pcie_regs[3] = tmp___12; writel(0U, (void volatile *)(& reg->iobase_window)); readl((void const volatile *)(& reg->iobase_window)); dmp_reg = & reg->flash_addr; cnt = 0U; goto ldv_43845; ldv_43844: tmp___13 
= dmp_reg; dmp_reg = dmp_reg + 1; tmp___14 = readl((void const volatile *)tmp___13); tmp___15 = __fswab32(tmp___14); fw->host_reg[cnt] = tmp___15; cnt = cnt + 1U; ldv_43845: ; if (cnt <= 31U) { goto ldv_43844; } else { } writel(0U, (void volatile *)(& reg->ictrl)); readl((void const volatile *)(& reg->ictrl)); writel(3952U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(2952790016U, (void volatile *)(& reg->iobase_select)); tmp___16 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___17 = __fswab32(tmp___16); fw->shadow_reg[0] = tmp___17; writel(2953838592U, (void volatile *)(& reg->iobase_select)); tmp___18 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___19 = __fswab32(tmp___18); fw->shadow_reg[1] = tmp___19; writel(2954887168U, (void volatile *)(& reg->iobase_select)); tmp___20 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___21 = __fswab32(tmp___20); fw->shadow_reg[2] = tmp___21; writel(2955935744U, (void volatile *)(& reg->iobase_select)); tmp___22 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___23 = __fswab32(tmp___22); fw->shadow_reg[3] = tmp___23; writel(2956984320U, (void volatile *)(& reg->iobase_select)); tmp___24 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___25 = __fswab32(tmp___24); fw->shadow_reg[4] = tmp___25; writel(2958032896U, (void volatile *)(& reg->iobase_select)); tmp___26 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___27 = __fswab32(tmp___26); fw->shadow_reg[5] = tmp___27; writel(2959081472U, (void volatile *)(& reg->iobase_select)); tmp___28 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___29 = __fswab32(tmp___28); fw->shadow_reg[6] = tmp___29; writel(2960130048U, (void volatile *)(& reg->iobase_select)); tmp___30 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___31 = __fswab32(tmp___30); fw->shadow_reg[7] = tmp___31; writel(2961178624U, (void volatile *)(& reg->iobase_select)); tmp___32 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___33 = __fswab32(tmp___32); fw->shadow_reg[8] = tmp___33; writel(2962227200U, (void volatile *)(& reg->iobase_select)); tmp___34 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___35 = __fswab32(tmp___34); fw->shadow_reg[9] = tmp___35; writel(2963275776U, (void volatile *)(& reg->iobase_select)); tmp___36 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___37 = __fswab32(tmp___36); fw->shadow_reg[10] = tmp___37; writel(16U, (void volatile *)(& reg->iobase_addr)); tmp___38 = readl((void const volatile *)(& reg->iobase_window)); tmp___39 = __fswab32(tmp___38); fw->risc_io_reg = tmp___39; mbx_reg = & reg->mailbox0; cnt = 0U; goto ldv_43848; ldv_43847: tmp___40 = mbx_reg; mbx_reg = mbx_reg + 1; tmp___41 = readw((void const volatile *)tmp___40); tmp___42 = __fswab16((int )tmp___41); fw->mailbox_reg[cnt] = tmp___42; cnt = cnt + 1U; ldv_43848: ; if (cnt <= 31U) { goto ldv_43847; } else { } iter_reg = (uint32_t *)(& fw->xseq_gp_reg); iter_reg = qla24xx_read_window(reg, 48896U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48928U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48960U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48992U, 16U, iter_reg); qla24xx_read_window(reg, 49008U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xseq_0_reg); iter_reg = 
qla24xx_read_window(reg, 49088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 49104U, 16U, iter_reg); qla24xx_read_window(reg, 49120U, 16U, iter_reg); qla24xx_read_window(reg, 49136U, 16U, (uint32_t *)(& fw->xseq_1_reg)); iter_reg = (uint32_t *)(& fw->rseq_gp_reg); iter_reg = qla24xx_read_window(reg, 65280U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65296U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65312U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65328U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65344U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65360U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65376U, 16U, iter_reg); qla24xx_read_window(reg, 65392U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rseq_0_reg); iter_reg = qla24xx_read_window(reg, 65472U, 16U, iter_reg); qla24xx_read_window(reg, 65488U, 16U, iter_reg); qla24xx_read_window(reg, 65504U, 16U, (uint32_t *)(& fw->rseq_1_reg)); qla24xx_read_window(reg, 65520U, 16U, (uint32_t *)(& fw->rseq_2_reg)); iter_reg = (uint32_t *)(& fw->aseq_gp_reg); iter_reg = qla24xx_read_window(reg, 45056U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45072U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45104U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45120U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45136U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45152U, 16U, iter_reg); qla24xx_read_window(reg, 45168U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->aseq_0_reg); iter_reg = qla24xx_read_window(reg, 45248U, 16U, iter_reg); qla24xx_read_window(reg, 45264U, 16U, iter_reg); qla24xx_read_window(reg, 45280U, 16U, (uint32_t *)(& fw->aseq_1_reg)); qla24xx_read_window(reg, 45296U, 16U, (uint32_t *)(& fw->aseq_2_reg)); qla24xx_read_window(reg, 28928U, 16U, (uint32_t *)(& fw->cmd_dma_reg)); iter_reg = (uint32_t *)(& fw->req0_dma_reg); iter_reg = qla24xx_read_window(reg, 29184U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_43851; ldv_43850: tmp___43 = iter_reg; iter_reg = iter_reg + 1; tmp___44 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___45 = readl((void const volatile *)tmp___44); tmp___46 = __fswab32(tmp___45); *tmp___43 = tmp___46; cnt = cnt + 1U; ldv_43851: ; if (cnt <= 6U) { goto ldv_43850; } else { } iter_reg = (uint32_t *)(& fw->resp0_dma_reg); iter_reg = qla24xx_read_window(reg, 29440U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_43854; ldv_43853: tmp___47 = iter_reg; iter_reg = iter_reg + 1; tmp___48 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___49 = readl((void const volatile *)tmp___48); tmp___50 = __fswab32(tmp___49); *tmp___47 = tmp___50; cnt = cnt + 1U; ldv_43854: ; if (cnt <= 6U) { goto ldv_43853; } else { } iter_reg = (uint32_t *)(& fw->req1_dma_reg); iter_reg = qla24xx_read_window(reg, 29696U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_43857; ldv_43856: tmp___51 = iter_reg; iter_reg = iter_reg + 1; tmp___52 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___53 = readl((void const volatile *)tmp___52); tmp___54 = __fswab32(tmp___53); *tmp___51 = tmp___54; cnt = cnt + 1U; ldv_43857: ; if (cnt <= 6U) { goto ldv_43856; } else { } iter_reg = (uint32_t *)(& fw->xmt0_dma_reg); iter_reg = qla24xx_read_window(reg, 30208U, 16U, iter_reg); qla24xx_read_window(reg, 30224U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt1_dma_reg); iter_reg = qla24xx_read_window(reg, 30240U, 16U, iter_reg); qla24xx_read_window(reg, 30256U, 16U, iter_reg); iter_reg = (uint32_t *)(& 
fw->xmt2_dma_reg); iter_reg = qla24xx_read_window(reg, 30272U, 16U, iter_reg); qla24xx_read_window(reg, 30288U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt3_dma_reg); iter_reg = qla24xx_read_window(reg, 30304U, 16U, iter_reg); qla24xx_read_window(reg, 30320U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt4_dma_reg); iter_reg = qla24xx_read_window(reg, 30336U, 16U, iter_reg); qla24xx_read_window(reg, 30352U, 16U, iter_reg); qla24xx_read_window(reg, 30368U, 16U, (uint32_t *)(& fw->xmt_data_dma_reg)); iter_reg = (uint32_t *)(& fw->rcvt0_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30464U, 16U, iter_reg); qla24xx_read_window(reg, 30480U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rcvt1_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30496U, 16U, iter_reg); qla24xx_read_window(reg, 30512U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->risc_gp_reg); iter_reg = qla24xx_read_window(reg, 3840U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3856U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3872U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3888U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3904U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3920U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3936U, 16U, iter_reg); qla24xx_read_window(reg, 3952U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->lmc_reg); iter_reg = qla24xx_read_window(reg, 12288U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12304U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12320U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12336U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12352U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12368U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12384U, 16U, iter_reg); qla24xx_read_window(reg, 12400U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fpm_hdw_reg); iter_reg = qla24xx_read_window(reg, 16384U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16400U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16416U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16432U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16448U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16464U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16480U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16496U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16512U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16528U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16544U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16560U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16576U, 16U, iter_reg); qla24xx_read_window(reg, 16592U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fb_hdw_reg); iter_reg = qla24xx_read_window(reg, 24576U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24592U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24608U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24624U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24640U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24832U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24880U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25008U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25024U, 16U, iter_reg); qla24xx_read_window(reg, 28416U, 16U, iter_reg); nxt_chain = 
qla25xx_copy_mq(ha, (void *)ha->fw_dump + (unsigned long )ha->chain_offset, & last_chain); rval = qla24xx_soft_reset(ha); if (rval != 0) { goto qla81xx_fw_dump_failed_0; } else { } rval = qla24xx_dump_memory(ha, (uint32_t *)(& fw->code_ram), 32768U, & nxt); if (rval != 0) { goto qla81xx_fw_dump_failed_0; } else { } nxt = qla2xxx_copy_queues(ha, nxt); qla24xx_copy_eft(ha, nxt); nxt_chain = qla25xx_copy_fce(ha, nxt_chain, & last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, & last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, & last_chain); if ((unsigned long )last_chain != (unsigned long )((uint32_t *)0U)) { (ha->fw_dump)->version = (ha->fw_dump)->version | 128U; *last_chain = *last_chain | 128U; } else { } ha->fw_dump_len = (uint32_t )((long )nxt_chain) - (uint32_t )((long )ha->fw_dump); qla81xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla81xx_fw_dump_failed: ; if (hardware_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } return; } } void qla83xx_fw_dump(scsi_qla_host_t *vha , int hardware_locked ) { int rval ; uint32_t cnt ; uint32_t reg_data ; uint32_t risc_address ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; uint32_t *dmp_reg ; uint32_t *iter_reg ; uint16_t *mbx_reg ; unsigned long flags ; struct qla83xx_fw_dump *fw ; uint32_t ext_mem_cnt ; void *nxt ; void *nxt_chain ; uint32_t *last_chain ; struct scsi_qla_host *base_vha ; void *tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; __u32 tmp___2 ; uint32_t *tmp___3 ; unsigned int tmp___4 ; __u32 tmp___5 ; uint32_t *tmp___6 ; unsigned int tmp___7 ; __u32 tmp___8 ; unsigned int tmp___9 ; __u32 tmp___10 ; unsigned int tmp___11 ; __u32 tmp___12 ; uint32_t *tmp___13 ; unsigned int tmp___14 ; __u32 tmp___15 ; unsigned int tmp___16 ; __u32 tmp___17 ; unsigned int tmp___18 ; __u32 tmp___19 ; unsigned int tmp___20 ; __u32 tmp___21 ; unsigned int tmp___22 ; __u32 tmp___23 ; unsigned int tmp___24 ; __u32 tmp___25 ; unsigned int tmp___26 ; __u32 tmp___27 ; unsigned int tmp___28 ; __u32 tmp___29 ; unsigned int tmp___30 ; __u32 tmp___31 ; unsigned int tmp___32 ; __u32 tmp___33 ; unsigned int tmp___34 ; __u32 tmp___35 ; unsigned int tmp___36 ; __u32 tmp___37 ; unsigned int tmp___38 ; __u32 tmp___39 ; uint16_t *tmp___40 ; unsigned short tmp___41 ; __u16 tmp___42 ; uint32_t *tmp___43 ; uint32_t *tmp___44 ; unsigned int tmp___45 ; __u32 tmp___46 ; uint32_t *tmp___47 ; uint32_t *tmp___48 ; unsigned int tmp___49 ; __u32 tmp___50 ; uint32_t *tmp___51 ; uint32_t *tmp___52 ; unsigned int tmp___53 ; __u32 tmp___54 ; unsigned short tmp___55 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; last_chain = (uint32_t *)0U; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; ext_mem_cnt = 0U; risc_address = ext_mem_cnt; flags = 0UL; if (hardware_locked == 0) { tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); } else { } if ((unsigned long )ha->fw_dump == (unsigned long )((struct qla2xxx_fw_dump *)0)) { ql_log(1U, vha, 53260, "No buffer available for dump!!!\n"); goto qla83xx_fw_dump_failed; } else { } if (ha->fw_dumped != 0) { ql_log(1U, vha, 53261, "Firmware has been previously dumped (%p) -- ignoring request...\n", ha->fw_dump); goto qla83xx_fw_dump_failed; } else { } fw = & (ha->fw_dump)->isp.isp83; qla2xxx_prep_dump(ha, ha->fw_dump); tmp___1 = readl((void const volatile *)(& reg->host_status)); tmp___2 = __fswab32(tmp___1); fw->host_status = tmp___2; rval = qla24xx_pause_risc(reg); if (rval != 0) { goto 
qla83xx_fw_dump_failed_0; } else { } writel(24576U, (void volatile *)(& reg->iobase_addr)); dmp_reg = & reg->iobase_window; reg_data = readl((void const volatile *)dmp_reg); writel(0U, (void volatile *)dmp_reg); dmp_reg = (uint32_t *)(& reg->unused_4_1); reg_data = readl((void const volatile *)dmp_reg); writel(0U, (void volatile *)dmp_reg); writel(24592U, (void volatile *)(& reg->iobase_addr)); dmp_reg = (uint32_t *)(& reg->unused_4_1) + 2UL; reg_data = readl((void const volatile *)dmp_reg); writel(0U, (void volatile *)dmp_reg); writel(3952U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(1610612736U, (void volatile *)(& reg->iobase_select)); iter_reg = (uint32_t *)(& fw->host_risc_reg); iter_reg = qla24xx_read_window(reg, 28672U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28688U, 16U, iter_reg); qla24xx_read_window(reg, 28736U, 16U, iter_reg); writel(31744U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(1U, (void volatile *)(& reg->iobase_window)); dmp_reg = & reg->iobase_c4; tmp___3 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___4 = readl((void const volatile *)tmp___3); tmp___5 = __fswab32(tmp___4); fw->pcie_regs[0] = tmp___5; tmp___6 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___7 = readl((void const volatile *)tmp___6); tmp___8 = __fswab32(tmp___7); fw->pcie_regs[1] = tmp___8; tmp___9 = readl((void const volatile *)dmp_reg); tmp___10 = __fswab32(tmp___9); fw->pcie_regs[2] = tmp___10; tmp___11 = readl((void const volatile *)(& reg->iobase_window)); tmp___12 = __fswab32(tmp___11); fw->pcie_regs[3] = tmp___12; writel(0U, (void volatile *)(& reg->iobase_window)); readl((void const volatile *)(& reg->iobase_window)); dmp_reg = & reg->flash_addr; cnt = 0U; goto ldv_43885; ldv_43884: tmp___13 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___14 = readl((void const volatile *)tmp___13); tmp___15 = __fswab32(tmp___14); fw->host_reg[cnt] = tmp___15; cnt = cnt + 1U; ldv_43885: ; if (cnt <= 31U) { goto ldv_43884; } else { } writel(0U, (void volatile *)(& reg->ictrl)); readl((void const volatile *)(& reg->ictrl)); writel(3952U, (void volatile *)(& reg->iobase_addr)); readl((void const volatile *)(& reg->iobase_addr)); writel(2952790016U, (void volatile *)(& reg->iobase_select)); tmp___16 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___17 = __fswab32(tmp___16); fw->shadow_reg[0] = tmp___17; writel(2953838592U, (void volatile *)(& reg->iobase_select)); tmp___18 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___19 = __fswab32(tmp___18); fw->shadow_reg[1] = tmp___19; writel(2954887168U, (void volatile *)(& reg->iobase_select)); tmp___20 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___21 = __fswab32(tmp___20); fw->shadow_reg[2] = tmp___21; writel(2955935744U, (void volatile *)(& reg->iobase_select)); tmp___22 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___23 = __fswab32(tmp___22); fw->shadow_reg[3] = tmp___23; writel(2956984320U, (void volatile *)(& reg->iobase_select)); tmp___24 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___25 = __fswab32(tmp___24); fw->shadow_reg[4] = tmp___25; writel(2958032896U, (void volatile *)(& reg->iobase_select)); tmp___26 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___27 = __fswab32(tmp___26); fw->shadow_reg[5] = tmp___27; writel(2959081472U, (void volatile *)(& reg->iobase_select)); tmp___28 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___29 = __fswab32(tmp___28); fw->shadow_reg[6] = tmp___29; 
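/* Shadow registers 7-10 are captured next with the same pattern used above: write a
 * select value to iobase_select, read iobase_sdata, and byte-swap the result into
 * fw->shadow_reg[].  The readl/__fswab32 pairs appear to be CIL's expansion of the
 * driver's register-read-plus-endian-swap helpers (an assumption from the surrounding
 * code, not something stated in this file). */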
writel(2960130048U, (void volatile *)(& reg->iobase_select)); tmp___30 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___31 = __fswab32(tmp___30); fw->shadow_reg[7] = tmp___31; writel(2961178624U, (void volatile *)(& reg->iobase_select)); tmp___32 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___33 = __fswab32(tmp___32); fw->shadow_reg[8] = tmp___33; writel(2962227200U, (void volatile *)(& reg->iobase_select)); tmp___34 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___35 = __fswab32(tmp___34); fw->shadow_reg[9] = tmp___35; writel(2963275776U, (void volatile *)(& reg->iobase_select)); tmp___36 = readl((void const volatile *)(& reg->iobase_sdata)); tmp___37 = __fswab32(tmp___36); fw->shadow_reg[10] = tmp___37; writel(16U, (void volatile *)(& reg->iobase_addr)); tmp___38 = readl((void const volatile *)(& reg->iobase_window)); tmp___39 = __fswab32(tmp___38); fw->risc_io_reg = tmp___39; mbx_reg = & reg->mailbox0; cnt = 0U; goto ldv_43888; ldv_43887: tmp___40 = mbx_reg; mbx_reg = mbx_reg + 1; tmp___41 = readw((void const volatile *)tmp___40); tmp___42 = __fswab16((int )tmp___41); fw->mailbox_reg[cnt] = tmp___42; cnt = cnt + 1U; ldv_43888: ; if (cnt <= 31U) { goto ldv_43887; } else { } iter_reg = (uint32_t *)(& fw->xseq_gp_reg); iter_reg = qla24xx_read_window(reg, 48640U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48656U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48672U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48688U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48704U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48720U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48736U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48752U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48896U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48928U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48960U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 48992U, 16U, iter_reg); qla24xx_read_window(reg, 49008U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xseq_0_reg); iter_reg = qla24xx_read_window(reg, 49088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 49104U, 16U, iter_reg); qla24xx_read_window(reg, 49120U, 16U, iter_reg); qla24xx_read_window(reg, 49136U, 16U, (uint32_t *)(& fw->xseq_1_reg)); qla24xx_read_window(reg, 48880U, 16U, (uint32_t *)(& fw->xseq_2_reg)); iter_reg = (uint32_t *)(& fw->rseq_gp_reg); iter_reg = qla24xx_read_window(reg, 65024U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65040U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65056U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65072U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65104U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65120U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65136U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65280U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65296U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65312U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65328U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65344U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65360U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 65376U, 16U, iter_reg); qla24xx_read_window(reg, 
65392U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rseq_0_reg); iter_reg = qla24xx_read_window(reg, 65472U, 16U, iter_reg); qla24xx_read_window(reg, 65488U, 16U, iter_reg); qla24xx_read_window(reg, 65504U, 16U, (uint32_t *)(& fw->rseq_1_reg)); qla24xx_read_window(reg, 65520U, 16U, (uint32_t *)(& fw->rseq_2_reg)); qla24xx_read_window(reg, 65264U, 16U, (uint32_t *)(& fw->rseq_3_reg)); iter_reg = (uint32_t *)(& fw->aseq_gp_reg); iter_reg = qla24xx_read_window(reg, 45056U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45072U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45088U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45104U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45120U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45136U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45152U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45168U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45312U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45328U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45344U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45360U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45376U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45392U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 45408U, 16U, iter_reg); qla24xx_read_window(reg, 45424U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->aseq_0_reg); iter_reg = qla24xx_read_window(reg, 45248U, 16U, iter_reg); qla24xx_read_window(reg, 45264U, 16U, iter_reg); qla24xx_read_window(reg, 45280U, 16U, (uint32_t *)(& fw->aseq_1_reg)); qla24xx_read_window(reg, 45296U, 16U, (uint32_t *)(& fw->aseq_2_reg)); qla24xx_read_window(reg, 45552U, 16U, (uint32_t *)(& fw->aseq_3_reg)); iter_reg = (uint32_t *)(& fw->cmd_dma_reg); iter_reg = qla24xx_read_window(reg, 28928U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28960U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28976U, 16U, iter_reg); qla24xx_read_window(reg, 29168U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->req0_dma_reg); iter_reg = qla24xx_read_window(reg, 29184U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_43891; ldv_43890: tmp___43 = iter_reg; iter_reg = iter_reg + 1; tmp___44 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___45 = readl((void const volatile *)tmp___44); tmp___46 = __fswab32(tmp___45); *tmp___43 = tmp___46; cnt = cnt + 1U; ldv_43891: ; if (cnt <= 6U) { goto ldv_43890; } else { } iter_reg = (uint32_t *)(& fw->resp0_dma_reg); iter_reg = qla24xx_read_window(reg, 29440U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_43894; ldv_43893: tmp___47 = iter_reg; iter_reg = iter_reg + 1; tmp___48 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___49 = readl((void const volatile *)tmp___48); tmp___50 = __fswab32(tmp___49); *tmp___47 = tmp___50; cnt = cnt + 1U; ldv_43894: ; if (cnt <= 6U) { goto ldv_43893; } else { } iter_reg = (uint32_t *)(& fw->req1_dma_reg); iter_reg = qla24xx_read_window(reg, 29696U, 8U, iter_reg); dmp_reg = & reg->iobase_q; cnt = 0U; goto ldv_43897; ldv_43896: tmp___51 = iter_reg; iter_reg = iter_reg + 1; tmp___52 = dmp_reg; dmp_reg = dmp_reg + 1; tmp___53 = readl((void const volatile *)tmp___52); tmp___54 = __fswab32(tmp___53); *tmp___51 = tmp___54; cnt = cnt + 1U; ldv_43897: ; if (cnt <= 6U) { goto ldv_43896; } else { } iter_reg = (uint32_t *)(& fw->xmt0_dma_reg); iter_reg = qla24xx_read_window(reg, 30208U, 16U, iter_reg); qla24xx_read_window(reg, 30224U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt1_dma_reg); iter_reg = 
qla24xx_read_window(reg, 30240U, 16U, iter_reg); qla24xx_read_window(reg, 30256U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt2_dma_reg); iter_reg = qla24xx_read_window(reg, 30272U, 16U, iter_reg); qla24xx_read_window(reg, 30288U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt3_dma_reg); iter_reg = qla24xx_read_window(reg, 30304U, 16U, iter_reg); qla24xx_read_window(reg, 30320U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->xmt4_dma_reg); iter_reg = qla24xx_read_window(reg, 30336U, 16U, iter_reg); qla24xx_read_window(reg, 30352U, 16U, iter_reg); qla24xx_read_window(reg, 30368U, 16U, (uint32_t *)(& fw->xmt_data_dma_reg)); iter_reg = (uint32_t *)(& fw->rcvt0_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30464U, 16U, iter_reg); qla24xx_read_window(reg, 30480U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rcvt1_data_dma_reg); iter_reg = qla24xx_read_window(reg, 30496U, 16U, iter_reg); qla24xx_read_window(reg, 30512U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->risc_gp_reg); iter_reg = qla24xx_read_window(reg, 3840U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3856U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3872U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3888U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3904U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3920U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 3936U, 16U, iter_reg); qla24xx_read_window(reg, 3952U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->lmc_reg); iter_reg = qla24xx_read_window(reg, 12288U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12304U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12320U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12336U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12352U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12368U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 12384U, 16U, iter_reg); qla24xx_read_window(reg, 12400U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->fpm_hdw_reg); iter_reg = qla24xx_read_window(reg, 16384U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16400U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16416U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16432U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16448U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16464U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16480U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16496U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16512U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16528U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16544U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16560U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16576U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16592U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 16608U, 16U, iter_reg); qla24xx_read_window(reg, 16624U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rq0_array_reg); iter_reg = qla24xx_read_window(reg, 23552U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23568U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23584U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23600U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23616U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23632U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23648U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23664U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23680U, 16U, iter_reg); 
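/* The remaining RQ0 array windows, followed by the RQ1, RP0, and RP1 array windows,
 * are captured below.  Each qla24xx_read_window(reg, base, count, buf) call selects
 * the register window at 'base', copies 'count' dwords into the dump buffer, and
 * returns the advanced destination pointer; the bases are bare numbers here because
 * CIL has folded the driver's window constants (an assumption from context). */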
iter_reg = qla24xx_read_window(reg, 23696U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23712U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23728U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23744U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23760U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23776U, 16U, iter_reg); qla24xx_read_window(reg, 23792U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rq1_array_reg); iter_reg = qla24xx_read_window(reg, 23808U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23824U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23840U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23856U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23872U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23888U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23904U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23920U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23936U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23952U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23968U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 23984U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24000U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24016U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24032U, 16U, iter_reg); qla24xx_read_window(reg, 24048U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rp0_array_reg); iter_reg = qla24xx_read_window(reg, 24064U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24080U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24096U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24112U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24128U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24144U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24160U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24176U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24192U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24208U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24224U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24240U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24256U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24272U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24288U, 16U, iter_reg); qla24xx_read_window(reg, 24304U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->rp1_array_reg); iter_reg = qla24xx_read_window(reg, 24320U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24336U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24352U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24368U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24384U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24400U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24416U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24432U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24448U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24464U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24480U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24496U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24512U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24528U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24544U, 16U, iter_reg); qla24xx_read_window(reg, 24560U, 16U, iter_reg); iter_reg = (uint32_t *)(& fw->at0_array_reg); iter_reg = qla24xx_read_window(reg, 28800U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28816U, 16U, 
iter_reg); iter_reg = qla24xx_read_window(reg, 28832U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28848U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28864U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28880U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 28896U, 16U, iter_reg); qla24xx_read_window(reg, 28912U, 16U, iter_reg); qla24xx_read_window(reg, 30720U, 16U, (uint32_t *)(& fw->queue_control_reg)); iter_reg = (uint32_t *)(& fw->fb_hdw_reg); iter_reg = qla24xx_read_window(reg, 24576U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24592U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24608U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24624U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24640U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24672U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24688U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24832U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24880U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24912U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24944U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 24976U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25008U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25024U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25904U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25920U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25936U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25952U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25968U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 25984U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 26000U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 26016U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 26032U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 26048U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 26064U, 16U, iter_reg); iter_reg = qla24xx_read_window(reg, 26080U, 16U, iter_reg); qla24xx_read_window(reg, 28416U, 16U, iter_reg); nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + (unsigned long )ha->chain_offset, & last_chain); rval = qla24xx_soft_reset(ha); if (rval != 0) { ql_log(1U, vha, 53262, "SOFT RESET FAILED, forcing continuation of dump!!!\n"); rval = 0; ql_log(1U, vha, 53263, "try a bigger hammer!!!\n"); writel(268435456U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); writel(1073741824U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); writel(536870912U, (void volatile *)(& reg->hccr)); readl((void const volatile *)(& reg->hccr)); cnt = 30000U; goto ldv_43900; ldv_43899: __const_udelay(21475UL); cnt = cnt - 1U; ldv_43900: ; if (cnt != 0U) { tmp___55 = readw((void const volatile *)(& reg->mailbox0)); if ((unsigned int )tmp___55 != 0U) { goto ldv_43899; } else { goto ldv_43901; } } else { } ldv_43901: ; if (cnt == 0U) { nxt = (void *)(& fw->code_ram); nxt = nxt + 36864UL; nxt = nxt + (unsigned long )(ha->fw_memory_size - 1048575U); goto copy_queue; } else { ql_log(1U, vha, 53264, "bigger hammer success?\n"); } } else { } rval = qla24xx_dump_memory(ha, (uint32_t *)(& fw->code_ram), 36864U, & nxt); if (rval != 0) { goto qla83xx_fw_dump_failed_0; } else { } copy_queue: nxt = qla2xxx_copy_queues(ha, nxt); qla24xx_copy_eft(ha, nxt); nxt_chain = qla25xx_copy_fce(ha, nxt_chain, & last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, & last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, 
nxt_chain, & last_chain); if ((unsigned long )last_chain != (unsigned long )((uint32_t *)0U)) { (ha->fw_dump)->version = (ha->fw_dump)->version | 128U; *last_chain = *last_chain | 128U; } else { } ha->fw_dump_len = (uint32_t )((long )nxt_chain) - (uint32_t )((long )ha->fw_dump); qla83xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla83xx_fw_dump_failed: ; if (hardware_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } return; } } __inline static int ql_mask_match(uint32_t level ) { { if (ql2xextended_error_logging == 1) { ql2xextended_error_logging = 507510784; } else { } return ((level & (uint32_t )ql2xextended_error_logging) == level); } } void ql_dbg(uint32_t level , scsi_qla_host_t *vha , int32_t id , char const *fmt , ...) { va_list va ; struct va_format vaf ; int tmp ; struct pci_dev const *pdev ; char const *tmp___0 ; { tmp = ql_mask_match(level); if (tmp == 0) { return; } else { } ldv__builtin_va_start((__va_list_tag *)(& va)); vaf.fmt = fmt; vaf.va = & va; if ((unsigned long )vha != (unsigned long )((scsi_qla_host_t *)0)) { pdev = (struct pci_dev const *)(vha->hw)->pdev; tmp___0 = dev_name(& pdev->dev); printk("\f%s [%s]-%04x:%ld: %pV", (char *)"qla2xxx", tmp___0, (uint32_t )id + ql_dbg_offset, vha->host_no, & vaf); } else { printk("\f%s [%s]-%04x: : %pV", (char *)"qla2xxx", (char *)"0000:00:00.0", (uint32_t )id + ql_dbg_offset, & vaf); } ldv__builtin_va_end((__va_list_tag *)(& va)); return; } } void ql_dbg_pci(uint32_t level , struct pci_dev *pdev , int32_t id , char const *fmt , ...) { va_list va ; struct va_format vaf ; int tmp ; char const *tmp___0 ; { if ((unsigned long )pdev == (unsigned long )((struct pci_dev *)0)) { return; } else { } tmp = ql_mask_match(level); if (tmp == 0) { return; } else { } ldv__builtin_va_start((__va_list_tag *)(& va)); vaf.fmt = fmt; vaf.va = & va; tmp___0 = dev_name((struct device const *)(& pdev->dev)); printk("\f%s [%s]-%04x: : %pV", (char *)"qla2xxx", tmp___0, (uint32_t )id + ql_dbg_offset, & vaf); ldv__builtin_va_end((__va_list_tag *)(& va)); return; } } void ql_log(uint32_t level , scsi_qla_host_t *vha , int32_t id , char const *fmt , ...) { va_list va ; struct va_format vaf ; char pbuf[128U] ; struct pci_dev const *pdev ; char const *tmp ; { if ((uint32_t )ql_errlev < level) { return; } else { } if ((unsigned long )vha != (unsigned long )((scsi_qla_host_t *)0)) { pdev = (struct pci_dev const *)(vha->hw)->pdev; tmp = dev_name(& pdev->dev); snprintf((char *)(& pbuf), 128UL, "%s [%s]-%04x:%ld: ", (char *)"qla2xxx", tmp, id, vha->host_no); } else { snprintf((char *)(& pbuf), 128UL, "%s [%s]-%04x: : ", (char *)"qla2xxx", (char *)"0000:00:00.0", id); } pbuf[127UL] = 0; ldv__builtin_va_start((__va_list_tag *)(& va)); vaf.fmt = fmt; vaf.va = & va; switch (level) { case 0U: printk("\n%s%pV", (char *)(& pbuf), & vaf); goto ldv_43934; case 1U: printk("\v%s%pV", (char *)(& pbuf), & vaf); goto ldv_43934; case 2U: printk("\f%s%pV", (char *)(& pbuf), & vaf); goto ldv_43934; default: printk("\016%s%pV", (char *)(& pbuf), & vaf); goto ldv_43934; } ldv_43934: ldv__builtin_va_end((__va_list_tag *)(& va)); return; } } void ql_log_pci(uint32_t level , struct pci_dev *pdev , int32_t id , char const *fmt , ...) 
{ va_list va ; struct va_format vaf ; char pbuf[128U] ; char const *tmp ; { if ((unsigned long )pdev == (unsigned long )((struct pci_dev *)0)) { return; } else { } if ((uint32_t )ql_errlev < level) { return; } else { } tmp = dev_name((struct device const *)(& pdev->dev)); snprintf((char *)(& pbuf), 128UL, "%s [%s]-%04x: : ", (char *)"qla2xxx", tmp, id); pbuf[127UL] = 0; ldv__builtin_va_start((__va_list_tag *)(& va)); vaf.fmt = fmt; vaf.va = & va; switch (level) { case 0U: printk("\n%s%pV", (char *)(& pbuf), & vaf); goto ldv_43948; case 1U: printk("\v%s%pV", (char *)(& pbuf), & vaf); goto ldv_43948; case 2U: printk("\f%s%pV", (char *)(& pbuf), & vaf); goto ldv_43948; default: printk("\016%s%pV", (char *)(& pbuf), & vaf); goto ldv_43948; } ldv_43948: ldv__builtin_va_end((__va_list_tag *)(& va)); return; } } void ql_dump_regs(uint32_t level , scsi_qla_host_t *vha , int32_t id ) { int i ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; struct device_reg_24xx *reg24 ; struct device_reg_82xx *reg82 ; uint16_t *mbx_reg ; int tmp ; uint16_t *tmp___0 ; unsigned short tmp___1 ; { ha = vha->hw; reg = & (ha->iobase)->isp; reg24 = & (ha->iobase)->isp24; reg82 = & (ha->iobase)->isp82; tmp = ql_mask_match(level); if (tmp == 0) { return; } else { } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { mbx_reg = (uint16_t *)(& reg82->mailbox_in); } else if ((ha->device_type & 134217728U) != 0U) { mbx_reg = & reg24->mailbox0; } else { mbx_reg = (int )ha->device_type & 1 || (ha->device_type & 2U) != 0U ? & reg->u.isp2100.mailbox0 : & reg->u.isp2300.mailbox0; } ql_dbg(level, vha, id, "Mailbox registers:\n"); i = 0; goto ldv_43964; ldv_43963: tmp___0 = mbx_reg; mbx_reg = mbx_reg + 1; tmp___1 = readw((void const volatile *)tmp___0); ql_dbg(level, vha, id, "mbox[%d] 0x%04x\n", i, (int )tmp___1); i = i + 1; ldv_43964: ; if (i <= 5) { goto ldv_43963; } else { } return; } } void ql_dump_buffer(uint32_t level , scsi_qla_host_t *vha , int32_t id , uint8_t *b , uint32_t size ) { uint32_t cnt ; uint8_t c ; int tmp ; uint8_t *tmp___0 ; { tmp = ql_mask_match(level); if (tmp == 0) { return; } else { } ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh Fh\n"); ql_dbg(level, vha, id, "--------------------------------------------------------------\n"); ql_dbg(level, vha, id, " "); cnt = 0U; goto ldv_43976; ldv_43975: tmp___0 = b; b = b + 1; c = *tmp___0; printk("%02x", (unsigned int )c); cnt = cnt + 1U; if ((cnt & 15U) == 0U) { printk("\n"); } else { printk(" "); } ldv_43976: ; if (cnt < size) { goto ldv_43975; } else { } if ((cnt & 15U) != 0U) { ql_dbg(level, vha, id, "\n"); } else { } return; } } void disable_suitable_timer_8(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_8) { ldv_timer_state_8 = 0; return; } else { } return; } } void activate_pending_timer_8(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_8 == (unsigned long )timer) { if (ldv_timer_state_8 == 2 || pending_flag != 0) { ldv_timer_list_8 = timer; ldv_timer_list_8->data = data; ldv_timer_state_8 = 1; } else { } return; } else { } reg_timer_8(timer); ldv_timer_list_8->data = data; return; } } void choose_timer_8(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_8 = 2; return; } } int reg_timer_8(struct timer_list *timer ) { { ldv_timer_list_8 = timer; ldv_timer_state_8 = 1; return (0); } } int ldv_del_timer_39(struct timer_list *ldv_func_arg1 ) { 
ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_40(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } extern void __might_sleep(char const * , int , int ) ; extern int strncmp(char const * , char const * , __kernel_size_t ) ; int ldv_del_timer_43(struct timer_list *ldv_func_arg1 ) ; __inline static void outw(unsigned short value , int port ) { { __asm__ volatile ("outw %w0, %w1": : "a" (value), "Nd" (port)); return; } } __inline static unsigned short inw(int port ) { unsigned short value ; { __asm__ volatile ("inw %w1, %w0": "=a" (value): "Nd" (port)); return (value); } } void activate_pending_timer_9(struct timer_list *timer , unsigned long data , int pending_flag ) ; int reg_timer_9(struct timer_list *timer ) ; void choose_timer_9(struct timer_list *timer ) ; void disable_suitable_timer_9(struct timer_list *timer ) ; extern int _cond_resched(void) ; int ldv_scsi_add_host_with_dma_44(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; extern void scsi_unblock_requests(struct Scsi_Host * ) ; extern void scsi_block_requests(struct Scsi_Host * ) ; extern struct fc_vport *fc_vport_create(struct Scsi_Host * , int , struct fc_vport_identifiers * ) ; int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha , struct qla_fcp_prio_cfg *pri_cfg , uint8_t flag ) ; static void qla2x00_lock_nvram_access(struct qla_hw_data *ha ) { uint16_t data ; struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; if (((ha->device_type & 1U) == 0U && (ha->device_type & 2U) == 0U) && (ha->device_type & 4U) == 0U) { data = readw((void const volatile *)(& reg->nvram)); goto ldv_43459; ldv_43458: __const_udelay(429500UL); data = readw((void const volatile *)(& reg->nvram)); ldv_43459: ; if ((int )((short )data) < 0) { goto ldv_43458; } else { } writew(1, (void volatile *)(& reg->u.isp2300.host_semaphore)); readw((void const volatile *)(& reg->u.isp2300.host_semaphore)); __const_udelay(21475UL); data = readw((void const volatile *)(& reg->u.isp2300.host_semaphore)); goto ldv_43462; ldv_43461: __const_udelay(429500UL); writew(1, (void volatile *)(& reg->u.isp2300.host_semaphore)); readw((void const volatile *)(& reg->u.isp2300.host_semaphore)); __const_udelay(21475UL); data = readw((void const volatile *)(& reg->u.isp2300.host_semaphore)); ldv_43462: ; if (((int )data & 1) == 0) { goto ldv_43461; } else { } } else { } return; } } static void qla2x00_unlock_nvram_access(struct qla_hw_data *ha ) { struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; if (((ha->device_type & 1U) == 0U && (ha->device_type & 2U) == 0U) && (ha->device_type & 4U) == 0U) { writew(0, (void volatile *)(& reg->u.isp2300.host_semaphore)); readw((void const volatile *)(& reg->u.isp2300.host_semaphore)); } else { } return; } } static void qla2x00_nv_write(struct qla_hw_data *ha , uint16_t data ) { struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; writew((int )((unsigned int )data | 16386U), (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); __const_udelay(42950UL); writew((int )((unsigned int )data | 16387U), (void volatile *)(& reg->nvram)); readw((void const volatile *)(& 
reg->nvram)); __const_udelay(42950UL); writew((int )((unsigned int )data | 16386U), (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); __const_udelay(42950UL); return; } } static uint16_t qla2x00_nvram_request(struct qla_hw_data *ha , uint32_t nv_cmd ) { uint8_t cnt ; struct device_reg_2xxx *reg ; uint16_t data ; uint16_t reg_data ; { reg = & (ha->iobase)->isp; data = 0U; nv_cmd = nv_cmd << 5; cnt = 0U; goto ldv_43482; ldv_43481: ; if ((int )nv_cmd < 0) { qla2x00_nv_write(ha, 4); } else { qla2x00_nv_write(ha, 0); } nv_cmd = nv_cmd << 1; cnt = (uint8_t )((int )cnt + 1); ldv_43482: ; if ((unsigned int )cnt <= 10U) { goto ldv_43481; } else { } cnt = 0U; goto ldv_43485; ldv_43484: writew(3, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); __const_udelay(42950UL); data = (int )data << 1U; reg_data = readw((void const volatile *)(& reg->nvram)); if (((int )reg_data & 8) != 0) { data = (uint16_t )((unsigned int )data | 1U); } else { } writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); __const_udelay(42950UL); cnt = (uint8_t )((int )cnt + 1); ldv_43485: ; if ((unsigned int )cnt <= 15U) { goto ldv_43484; } else { } writew(0, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); __const_udelay(42950UL); return (data); } } static uint16_t qla2x00_get_nvram_word(struct qla_hw_data *ha , uint32_t addr ) { uint16_t data ; uint32_t nv_cmd ; { nv_cmd = addr << 16; nv_cmd = nv_cmd | 100663296U; data = qla2x00_nvram_request(ha, nv_cmd); return (data); } } static void qla2x00_nv_deselect(struct qla_hw_data *ha ) { struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; writew(0, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); __const_udelay(42950UL); return; } } static void qla2x00_write_nvram_word(struct qla_hw_data *ha , uint32_t addr , uint16_t data ) { int count ; uint16_t word ; uint32_t nv_cmd ; uint32_t wait_cnt ; struct device_reg_2xxx *reg ; scsi_qla_host_t *vha ; void *tmp ; { reg = & (ha->iobase)->isp; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; qla2x00_nv_write(ha, 4); qla2x00_nv_write(ha, 0); qla2x00_nv_write(ha, 0); word = 0U; goto ldv_43509; ldv_43508: qla2x00_nv_write(ha, 4); word = (uint16_t )((int )word + 1); ldv_43509: ; if ((unsigned int )word <= 7U) { goto ldv_43508; } else { } qla2x00_nv_deselect(ha); nv_cmd = (addr << 16) | 83886080U; nv_cmd = (uint32_t )data | nv_cmd; nv_cmd = nv_cmd << 5; count = 0; goto ldv_43512; ldv_43511: ; if ((int )nv_cmd < 0) { qla2x00_nv_write(ha, 4); } else { qla2x00_nv_write(ha, 0); } nv_cmd = nv_cmd << 1; count = count + 1; ldv_43512: ; if (count <= 26) { goto ldv_43511; } else { } qla2x00_nv_deselect(ha); writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); wait_cnt = 20000U; ldv_43515: wait_cnt = wait_cnt - 1U; if (wait_cnt == 0U) { ql_dbg(8388608U, vha, 28813, "NVRAM didn\'t go ready...\n"); goto ldv_43514; } else { } __const_udelay(42950UL); word = readw((void const volatile *)(& reg->nvram)); if (((int )word & 8) == 0) { goto ldv_43515; } else { } ldv_43514: qla2x00_nv_deselect(ha); qla2x00_nv_write(ha, 4); count = 0; goto ldv_43517; ldv_43516: qla2x00_nv_write(ha, 0); count = count + 1; ldv_43517: ; if (count <= 9) { goto ldv_43516; } else { } qla2x00_nv_deselect(ha); return; } } static int qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha , uint32_t addr , uint16_t data , uint32_t tmo ) { int ret ; int count ; uint16_t word ; uint32_t 
nv_cmd ; struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; ret = 0; qla2x00_nv_write(ha, 4); qla2x00_nv_write(ha, 0); qla2x00_nv_write(ha, 0); word = 0U; goto ldv_43531; ldv_43530: qla2x00_nv_write(ha, 4); word = (uint16_t )((int )word + 1); ldv_43531: ; if ((unsigned int )word <= 7U) { goto ldv_43530; } else { } qla2x00_nv_deselect(ha); nv_cmd = (addr << 16) | 83886080U; nv_cmd = (uint32_t )data | nv_cmd; nv_cmd = nv_cmd << 5; count = 0; goto ldv_43534; ldv_43533: ; if ((int )nv_cmd < 0) { qla2x00_nv_write(ha, 4); } else { qla2x00_nv_write(ha, 0); } nv_cmd = nv_cmd << 1; count = count + 1; ldv_43534: ; if (count <= 26) { goto ldv_43533; } else { } qla2x00_nv_deselect(ha); writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); ldv_43537: __const_udelay(42950UL); word = readw((void const volatile *)(& reg->nvram)); tmo = tmo - 1U; if (tmo == 0U) { ret = 258; goto ldv_43536; } else { } if (((int )word & 8) == 0) { goto ldv_43537; } else { } ldv_43536: qla2x00_nv_deselect(ha); qla2x00_nv_write(ha, 4); count = 0; goto ldv_43539; ldv_43538: qla2x00_nv_write(ha, 0); count = count + 1; ldv_43539: ; if (count <= 9) { goto ldv_43538; } else { } qla2x00_nv_deselect(ha); return (ret); } } static int qla2x00_clear_nvram_protection(struct qla_hw_data *ha ) { int ret ; int stat ; struct device_reg_2xxx *reg ; uint32_t word ; uint32_t wait_cnt ; uint16_t wprot ; uint16_t wprot_old ; scsi_qla_host_t *vha ; void *tmp ; unsigned short tmp___0 ; { reg = & (ha->iobase)->isp; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ret = 258; wprot_old = qla2x00_get_nvram_word(ha, (uint32_t )ha->nvram_base); stat = qla2x00_write_nvram_word_tmo(ha, (uint32_t )ha->nvram_base, 4660, 100000U); wprot = qla2x00_get_nvram_word(ha, (uint32_t )ha->nvram_base); if (stat != 0 || (unsigned int )wprot != 4660U) { qla2x00_nv_write(ha, 4); qla2x00_nv_write(ha, 0); qla2x00_nv_write(ha, 0); word = 0U; goto ldv_43553; ldv_43552: qla2x00_nv_write(ha, 4); word = word + 1U; ldv_43553: ; if (word <= 7U) { goto ldv_43552; } else { } qla2x00_nv_deselect(ha); qla2x00_nv_write(ha, 8196); qla2x00_nv_write(ha, 8192); qla2x00_nv_write(ha, 8192); word = 0U; goto ldv_43556; ldv_43555: qla2x00_nv_write(ha, 8196); word = word + 1U; ldv_43556: ; if (word <= 7U) { goto ldv_43555; } else { } qla2x00_nv_deselect(ha); qla2x00_nv_write(ha, 8196); qla2x00_nv_write(ha, 8196); qla2x00_nv_write(ha, 8196); word = 0U; goto ldv_43559; ldv_43558: qla2x00_nv_write(ha, 8196); word = word + 1U; ldv_43559: ; if (word <= 7U) { goto ldv_43558; } else { } qla2x00_nv_deselect(ha); writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); wait_cnt = 20000U; ldv_43562: wait_cnt = wait_cnt - 1U; if (wait_cnt == 0U) { ql_dbg(8388608U, vha, 28814, "NVRAM didn\'t go ready...\n"); goto ldv_43561; } else { } __const_udelay(42950UL); tmp___0 = readw((void const volatile *)(& reg->nvram)); word = (uint32_t )tmp___0; if ((word & 8U) == 0U) { goto ldv_43562; } else { } ldv_43561: ; if (wait_cnt != 0U) { ret = 0; } else { } } else { qla2x00_write_nvram_word(ha, (uint32_t )ha->nvram_base, (int )wprot_old); } return (ret); } } static void qla2x00_set_nvram_protection(struct qla_hw_data *ha , int stat ) { struct device_reg_2xxx *reg ; uint32_t word ; uint32_t wait_cnt ; scsi_qla_host_t *vha ; void *tmp ; unsigned short tmp___0 ; { reg = & (ha->iobase)->isp; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; if (stat != 0) { return; } else { } qla2x00_nv_write(ha, 4); qla2x00_nv_write(ha, 
0); qla2x00_nv_write(ha, 0); word = 0U; goto ldv_43572; ldv_43571: qla2x00_nv_write(ha, 4); word = word + 1U; ldv_43572: ; if (word <= 7U) { goto ldv_43571; } else { } qla2x00_nv_deselect(ha); qla2x00_nv_write(ha, 8196); qla2x00_nv_write(ha, 8192); qla2x00_nv_write(ha, 8192); word = 0U; goto ldv_43575; ldv_43574: qla2x00_nv_write(ha, 8196); word = word + 1U; ldv_43575: ; if (word <= 7U) { goto ldv_43574; } else { } qla2x00_nv_deselect(ha); qla2x00_nv_write(ha, 8196); qla2x00_nv_write(ha, 8192); qla2x00_nv_write(ha, 8196); word = 0U; goto ldv_43578; ldv_43577: qla2x00_nv_write(ha, 8192); word = word + 1U; ldv_43578: ; if (word <= 7U) { goto ldv_43577; } else { } qla2x00_nv_deselect(ha); writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); wait_cnt = 20000U; ldv_43581: wait_cnt = wait_cnt - 1U; if (wait_cnt == 0U) { ql_dbg(8388608U, vha, 28815, "NVRAM didn\'t go ready...\n"); goto ldv_43580; } else { } __const_udelay(42950UL); tmp___0 = readw((void const volatile *)(& reg->nvram)); word = (uint32_t )tmp___0; if ((word & 8U) == 0U) { goto ldv_43581; } else { } ldv_43580: ; return; } } __inline static uint32_t flash_conf_addr(struct qla_hw_data *ha , uint32_t faddr ) { { return (ha->flash_conf_off | faddr); } } __inline static uint32_t flash_data_addr(struct qla_hw_data *ha , uint32_t faddr ) { { return (ha->flash_data_off | faddr); } } __inline static uint32_t nvram_conf_addr(struct qla_hw_data *ha , uint32_t naddr ) { { return (ha->nvram_conf_off | naddr); } } __inline static uint32_t nvram_data_addr(struct qla_hw_data *ha , uint32_t naddr ) { { return (ha->nvram_data_off | naddr); } } static uint32_t qla24xx_read_flash_dword(struct qla_hw_data *ha , uint32_t addr ) { int rval ; uint32_t cnt ; uint32_t data ; struct device_reg_24xx *reg ; unsigned int tmp ; { reg = & (ha->iobase)->isp24; writel(addr & 2147483647U, (void volatile *)(& reg->flash_addr)); rval = 0; cnt = 3000U; goto ldv_43608; ldv_43607: ; if (cnt != 0U) { __const_udelay(42950UL); } else { rval = 256; } __might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_sup.o.c.prepared", 769, 0); _cond_resched(); cnt = cnt - 1U; ldv_43608: tmp = readl((void const volatile *)(& reg->flash_addr)); if ((int )tmp >= 0 && rval == 0) { goto ldv_43607; } else { } data = 3735936685U; if (rval == 0) { data = readl((void const volatile *)(& reg->flash_data)); } else { } return (data); } } uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *vha , uint32_t *dwptr , uint32_t faddr , uint32_t dwords ) { uint32_t i ; struct qla_hw_data *ha ; uint32_t tmp ; { ha = vha->hw; i = 0U; goto ldv_43619; ldv_43618: tmp = flash_data_addr(ha, faddr); *(dwptr + (unsigned long )i) = qla24xx_read_flash_dword(ha, tmp); i = i + 1U; faddr = faddr + 1U; ldv_43619: ; if (i < dwords) { goto ldv_43618; } else { } return (dwptr); } } static int qla24xx_write_flash_dword(struct qla_hw_data *ha , uint32_t addr , uint32_t data ) { int rval ; uint32_t cnt ; struct device_reg_24xx *reg ; unsigned int tmp ; { reg = & (ha->iobase)->isp24; writel(data, (void volatile *)(& reg->flash_data)); readl((void const volatile *)(& reg->flash_data)); writel(addr | 2147483648U, (void volatile *)(& reg->flash_addr)); rval = 0; cnt = 500000U; goto ldv_43631; ldv_43630: ; if (cnt != 0U) { __const_udelay(42950UL); } else { rval = 256; } 
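/* Flash-read completion poll: the loop below keeps waiting while bit 31 of
 * flash_addr stays clear and rval is still 0; once the retry budget (3000
 * iterations) is exhausted, rval is set to a nonzero failure status and the loop
 * exits, so flash_data is only read on success and the 0xDEADDEAD sentinel
 * (3735936685U) is returned otherwise.  The __might_sleep()/_cond_resched() pair
 * that follows carries the verifier's expanded __FILE__ path from the LDV run. */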
__might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_sup.o.c.prepared", 813, 0); _cond_resched(); cnt = cnt - 1U; ldv_43631: tmp = readl((void const volatile *)(& reg->flash_addr)); if ((int )tmp < 0 && rval == 0) { goto ldv_43630; } else { } return (rval); } } static void qla24xx_get_flash_manufacturer(struct qla_hw_data *ha , uint8_t *man_id , uint8_t *flash_id ) { uint32_t ids ; uint32_t tmp ; uint32_t tmp___0 ; { tmp = flash_conf_addr(ha, 939U); ids = qla24xx_read_flash_dword(ha, tmp); *man_id = (unsigned char )ids; *flash_id = (unsigned char )((int )((unsigned short )ids) >> 8); if (ids != 3735936685U && ((unsigned int )*man_id == 0U || (unsigned int )*flash_id == 0U)) { tmp___0 = flash_conf_addr(ha, 159U); ids = qla24xx_read_flash_dword(ha, tmp___0); *man_id = (unsigned char )ids; *flash_id = (unsigned char )((int )((unsigned short )ids) >> 8); } else { } return; } } static int qla2xxx_find_flt_start(scsi_qla_host_t *vha , uint32_t *start ) { char const *loc ; char const *locations[2U] ; uint32_t pcihdr ; uint32_t pcids ; uint32_t *dcode ; uint8_t *buf ; uint8_t *bcode ; uint8_t last_image ; uint16_t cnt ; uint16_t chksum ; uint16_t *wptr ; struct qla_flt_location *fltl ; struct qla_hw_data *ha ; struct req_que *req ; uint16_t *tmp ; { locations[0] = "DEF"; locations[1] = "PCI"; ha = vha->hw; req = *(ha->req_q_map); loc = locations[0]; *start = 0U; if ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) { *start = 70656U; } else if ((ha->device_type & 2048U) != 0U) { *start = 328704U; } else if ((ha->device_type & 8192U) != 0U) { *start = 885760U; } else if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { *start = 1033216U; goto end; } else if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { *start = 1033216U; goto end; } else { } buf = (uint8_t *)req->ring; dcode = (uint32_t *)req->ring; pcihdr = 0U; last_image = 1U; ldv_43658: qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 32U); bcode = buf + ((unsigned long )pcihdr & 3UL); if ((unsigned int )*bcode != 85U || (unsigned int )*(bcode + 1UL) != 170U) { goto end; } else { } pcids = (uint32_t )(((int )*(bcode + 25UL) << 8) | (int )*(bcode + 24UL)) + pcihdr; qla24xx_read_flash_data(vha, dcode, pcids >> 2, 32U); bcode = buf + ((unsigned long )pcihdr & 3UL); if ((((unsigned int )*bcode != 80U || (unsigned int )*(bcode + 1UL) != 67U) || (unsigned int )*(bcode + 2UL) != 73U) || (unsigned int )*(bcode + 3UL) != 82U) { goto end; } else { } last_image = (unsigned int )*(bcode + 21UL) & 128U; pcihdr = (uint32_t )((((int )*(bcode + 17UL) << 8) | (int )*(bcode + 16UL)) * 512) + pcihdr; if ((unsigned int )last_image == 0U) { goto ldv_43658; } else { } fltl = (struct qla_flt_location *)req->ring; qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 4U); if ((((unsigned int )fltl->sig[0] != 81U || (unsigned int )fltl->sig[1] != 70U) || (unsigned int )fltl->sig[2] != 76U) || (unsigned int )fltl->sig[3] != 84U) { goto end; } else { } wptr = (uint16_t *)req->ring; cnt = 8U; chksum = 0U; goto ldv_43661; ldv_43660: tmp = wptr; wptr = wptr + 1; chksum = (int )*tmp + (int )chksum; cnt = (uint16_t )((int )cnt - 1); ldv_43661: ; if ((unsigned int )cnt != 0U) { goto ldv_43660; } else { } if ((unsigned int 
)chksum != 0U) { ql_log(0U, vha, 69, "Inconsistent FLTL detected: checksum=0x%x.\n", (int )chksum); ql_dump_buffer(1073872896U, vha, 270, buf, 16U); return (258); } else { } loc = locations[1]; *start = (uint32_t )((((int )fltl->start_hi << 16) | (int )fltl->start_lo) >> 2); end: ql_dbg(1073741824U, vha, 70, "FLTL[%s] = 0x%x.\n", loc, *start); return (0); } } static void qla2xxx_get_flt_info(scsi_qla_host_t *vha , uint32_t flt_addr ) { char const *loc ; char const *locations[2U] ; uint32_t def_fw[3U] ; uint32_t def_boot[3U] ; uint32_t def_vpd_nvram[3U] ; uint32_t def_vpd0[3U] ; uint32_t def_vpd1[3U] ; uint32_t def_nvram0[3U] ; uint32_t def_nvram1[3U] ; uint32_t def_fdt[3U] ; uint32_t def_npiv_conf0[3U] ; uint32_t def_npiv_conf1[3U] ; uint32_t fcp_prio_cfg0[3U] ; uint32_t fcp_prio_cfg1[3U] ; uint32_t def ; uint16_t *wptr ; uint16_t cnt ; uint16_t chksum ; uint32_t start ; struct qla_flt_header *flt ; struct qla_flt_region *region ; struct qla_hw_data *ha ; struct req_que *req ; uint16_t *tmp ; { locations[0] = "DEF"; locations[1] = "FLT"; def_fw[0] = 131072U; def_fw[1] = 131072U; def_fw[2] = 655360U; def_boot[0] = 0U; def_boot[1] = 0U; def_boot[2] = 524288U; def_vpd_nvram[0] = 294912U; def_vpd_nvram[1] = 294912U; def_vpd_nvram[2] = 851968U; def_vpd0[0] = 0U; def_vpd0[1] = 0U; def_vpd0[2] = 851968U; def_vpd1[0] = 0U; def_vpd1[1] = 0U; def_vpd1[2] = 852992U; def_nvram0[0] = 0U; def_nvram0[1] = 0U; def_nvram0[2] = 852096U; def_nvram1[0] = 0U; def_nvram1[1] = 0U; def_nvram1[2] = 852352U; def_fdt[0] = 69632U; def_fdt[1] = 327680U; def_fdt[2] = 884736U; def_npiv_conf0[0] = 90112U; def_npiv_conf0[1] = 376832U; def_npiv_conf0[2] = 856064U; def_npiv_conf1[0] = 94208U; def_npiv_conf1[1] = 380928U; def_npiv_conf1[2] = 860160U; fcp_prio_cfg0[0] = 65536U; fcp_prio_cfg0[1] = 245760U; fcp_prio_cfg0[2] = 0U; fcp_prio_cfg1[0] = 73728U; fcp_prio_cfg1[1] = 253952U; fcp_prio_cfg1[2] = 0U; ha = vha->hw; req = *(ha->req_q_map); def = 0U; if ((ha->device_type & 2048U) != 0U) { def = 1U; } else if ((ha->device_type & 8192U) != 0U) { def = 2U; } else { } ha->flt_region_fcp_prio = *((unsigned long *)ha + 2UL) != 0UL ? 
fcp_prio_cfg0[def] : fcp_prio_cfg1[def]; ha->flt_region_flt = flt_addr; wptr = (uint16_t *)req->ring; flt = (struct qla_flt_header *)req->ring; region = (struct qla_flt_region *)flt + 1U; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)req->ring, flt_addr << 2, 4096U); if ((unsigned int )*wptr == 65535U) { goto no_flash_data; } else { } if ((unsigned int )flt->version != 1U) { ql_log(1U, vha, 71, "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n", (int )flt->version, (int )flt->length, (int )flt->checksum); goto no_flash_data; } else { } cnt = (uint16_t )(((unsigned long )flt->length + 8UL) >> 1); chksum = 0U; goto ldv_43692; ldv_43691: tmp = wptr; wptr = wptr + 1; chksum = (int )*tmp + (int )chksum; cnt = (uint16_t )((int )cnt - 1); ldv_43692: ; if ((unsigned int )cnt != 0U) { goto ldv_43691; } else { } if ((unsigned int )chksum != 0U) { ql_log(0U, vha, 72, "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n", (int )flt->version, (int )flt->length, (int )flt->checksum); goto no_flash_data; } else { } loc = locations[1]; cnt = (uint16_t )((unsigned int )flt->length / 16U); goto ldv_43718; ldv_43717: start = region->start >> 2; ql_dbg(1073741824U, vha, 73, "FLT[%02x]: start=0x%x end=0x%x size=0x%x.\n", region->code & 255U, start, region->end >> 2, region->size); switch (region->code & 255U) { case 164U: ; if ((ha->device_type & 65536U) == 0U) { goto ldv_43695; } else { } ha->flt_region_fw = start; goto ldv_43695; case 1U: ; if ((ha->device_type & 65536U) != 0U) { goto ldv_43695; } else { } ha->flt_region_fw = start; goto ldv_43695; case 7U: ha->flt_region_boot = start; goto ldv_43695; case 20U: ; if ((ha->device_type & 65536U) != 0U) { goto ldv_43695; } else { } ha->flt_region_vpd_nvram = start; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { goto ldv_43695; } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { ha->flt_region_vpd = start; } else { } goto ldv_43695; case 22U: ; if (((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) || (ha->device_type & 65536U) != 0U) { goto ldv_43695; } else { } if (*((unsigned long *)ha + 2UL) == 0UL) { ha->flt_region_vpd = start; } else { } goto ldv_43695; case 21U: ; if ((ha->device_type & 65536U) != 0U) { goto ldv_43695; } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { ha->flt_region_nvram = start; } else { } goto ldv_43695; case 23U: ; if ((ha->device_type & 65536U) != 0U) { goto ldv_43695; } else { } if (*((unsigned long *)ha + 2UL) == 0UL) { ha->flt_region_nvram = start; } else { } goto ldv_43695; case 26U: ha->flt_region_fdt = start; goto ldv_43695; case 41U: ; if (*((unsigned long *)ha + 2UL) != 0UL) { ha->flt_region_npiv_conf = start; } else { } goto ldv_43695; case 42U: ; if (*((unsigned long *)ha + 2UL) == 0UL) { ha->flt_region_npiv_conf = start; } else { } goto ldv_43695; case 47U: ha->flt_region_gold_fw = start; goto ldv_43695; case 135U: ; if (*((unsigned long *)ha + 2UL) != 0UL) { ha->flt_region_fcp_prio = start; } else { } goto ldv_43695; case 136U: ; if (*((unsigned long *)ha + 2UL) == 0UL) { ha->flt_region_fcp_prio = start; } else { } goto ldv_43695; case 120U: ha->flt_region_boot = start; goto ldv_43695; case 162U: ; if ((ha->device_type & 262144U) != 0U) { ha->flt_region_boot = start; } else { } goto ldv_43695; case 116U: ha->flt_region_fw = start; goto ldv_43695; case 151U: ; if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { ha->flt_region_fw = 
start; } else { } goto ldv_43695; case 117U: ha->flt_region_gold_fw = start; goto ldv_43695; case 114U: ha->flt_region_bootload = start; goto ldv_43695; case 129U: ; if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { ha->flt_region_vpd = start; } else { } goto ldv_43695; case 170U: ; if ((ha->device_type & 65536U) == 0U && (ha->device_type & 262144U) == 0U) { goto ldv_43695; } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { ha->flt_region_nvram = start; } else { } goto ldv_43695; case 172U: ; if ((ha->device_type & 65536U) == 0U && (ha->device_type & 262144U) == 0U) { goto ldv_43695; } else { } if (*((unsigned long *)ha + 2UL) == 0UL) { ha->flt_region_nvram = start; } else { } goto ldv_43695; } ldv_43695: cnt = (uint16_t )((int )cnt - 1); region = region + 1; ldv_43718: ; if ((unsigned int )cnt != 0U) { goto ldv_43717; } else { } goto done; no_flash_data: loc = locations[0]; ha->flt_region_fw = def_fw[def]; ha->flt_region_boot = def_boot[def]; ha->flt_region_vpd_nvram = def_vpd_nvram[def]; ha->flt_region_vpd = *((unsigned long *)ha + 2UL) != 0UL ? def_vpd0[def] : def_vpd1[def]; ha->flt_region_nvram = *((unsigned long *)ha + 2UL) != 0UL ? def_nvram0[def] : def_nvram1[def]; ha->flt_region_fdt = def_fdt[def]; ha->flt_region_npiv_conf = *((unsigned long *)ha + 2UL) != 0UL ? def_npiv_conf0[def] : def_npiv_conf1[def]; done: ql_dbg(1073741824U, vha, 74, "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prio_cfg=0x%x.\n", loc, ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram, ha->flt_region_vpd, ha->flt_region_nvram, ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf, ha->flt_region_fcp_prio); return; } } static void qla2xxx_get_fdt_info(scsi_qla_host_t *vha ) { char const *loc ; char const *locations[2U] ; uint16_t cnt ; uint16_t chksum ; uint16_t *wptr ; struct qla_fdt_layout *fdt ; uint8_t man_id ; uint8_t flash_id ; uint16_t mid ; uint16_t fid ; struct qla_hw_data *ha ; struct req_que *req ; uint16_t *tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; { locations[0] = "MID"; locations[1] = "FDT"; mid = 0U; fid = 0U; ha = vha->hw; req = *(ha->req_q_map); wptr = (uint16_t *)req->ring; fdt = (struct qla_fdt_layout *)req->ring; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)req->ring, ha->flt_region_fdt << 2, 4096U); if ((unsigned int )*wptr == 65535U) { goto no_flash_data; } else { } if ((((unsigned int )fdt->sig[0] != 81U || (unsigned int )fdt->sig[1] != 76U) || (unsigned int )fdt->sig[2] != 73U) || (unsigned int )fdt->sig[3] != 68U) { goto no_flash_data; } else { } cnt = 0U; chksum = 0U; goto ldv_43738; ldv_43737: tmp = wptr; wptr = wptr + 1; chksum = (int )*tmp + (int )chksum; cnt = (uint16_t )((int )cnt + 1); ldv_43738: ; if ((unsigned int )cnt <= 63U) { goto ldv_43737; } else { } if ((unsigned int )chksum != 0U) { ql_dbg(1073741824U, vha, 76, "Inconsistent FDT detected: checksum=0x%x id=%c version=0x%x.\n", (int )chksum, (int )fdt->sig[0], (int )fdt->version); ql_dump_buffer(1073872896U, vha, 275, (uint8_t *)fdt, 128U); goto no_flash_data; } else { } loc = locations[1]; mid = fdt->man_id; fid = fdt->id; ha->fdt_wrt_disable = (uint32_t )fdt->wrt_disable_bits; ha->fdt_wrt_enable = (uint32_t )fdt->wrt_enable_bits; ha->fdt_wrt_sts_reg_cmd = (uint32_t )fdt->wrt_sts_reg_cmd; if ((ha->device_type & 262144U) != 0U) { ha->fdt_erase_cmd = (uint32_t )fdt->erase_cmd; } else { ha->fdt_erase_cmd = flash_conf_addr(ha, (uint32_t )((int 
)fdt->erase_cmd | 768)); } ha->fdt_block_size = fdt->block_size; if ((unsigned int )fdt->unprotect_sec_cmd != 0U) { ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, (uint32_t )((int )fdt->unprotect_sec_cmd | 768)); if ((unsigned int )fdt->protect_sec_cmd != 0U) { tmp___0 = flash_conf_addr(ha, (uint32_t )((int )fdt->protect_sec_cmd | 768)); ha->fdt_protect_sec_cmd = tmp___0; } else { tmp___1 = flash_conf_addr(ha, 822U); ha->fdt_protect_sec_cmd = tmp___1; } } else { } goto done; no_flash_data: loc = locations[0]; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { ha->fdt_block_size = 65536U; goto done; } else { } qla24xx_get_flash_manufacturer(ha, & man_id, & flash_id); mid = (uint16_t )man_id; fid = (uint16_t )flash_id; ha->fdt_wrt_disable = 156U; ha->fdt_erase_cmd = flash_conf_addr(ha, 984U); switch ((int )man_id) { case 191: ; if ((unsigned int )flash_id == 142U) { ha->fdt_block_size = 65536U; } else { ha->fdt_block_size = 32768U; } if ((unsigned int )flash_id == 128U) { ha->fdt_erase_cmd = flash_conf_addr(ha, 850U); } else { } goto ldv_43742; case 19: ha->fdt_block_size = 65536U; goto ldv_43742; case 31: ha->fdt_block_size = 4096U; ha->fdt_erase_cmd = flash_conf_addr(ha, 800U); ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 825U); ha->fdt_protect_sec_cmd = flash_conf_addr(ha, 822U); goto ldv_43742; default: ha->fdt_block_size = 65536U; goto ldv_43742; } ldv_43742: ; done: ql_dbg(1073741824U, vha, 77, "FDT[%s]: (0x%x/0x%x) erase=0x%x pr=%x wrtd=0x%x blk=0x%x.\n", loc, (int )mid, (int )fid, ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, ha->fdt_wrt_disable, ha->fdt_block_size); return; } } static void qla2xxx_get_idc_param(scsi_qla_host_t *vha ) { uint32_t *wptr ; struct qla_hw_data *ha ; struct req_que *req ; uint32_t *tmp ; { ha = vha->hw; req = *(ha->req_q_map); if ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) { return; } else { } wptr = (uint32_t *)req->ring; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)req->ring, 4098140U, 8U); if (*wptr == 4294967295U) { ha->fcoe_dev_init_timeout = 30U; ha->fcoe_reset_timeout = 10U; } else { tmp = wptr; wptr = wptr + 1; ha->fcoe_dev_init_timeout = *tmp; ha->fcoe_reset_timeout = *wptr; } ql_dbg(1073741824U, vha, 78, "fcoe_dev_init_timeout=%d fcoe_reset_timeout=%d.\n", ha->fcoe_dev_init_timeout, ha->fcoe_reset_timeout); return; } } int qla2xxx_get_flash_info(scsi_qla_host_t *vha ) { int ret ; uint32_t flt_addr ; struct qla_hw_data *ha ; { ha = vha->hw; if (((((((ha->device_type & 128U) == 0U && (ha->device_type & 256U) == 0U) && ((ha->device_type & 512U) == 0U && (ha->device_type & 1024U) == 0U)) && (ha->device_type & 4096U) == 0U) && (ha->device_type & 2048U) == 0U) && ((((ha->device_type & 8192U) == 0U && (ha->device_type & 16384U) == 0U) && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U)) && (ha->device_type & 32768U) == 0U) { return (0); } else { } ret = qla2xxx_find_flt_start(vha, & flt_addr); if (ret != 0) { return (ret); } else { } qla2xxx_get_flt_info(vha, flt_addr); qla2xxx_get_fdt_info(vha); qla2xxx_get_idc_param(vha); return (0); } } void qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha ) { void *data ; uint16_t *wptr ; uint16_t cnt ; uint16_t chksum ; int i ; struct qla_npiv_header hdr ; struct qla_npiv_entry *entry ; struct qla_hw_data *ha ; uint16_t *tmp ; uint16_t flags ; struct fc_vport_identifiers vid ; struct fc_vport *vport ; size_t __len ; void *__ret ; { ha = vha->hw; if (((((((ha->device_type & 128U) == 0U && (ha->device_type & 256U) == 0U) && 
((ha->device_type & 512U) == 0U && (ha->device_type & 1024U) == 0U)) && (ha->device_type & 4096U) == 0U) && (ha->device_type & 2048U) == 0U) && ((((ha->device_type & 8192U) == 0U && (ha->device_type & 16384U) == 0U) && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U)) && (ha->device_type & 32768U) == 0U) { return; } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { return; } else { } if ((ha->device_type & 262144U) != 0U) { return; } else { } (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)(& hdr), ha->flt_region_npiv_conf << 2, 16U); if ((unsigned int )hdr.version == 65535U) { return; } else { } if ((unsigned int )hdr.version != 1U) { ql_dbg(8388608U, vha, 28816, "Unsupported NPIV-Config detected: version=0x%x entries=0x%x checksum=0x%x.\n", (int )hdr.version, (int )hdr.entries, (int )hdr.checksum); return; } else { } data = kmalloc(16384UL, 208U); if ((unsigned long )data == (unsigned long )((void *)0)) { ql_log(1U, vha, 28817, "Unable to allocate memory for data.\n"); return; } else { } (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)data, ha->flt_region_npiv_conf << 2, 16384U); cnt = (uint16_t )(((unsigned long )hdr.entries * 24UL + 16UL) >> 1); wptr = (uint16_t *)data; chksum = 0U; goto ldv_43770; ldv_43769: tmp = wptr; wptr = wptr + 1; chksum = (int )*tmp + (int )chksum; cnt = (uint16_t )((int )cnt - 1); ldv_43770: ; if ((unsigned int )cnt != 0U) { goto ldv_43769; } else { } if ((unsigned int )chksum != 0U) { ql_dbg(8388608U, vha, 28818, "Inconsistent NPIV-Config detected: version=0x%x entries=0x%x checksum=0x%x.\n", (int )hdr.version, (int )hdr.entries, (int )hdr.checksum); goto done; } else { } entry = (struct qla_npiv_entry *)data + 16U; cnt = hdr.entries; i = 0; goto ldv_43781; ldv_43780: __len = 24UL; if (__len > 63UL) { __ret = __memcpy((void *)ha->npiv_info + (unsigned long )i, (void const *)entry, __len); } else { __ret = __builtin_memcpy((void *)ha->npiv_info + (unsigned long )i, (void const *)entry, __len); } flags = entry->flags; if ((unsigned int )flags == 65535U) { goto ldv_43779; } else { } if (((int )flags & 1) == 0) { goto ldv_43779; } else { } memset((void *)(& vid), 0, 96UL); vid.roles = 2U; vid.vport_type = 7; vid.disable = 0; vid.port_name = wwn_to_u64((u8 *)(& entry->port_name)); vid.node_name = wwn_to_u64((u8 *)(& entry->node_name)); ql_dbg(8388608U, vha, 28819, "NPIV[%02x]: wwpn=%llx wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", (int )cnt, vid.port_name, vid.node_name, (int )entry->vf_id, (int )entry->q_qos, (int )entry->f_qos); if (i <= 31) { vport = fc_vport_create(vha->host, 0, & vid); if ((unsigned long )vport == (unsigned long )((struct fc_vport *)0)) { ql_log(1U, vha, 28820, "NPIV-Config Failed to create vport [%02x]: wwpn=%llx wwnn=%llx.\n", (int )cnt, vid.port_name, vid.node_name); } else { } } else { } ldv_43779: cnt = (uint16_t )((int )cnt - 1); entry = entry + 1; i = i + 1; ldv_43781: ; if ((unsigned int )cnt != 0U) { goto ldv_43780; } else { } done: kfree((void const *)data); return; } } static int qla24xx_unprotect_flash(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct device_reg_24xx *reg ; int tmp ; unsigned int tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; if (*((unsigned long *)ha + 2UL) != 0UL) { tmp = qla81xx_fac_do_write_enable(vha, 1); return (tmp); } else { } tmp___0 = readl((void const volatile *)(& reg->ctrl_status)); writel(tmp___0 | 2U, (void volatile *)(& reg->ctrl_status)); readl((void const volatile *)(& reg->ctrl_status)); if (ha->fdt_wrt_disable == 
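/* qla24xx_unprotect_flash: when the FDT supplies no write-disable bits there is nothing to clear; otherwise the two writes to the flash configuration register at offset 0x101 (257U) below lift the status-register write protection. */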
0U) { goto done; } else { } tmp___1 = flash_conf_addr(ha, 257U); qla24xx_write_flash_dword(ha, tmp___1, 0U); tmp___2 = flash_conf_addr(ha, 257U); qla24xx_write_flash_dword(ha, tmp___2, 0U); done: ; return (0); } } static int qla24xx_protect_flash(scsi_qla_host_t *vha ) { uint32_t cnt ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; int tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; unsigned int tmp___3 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; if (*((unsigned long *)ha + 2UL) != 0UL) { tmp = qla81xx_fac_do_write_enable(vha, 0); return (tmp); } else { } if (ha->fdt_wrt_disable == 0U) { goto skip_wrt_protect; } else { } tmp___0 = flash_conf_addr(ha, 257U); qla24xx_write_flash_dword(ha, tmp___0, ha->fdt_wrt_disable); cnt = 300U; goto ldv_43797; ldv_43796: __const_udelay(42950UL); cnt = cnt - 1U; ldv_43797: ; if (cnt != 0U) { tmp___1 = flash_conf_addr(ha, 5U); tmp___2 = qla24xx_read_flash_dword(ha, tmp___1); if ((int )tmp___2 & 1) { goto ldv_43796; } else { goto ldv_43798; } } else { } ldv_43798: ; skip_wrt_protect: tmp___3 = readl((void const volatile *)(& reg->ctrl_status)); writel(tmp___3 & 4294967293U, (void volatile *)(& reg->ctrl_status)); readl((void const volatile *)(& reg->ctrl_status)); return (0); } } static int qla24xx_erase_sector(scsi_qla_host_t *vha , uint32_t fdata ) { struct qla_hw_data *ha ; uint32_t start ; uint32_t finish ; uint32_t tmp ; uint32_t tmp___0 ; int tmp___1 ; int tmp___2 ; { ha = vha->hw; if (*((unsigned long *)ha + 2UL) != 0UL) { start = fdata >> 2; finish = ((ha->fdt_block_size >> 2) + start) - 1U; tmp = flash_data_addr(ha, finish); tmp___0 = flash_data_addr(ha, start); tmp___1 = qla81xx_fac_erase_sector(vha, tmp___0, tmp); return (tmp___1); } else { } tmp___2 = qla24xx_write_flash_dword(ha, ha->fdt_erase_cmd, ((fdata & 65280U) | ((fdata << 16) & 16711680U)) | ((fdata >> 16) & 255U)); return (tmp___2); } } static int qla24xx_write_flash_data(scsi_qla_host_t *vha , uint32_t *dwptr , uint32_t faddr , uint32_t dwords ) { int ret ; uint32_t liter ; uint32_t sec_mask ; uint32_t rest_addr ; uint32_t fdata ; dma_addr_t optrom_dma ; void *optrom ; struct qla_hw_data *ha ; size_t __len ; void *__ret ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; { optrom = (void *)0; ha = vha->hw; if (((((ha->device_type & 2048U) != 0U || (ha->device_type & 8192U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) && (faddr & 4095U) == 0U) && dwords > 1024U) { optrom = dma_alloc_attrs(& (ha->pdev)->dev, 4096UL, & optrom_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )optrom == (unsigned long )((void *)0)) { ql_log(1U, vha, 28821, "Unable to allocate memory for optrom burst write (%x KB).\n", 4); } else { } } else { } rest_addr = (ha->fdt_block_size >> 2) - 1U; sec_mask = ~ rest_addr; ret = qla24xx_unprotect_flash(vha); if (ret != 0) { ql_log(1U, vha, 28822, "Unable to unprotect flash for update.\n"); goto done; } else { } liter = 0U; goto ldv_43827; ldv_43826: fdata = (faddr & sec_mask) << 2; if ((faddr & rest_addr) == 0U) { if (ha->fdt_unprotect_sec_cmd != 0U) { qla24xx_write_flash_dword(ha, ha->fdt_unprotect_sec_cmd, ((fdata & 65280U) | ((fdata << 16) & 16711680U)) | ((fdata >> 16) & 255U)); } else { } ret = qla24xx_erase_sector(vha, fdata); if (ret != 0) { ql_dbg(8388608U, vha, 28679, "Unable to erase sector: address=%x.\n", faddr); goto ldv_43821; } else { } } else { } if ((unsigned long )optrom != (unsigned long )((void *)0) && liter + 1024U <= dwords) { __len = 4096UL; if (__len > 63UL) { __ret = 
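/* Burst path: stage the next 4 KB (1024 dwords) in the DMA buffer and let qla2x00_load_ram() write it in one firmware command; on failure the buffer is freed and the loop reverts to programming a dword at a time. */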
__memcpy(optrom, (void const *)dwptr, __len); } else { __ret = __builtin_memcpy(optrom, (void const *)dwptr, __len); } tmp = flash_data_addr(ha, faddr); ret = qla2x00_load_ram(vha, optrom_dma, tmp, 1024U); if (ret != 0) { tmp___0 = flash_data_addr(ha, faddr); ql_log(1U, vha, 28823, "Unable to burst-write optrom segment (%x/%x/%llx).\n", ret, tmp___0, optrom_dma); ql_log(1U, vha, 28824, "Reverting to slow-write.\n"); dma_free_attrs(& (ha->pdev)->dev, 4096UL, optrom, optrom_dma, (struct dma_attrs *)0); optrom = (void *)0; } else { liter = liter + 1023U; faddr = faddr + 1023U; dwptr = dwptr + 1023UL; goto ldv_43825; } } else { } tmp___1 = flash_data_addr(ha, faddr); ret = qla24xx_write_flash_dword(ha, tmp___1, *dwptr); if (ret != 0) { ql_dbg(8388608U, vha, 28678, "Unable to program flash address=%x data=%x.\n", faddr, *dwptr); goto ldv_43821; } else { } if (ha->fdt_unprotect_sec_cmd != 0U && (faddr & rest_addr) == rest_addr) { qla24xx_write_flash_dword(ha, ha->fdt_protect_sec_cmd, ((fdata & 65280U) | ((fdata << 16) & 16711680U)) | ((fdata >> 16) & 255U)); } else { } ldv_43825: liter = liter + 1U; faddr = faddr + 1U; dwptr = dwptr + 1; ldv_43827: ; if (liter < dwords) { goto ldv_43826; } else { } ldv_43821: ret = qla24xx_protect_flash(vha); if (ret != 0) { ql_log(1U, vha, 28825, "Unable to protect flash after update.\n"); } else { } done: ; if ((unsigned long )optrom != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 4096UL, optrom, optrom_dma, (struct dma_attrs *)0); } else { } return (ret); } } uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) { uint32_t i ; uint16_t *wptr ; struct qla_hw_data *ha ; { ha = vha->hw; wptr = (uint16_t *)buf; qla2x00_lock_nvram_access(ha); i = 0U; goto ldv_43838; ldv_43837: *(wptr + (unsigned long )i) = qla2x00_get_nvram_word(ha, naddr); i = i + 1U; naddr = naddr + 1U; ldv_43838: ; if (bytes >> 1 > i) { goto ldv_43837; } else { } qla2x00_unlock_nvram_access(ha); return (buf); } } uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) { uint32_t i ; uint32_t *dwptr ; struct qla_hw_data *ha ; uint32_t tmp ; { ha = vha->hw; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return (buf); } else { } dwptr = (uint32_t *)buf; i = 0U; goto ldv_43850; ldv_43849: tmp = nvram_data_addr(ha, naddr); *(dwptr + (unsigned long )i) = qla24xx_read_flash_dword(ha, tmp); i = i + 1U; naddr = naddr + 1U; ldv_43850: ; if (bytes >> 2 > i) { goto ldv_43849; } else { } return (buf); } } int qla2x00_write_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) { int ret ; int stat ; uint32_t i ; uint16_t *wptr ; unsigned long flags ; struct qla_hw_data *ha ; raw_spinlock_t *tmp ; { ha = vha->hw; ret = 0; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); qla2x00_lock_nvram_access(ha); stat = qla2x00_clear_nvram_protection(ha); wptr = (uint16_t *)buf; i = 0U; goto ldv_43868; ldv_43867: qla2x00_write_nvram_word(ha, naddr, (int )*wptr); wptr = wptr + 1; i = i + 1U; naddr = naddr + 1U; ldv_43868: ; if (bytes >> 1 > i) { goto ldv_43867; } else { } qla2x00_set_nvram_protection(ha, stat); qla2x00_unlock_nvram_access(ha); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (ret); } } int qla24xx_write_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) { int ret ; uint32_t i ; uint32_t *dwptr ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; unsigned 
int tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; unsigned int tmp___4 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; ret = 0; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return (ret); } else { } tmp = readl((void const volatile *)(& reg->ctrl_status)); writel(tmp | 2U, (void volatile *)(& reg->ctrl_status)); readl((void const volatile *)(& reg->ctrl_status)); tmp___0 = nvram_conf_addr(ha, 257U); qla24xx_write_flash_dword(ha, tmp___0, 0U); tmp___1 = nvram_conf_addr(ha, 257U); qla24xx_write_flash_dword(ha, tmp___1, 0U); dwptr = (uint32_t *)buf; i = 0U; goto ldv_43883; ldv_43882: tmp___2 = nvram_data_addr(ha, naddr); ret = qla24xx_write_flash_dword(ha, tmp___2, *dwptr); if (ret != 0) { ql_dbg(8388608U, vha, 28826, "Unable to program nvram address=%x data=%x.\n", naddr, *dwptr); goto ldv_43881; } else { } i = i + 1U; naddr = naddr + 1U; dwptr = dwptr + 1; ldv_43883: ; if (bytes >> 2 > i) { goto ldv_43882; } else { } ldv_43881: tmp___3 = nvram_conf_addr(ha, 257U); qla24xx_write_flash_dword(ha, tmp___3, 140U); tmp___4 = readl((void const volatile *)(& reg->ctrl_status)); writel(tmp___4 & 4294967293U, (void volatile *)(& reg->ctrl_status)); readl((void const volatile *)(& reg->ctrl_status)); return (ret); } } uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) { uint32_t i ; uint32_t *dwptr ; struct qla_hw_data *ha ; uint32_t tmp ; { ha = vha->hw; dwptr = (uint32_t *)buf; i = 0U; goto ldv_43894; ldv_43893: tmp = flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr); *(dwptr + (unsigned long )i) = qla24xx_read_flash_dword(ha, tmp); i = i + 1U; naddr = naddr + 1U; ldv_43894: ; if (bytes >> 2 > i) { goto ldv_43893; } else { } return (buf); } } int qla25xx_write_nvram_data(scsi_qla_host_t *vha , uint8_t *buf , uint32_t naddr , uint32_t bytes ) { struct qla_hw_data *ha ; uint8_t *dbuf ; void *tmp ; size_t __len ; void *__ret ; { ha = vha->hw; tmp = vmalloc(65536UL); dbuf = (uint8_t *)tmp; if ((unsigned long )dbuf == (unsigned long )((uint8_t *)0U)) { return (259); } else { } (*((ha->isp_ops)->read_optrom))(vha, dbuf, ha->flt_region_vpd_nvram << 2, 65536U); __len = (size_t )bytes; __ret = __builtin_memcpy((void *)dbuf + (unsigned long )(naddr << 2), (void const *)buf, __len); (*((ha->isp_ops)->write_optrom))(vha, dbuf, ha->flt_region_vpd_nvram << 2, 65536U); vfree((void const *)dbuf); return (0); } } __inline static void qla2x00_flip_colors(struct qla_hw_data *ha , uint16_t *pflags ) { { if ((ha->device_type & 16U) != 0U) { if ((unsigned int )ha->beacon_color_state == 7U) { ha->beacon_color_state = 0U; *pflags = 0U; } else { ha->beacon_color_state = 7U; *pflags = 193U; } } else if ((unsigned int )ha->beacon_color_state == 1U) { ha->beacon_color_state = 0U; *pflags = 0U; } else { ha->beacon_color_state = 1U; *pflags = 64U; } return; } } void qla2x00_beacon_blink(struct scsi_qla_host *vha ) { uint16_t gpio_enable ; uint16_t gpio_data ; uint16_t led_color ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; { led_color = 0U; ha = vha->hw; reg = & (ha->iobase)->isp; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if (ha->pio_address != 0ULL) { gpio_enable = inw((int )((unsigned int )ha->pio_address + 206U)); gpio_data = inw((int )((unsigned int )ha->pio_address + 204U)); } else { gpio_enable = readw((void const 
volatile *)(& reg->gpioe)); gpio_data = readw((void const volatile *)(& reg->gpiod)); } gpio_enable = (uint16_t )((unsigned int )gpio_enable | 192U); if (ha->pio_address != 0ULL) { outw((int )gpio_enable, (int )((unsigned int )ha->pio_address + 206U)); } else { writew((int )gpio_enable, (void volatile *)(& reg->gpioe)); readw((void const volatile *)(& reg->gpioe)); } qla2x00_flip_colors(ha, & led_color); gpio_data = (unsigned int )gpio_data & 65343U; gpio_data = (uint16_t )((int )gpio_data | (int )led_color); if (ha->pio_address != 0ULL) { outw((int )gpio_data, (int )((unsigned int )ha->pio_address + 204U)); } else { writew((int )gpio_data, (void volatile *)(& reg->gpiod)); readw((void const volatile *)(& reg->gpiod)); } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } int qla2x00_beacon_on(struct scsi_qla_host *vha ) { uint16_t gpio_enable ; uint16_t gpio_data ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; int tmp ; raw_spinlock_t *tmp___0 ; { ha = vha->hw; reg = & (ha->iobase)->isp; ha->fw_options[1] = (unsigned int )ha->fw_options[1] & 65279U; ha->fw_options[1] = (uint16_t )((unsigned int )ha->fw_options[1] | 64U); tmp = qla2x00_set_fw_options(vha, (uint16_t *)(& ha->fw_options)); if (tmp != 0) { ql_log(1U, vha, 28827, "Unable to update fw options (beacon on).\n"); return (258); } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); if (ha->pio_address != 0ULL) { gpio_enable = inw((int )((unsigned int )ha->pio_address + 206U)); gpio_data = inw((int )((unsigned int )ha->pio_address + 204U)); } else { gpio_enable = readw((void const volatile *)(& reg->gpioe)); gpio_data = readw((void const volatile *)(& reg->gpiod)); } gpio_enable = (uint16_t )((unsigned int )gpio_enable | 192U); if (ha->pio_address != 0ULL) { outw((int )gpio_enable, (int )((unsigned int )ha->pio_address + 206U)); } else { writew((int )gpio_enable, (void volatile *)(& reg->gpioe)); readw((void const volatile *)(& reg->gpioe)); } gpio_data = (unsigned int )gpio_data & 65343U; if (ha->pio_address != 0ULL) { outw((int )gpio_data, (int )((unsigned int )ha->pio_address + 204U)); } else { writew((int )gpio_data, (void volatile *)(& reg->gpiod)); readw((void const volatile *)(& reg->gpiod)); } spin_unlock_irqrestore(& ha->hardware_lock, flags); ha->beacon_blink_led = 1U; ha->beacon_color_state = 0U; return (0); } } int qla2x00_beacon_off(struct scsi_qla_host *vha ) { int rval ; struct qla_hw_data *ha ; { rval = 0; ha = vha->hw; ha->beacon_blink_led = 0U; if ((ha->device_type & 16U) != 0U) { ha->beacon_color_state = 7U; } else { ha->beacon_color_state = 1U; } (*((ha->isp_ops)->beacon_blink))(vha); ha->fw_options[1] = (unsigned int )ha->fw_options[1] & 65279U; ha->fw_options[1] = (unsigned int )ha->fw_options[1] & 65471U; rval = qla2x00_set_fw_options(vha, (uint16_t *)(& ha->fw_options)); if (rval != 0) { ql_log(1U, vha, 28828, "Unable to update fw options (beacon off).\n"); } else { } return (rval); } } __inline static void qla24xx_flip_colors(struct qla_hw_data *ha , uint16_t *pflags ) { { if ((unsigned int )ha->beacon_color_state == 7U) { ha->beacon_color_state = 0U; *pflags = 0U; } else { ha->beacon_color_state = 7U; *pflags = 20U; } return; } } void qla24xx_beacon_blink(struct scsi_qla_host *vha ) { uint16_t led_color ; uint32_t gpio_data ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; raw_spinlock_t *tmp ; { led_color = 0U; ha = vha->hw; reg = & (ha->iobase)->isp24; tmp = spinlock_check(& 
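/* ISP24xx beacon blink: under hardware_lock, enable the LED GPIO outputs (mask 0x1C0000 = 1835008U), flip the colour state and write the updated value back to the GPIO data register. */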
ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); gpio_data = readl((void const volatile *)(& reg->gpiod)); gpio_data = gpio_data | 1835008U; writel(gpio_data, (void volatile *)(& reg->gpiod)); gpio_data = readl((void const volatile *)(& reg->gpiod)); qla24xx_flip_colors(ha, & led_color); gpio_data = gpio_data & 4294967267U; gpio_data = (uint32_t )led_color | gpio_data; writel(gpio_data, (void volatile *)(& reg->gpiod)); gpio_data = readl((void const volatile *)(& reg->gpiod)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static uint32_t qla83xx_select_led_port(struct qla_hw_data *ha ) { uint32_t led_select_value ; { led_select_value = 0U; if ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) { goto out; } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { led_select_value = 2102048U; } else { led_select_value = 2102056U; } out: ; return (led_select_value); } } void qla83xx_beacon_blink(struct scsi_qla_host *vha ) { uint32_t led_select_value ; struct qla_hw_data *ha ; uint16_t led_cfg[6U] ; uint16_t orig_led_cfg[6U] ; uint32_t led_10_value ; uint32_t led_43_value ; int rval ; { ha = vha->hw; if (((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U) && (ha->device_type & 8192U) == 0U) { return; } else { } if ((unsigned int )ha->beacon_blink_led == 0U) { return; } else { } if ((ha->device_type & 32768U) != 0U) { led_select_value = qla83xx_select_led_port(ha); qla83xx_wr_reg(vha, led_select_value, 1073750016U); qla83xx_wr_reg(vha, led_select_value + 4U, 1073750016U); msleep(1000U); qla83xx_wr_reg(vha, led_select_value, 1073758208U); qla83xx_wr_reg(vha, led_select_value + 4U, 1073758208U); } else if ((ha->device_type & 65536U) != 0U) { led_select_value = qla83xx_select_led_port(ha); qla83xx_rd_reg(vha, led_select_value, & led_10_value); qla83xx_rd_reg(vha, led_select_value + 16U, & led_43_value); qla83xx_wr_reg(vha, led_select_value, 32784384U); msleep(500U); qla83xx_wr_reg(vha, led_select_value, 1073742324U); msleep(1000U); qla83xx_wr_reg(vha, led_select_value, led_10_value); qla83xx_wr_reg(vha, led_select_value + 16U, led_43_value); } else if ((ha->device_type & 8192U) != 0U) { rval = qla81xx_get_led_config(vha, (uint16_t *)(& orig_led_cfg)); if (rval == 0) { if ((ha->device_type & 8192U) != 0U) { led_cfg[0] = 16384U; led_cfg[1] = 8192U; led_cfg[2] = 0U; led_cfg[3] = 0U; led_cfg[4] = 0U; led_cfg[5] = 0U; } else { led_cfg[0] = 16384U; led_cfg[1] = 16384U; led_cfg[2] = 16384U; led_cfg[3] = 8192U; led_cfg[4] = 0U; led_cfg[5] = 8192U; } rval = qla81xx_set_led_config(vha, (uint16_t *)(& led_cfg)); msleep(1000U); if ((ha->device_type & 8192U) != 0U) { led_cfg[0] = 16384U; led_cfg[1] = 8192U; led_cfg[2] = 0U; } else { led_cfg[0] = 16384U; led_cfg[1] = 8192U; led_cfg[2] = 16384U; led_cfg[3] = 16384U; led_cfg[4] = 0U; led_cfg[5] = 8192U; } rval = qla81xx_set_led_config(vha, (uint16_t *)(& led_cfg)); } else { } qla81xx_set_led_config(vha, (uint16_t *)(& orig_led_cfg)); } else { } return; } } int qla24xx_beacon_on(struct scsi_qla_host *vha ) { uint32_t gpio_data ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; int tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return (0); } else { } if ((ha->device_type & 65536U) != 0U || (ha->device_type & 8192U) != 0U) { goto skip_gpio; } else { } if ((unsigned int )ha->beacon_blink_led == 0U) { ha->fw_options[1] = (uint16_t )((unsigned int 
)ha->fw_options[1] | 64U); tmp = qla2x00_set_fw_options(vha, (uint16_t *)(& ha->fw_options)); if (tmp != 0) { return (258); } else { } tmp___0 = qla2x00_get_fw_options(vha, (uint16_t *)(& ha->fw_options)); if (tmp___0 != 0) { ql_log(1U, vha, 28681, "Unable to update fw options (beacon on).\n"); return (258); } else { } if ((ha->device_type & 32768U) != 0U) { goto skip_gpio; } else { } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); gpio_data = readl((void const volatile *)(& reg->gpiod)); gpio_data = gpio_data | 1835008U; writel(gpio_data, (void volatile *)(& reg->gpiod)); readl((void const volatile *)(& reg->gpiod)); spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } ha->beacon_color_state = 0U; skip_gpio: ha->beacon_blink_led = 1U; return (0); } } int qla24xx_beacon_off(struct scsi_qla_host *vha ) { uint32_t gpio_data ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_24xx *reg ; raw_spinlock_t *tmp ; int tmp___0 ; int tmp___1 ; { ha = vha->hw; reg = & (ha->iobase)->isp24; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return (0); } else { } ha->beacon_blink_led = 0U; if ((ha->device_type & 32768U) != 0U) { goto set_fw_options; } else { } if ((ha->device_type & 65536U) != 0U || (ha->device_type & 8192U) != 0U) { return (0); } else { } ha->beacon_color_state = 7U; (*((ha->isp_ops)->beacon_blink))(vha); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); gpio_data = readl((void const volatile *)(& reg->gpiod)); gpio_data = gpio_data & 4293132287U; writel(gpio_data, (void volatile *)(& reg->gpiod)); readl((void const volatile *)(& reg->gpiod)); spin_unlock_irqrestore(& ha->hardware_lock, flags); set_fw_options: ha->fw_options[1] = (unsigned int )ha->fw_options[1] & 65471U; tmp___0 = qla2x00_set_fw_options(vha, (uint16_t *)(& ha->fw_options)); if (tmp___0 != 0) { ql_log(1U, vha, 28749, "Unable to update fw options (beacon on).\n"); return (258); } else { } tmp___1 = qla2x00_get_fw_options(vha, (uint16_t *)(& ha->fw_options)); if (tmp___1 != 0) { ql_log(1U, vha, 28750, "Unable to update fw options (beacon on).\n"); return (258); } else { } return (0); } } static void qla2x00_flash_enable(struct qla_hw_data *ha ) { uint16_t data ; struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; data = readw((void const volatile *)(& reg->ctrl_status)); data = (uint16_t )((unsigned int )data | 2U); writew((int )data, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); return; } } static void qla2x00_flash_disable(struct qla_hw_data *ha ) { uint16_t data ; struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; data = readw((void const volatile *)(& reg->ctrl_status)); data = (unsigned int )data & 65533U; writew((int )data, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); return; } } static uint8_t qla2x00_read_flash_byte(struct qla_hw_data *ha , uint32_t addr ) { uint16_t data ; uint16_t bank_select ; struct device_reg_2xxx *reg ; uint16_t data2 ; { reg = & (ha->iobase)->isp; bank_select = readw((void const volatile *)(& reg->ctrl_status)); if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { bank_select = (unsigned int )bank_select & 65287U; bank_select = ((unsigned int )((uint16_t )(addr >> 12)) & 240U) | (unsigned int )bank_select; bank_select = (uint16_t )((unsigned int )bank_select | 8U); writew((int )bank_select, (void volatile *)(& reg->ctrl_status)); readw((void const 
volatile *)(& reg->ctrl_status)); writew((int )((unsigned short )addr), (void volatile *)(& reg->flash_address)); data = readw((void const volatile *)(& reg->flash_data)); return ((uint8_t )data); } else { } if ((addr & 65536U) != 0U && ((int )bank_select & 8) == 0) { bank_select = (uint16_t )((unsigned int )bank_select | 8U); writew((int )bank_select, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); } else if ((addr & 65536U) == 0U && ((int )bank_select & 8) != 0) { bank_select = (unsigned int )bank_select & 65527U; writew((int )bank_select, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); } else { } if (ha->pio_address != 0ULL) { outw((int )((unsigned short )addr), (int )ha->pio_address); ldv_44009: data = inw((int )((unsigned int )ha->pio_address + 2U)); __asm__ volatile ("": : : "memory"); cpu_relax(); data2 = inw((int )((unsigned int )ha->pio_address + 2U)); if ((int )data != (int )data2) { goto ldv_44009; } else { } } else { writew((int )((unsigned short )addr), (void volatile *)(& reg->flash_address)); data = qla2x00_debounce_register___0((uint16_t volatile *)(& reg->flash_data)); } return ((uint8_t )data); } } static void qla2x00_write_flash_byte(struct qla_hw_data *ha , uint32_t addr , uint8_t data ) { uint16_t bank_select ; struct device_reg_2xxx *reg ; { reg = & (ha->iobase)->isp; bank_select = readw((void const volatile *)(& reg->ctrl_status)); if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { bank_select = (unsigned int )bank_select & 65287U; bank_select = ((unsigned int )((uint16_t )(addr >> 12)) & 240U) | (unsigned int )bank_select; bank_select = (uint16_t )((unsigned int )bank_select | 8U); writew((int )bank_select, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); writew((int )((unsigned short )addr), (void volatile *)(& reg->flash_address)); readw((void const volatile *)(& reg->ctrl_status)); writew((int )data, (void volatile *)(& reg->flash_data)); readw((void const volatile *)(& reg->ctrl_status)); return; } else { } if ((addr & 65536U) != 0U && ((int )bank_select & 8) == 0) { bank_select = (uint16_t )((unsigned int )bank_select | 8U); writew((int )bank_select, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); } else if ((addr & 65536U) == 0U && ((int )bank_select & 8) != 0) { bank_select = (unsigned int )bank_select & 65527U; writew((int )bank_select, (void volatile *)(& reg->ctrl_status)); readw((void const volatile *)(& reg->ctrl_status)); } else { } if (ha->pio_address != 0ULL) { outw((int )((unsigned short )addr), (int )ha->pio_address); outw((int )data, (int )((unsigned int )ha->pio_address + 2U)); } else { writew((int )((unsigned short )addr), (void volatile *)(& reg->flash_address)); readw((void const volatile *)(& reg->ctrl_status)); writew((int )data, (void volatile *)(& reg->flash_data)); readw((void const volatile *)(& reg->ctrl_status)); } return; } } static int qla2x00_poll_flash(struct qla_hw_data *ha , uint32_t addr , uint8_t poll_data , uint8_t man_id , uint8_t flash_id ) { int status ; uint8_t flash_data ; uint32_t cnt ; { status = 1; poll_data = (unsigned int )poll_data & 128U; cnt = 3000000U; goto ldv_44031; ldv_44030: flash_data = qla2x00_read_flash_byte(ha, addr); if (((int )flash_data & 128) == (int )poll_data) { status = 0; goto ldv_44028; } else { } if ((unsigned int )man_id != 64U && (unsigned int )man_id != 218U) { if (((int )flash_data & 32) != 0 && cnt > 
2U) { cnt = 2U; } else { } } else { } __const_udelay(42950UL); __asm__ volatile ("": : : "memory"); __might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_sup.o.c.prepared", 2361, 0); _cond_resched(); cnt = cnt - 1U; ldv_44031: ; if (cnt != 0U) { goto ldv_44030; } else { } ldv_44028: ; return (status); } } static int qla2x00_program_flash_address(struct qla_hw_data *ha , uint32_t addr , uint8_t data , uint8_t man_id , uint8_t flash_id ) { int tmp ; { if ((ha->device_type & 536870912U) != 0U) { qla2x00_write_flash_byte(ha, 2730U, 170); qla2x00_write_flash_byte(ha, 1365U, 85); qla2x00_write_flash_byte(ha, 2730U, 160); qla2x00_write_flash_byte(ha, addr, (int )data); } else if ((unsigned int )man_id == 218U && (unsigned int )flash_id == 193U) { qla2x00_write_flash_byte(ha, addr, (int )data); if ((addr & 126U) != 0U) { return (0); } else { } } else { qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 160); qla2x00_write_flash_byte(ha, addr, (int )data); } __const_udelay(644250UL); tmp = qla2x00_poll_flash(ha, addr, (int )data, (int )man_id, (int )flash_id); return (tmp); } } static int qla2x00_erase_flash(struct qla_hw_data *ha , uint8_t man_id , uint8_t flash_id ) { int tmp ; { if ((ha->device_type & 536870912U) != 0U) { qla2x00_write_flash_byte(ha, 2730U, 170); qla2x00_write_flash_byte(ha, 1365U, 85); qla2x00_write_flash_byte(ha, 2730U, 128); qla2x00_write_flash_byte(ha, 2730U, 170); qla2x00_write_flash_byte(ha, 1365U, 85); qla2x00_write_flash_byte(ha, 2730U, 16); } else { qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 128); qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 16); } __const_udelay(644250UL); tmp = qla2x00_poll_flash(ha, 0U, 128, (int )man_id, (int )flash_id); return (tmp); } } static int qla2x00_erase_flash_sector(struct qla_hw_data *ha , uint32_t addr , uint32_t sec_mask , uint8_t man_id , uint8_t flash_id ) { int tmp ; { qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 128); qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); if ((unsigned int )man_id == 31U && (unsigned int )flash_id == 19U) { qla2x00_write_flash_byte(ha, addr & sec_mask, 16); } else { qla2x00_write_flash_byte(ha, addr & sec_mask, 48); } __const_udelay(644250UL); tmp = qla2x00_poll_flash(ha, addr, 128, (int )man_id, (int )flash_id); return (tmp); } } static void qla2x00_get_flash_manufacturer(struct qla_hw_data *ha , uint8_t *man_id , uint8_t *flash_id ) { { qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 144); *man_id = qla2x00_read_flash_byte(ha, 0U); *flash_id = qla2x00_read_flash_byte(ha, 1U); qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 240); return; } } static void qla2x00_read_flash_data(struct qla_hw_data *ha , uint8_t *tmp_buf , uint32_t saddr , uint32_t length ) { struct device_reg_2xxx *reg ; uint32_t midpoint ; uint32_t ilength ; uint8_t data ; { reg = & (ha->iobase)->isp; midpoint = length / 2U; writew(0, (void volatile *)(& reg->nvram)); 
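/* Legacy byte-wide flash read: the NVRAM register selects the flash bank, and the upper half of the part is selected once the midpoint of the requested length is reached. */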
readw((void const volatile *)(& reg->nvram)); ilength = 0U; goto ldv_44068; ldv_44067: ; if (ilength == midpoint) { writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); } else { } data = qla2x00_read_flash_byte(ha, saddr); if (saddr % 100U != 0U) { __const_udelay(42950UL); } else { } *tmp_buf = data; __might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_sup.o.c.prepared", 2510, 0); _cond_resched(); saddr = saddr + 1U; ilength = ilength + 1U; tmp_buf = tmp_buf + 1; ldv_44068: ; if (ilength < length) { goto ldv_44067; } else { } return; } } __inline static void qla2x00_suspend_hba(struct scsi_qla_host *vha ) { int cnt ; unsigned long flags ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; raw_spinlock_t *tmp ; unsigned short tmp___0 ; { ha = vha->hw; reg = & (ha->iobase)->isp; scsi_block_requests(vha->host); (*((ha->isp_ops)->disable_intrs))(ha); set_bit(3L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writew(8192, (void volatile *)(& reg->hccr)); readw((void const volatile *)(& reg->hccr)); if (((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) || (ha->device_type & 4U) != 0U) { cnt = 0; goto ldv_44082; ldv_44081: tmp___0 = readw((void const volatile *)(& reg->hccr)); if (((int )tmp___0 & 32) != 0) { goto ldv_44080; } else { } __const_udelay(429500UL); cnt = cnt + 1; ldv_44082: ; if (cnt <= 29999) { goto ldv_44081; } else { } ldv_44080: ; } else { __const_udelay(42950UL); } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } __inline static void qla2x00_resume_hba(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; clear_bit(3L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); return; } } uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { uint32_t addr ; uint32_t midpoint ; uint8_t *data ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; { ha = vha->hw; reg = & (ha->iobase)->isp; qla2x00_suspend_hba(vha); midpoint = ha->optrom_size / 2U; qla2x00_flash_enable(ha); writew(0, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); addr = offset; data = buf; goto ldv_44099; ldv_44098: ; if (addr == midpoint) { writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); } else { } *data = qla2x00_read_flash_byte(ha, addr); addr = addr + 1U; data = data + 1; ldv_44099: ; if (addr < length) { goto ldv_44098; } else { } qla2x00_flash_disable(ha); qla2x00_resume_hba(vha); return (buf); } } int qla2x00_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { int rval ; uint8_t man_id ; uint8_t flash_id ; uint8_t sec_number ; uint8_t data ; uint16_t wd ; uint32_t addr ; uint32_t liter ; uint32_t sec_mask ; uint32_t rest_addr ; struct qla_hw_data *ha ; struct device_reg_2xxx *reg ; int tmp ; int tmp___0 ; int tmp___1 ; { ha = vha->hw; reg = & (ha->iobase)->isp; qla2x00_suspend_hba(vha); rval = 0; sec_number = 0U; writew(1, (void volatile *)(& reg->ctrl_status)); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & wd); 
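/* Legacy option-ROM update: with the HBA suspended, identify the flash part, derive its sector geometry, erase it (whole chip or sector by sector depending on the part) and program the image one byte at a time. */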
qla2x00_flash_enable(ha); if ((ha->device_type & 536870912U) != 0U) { flash_id = 0U; man_id = flash_id; rest_addr = 65535U; sec_mask = 65536U; goto update_flash; } else { } qla2x00_get_flash_manufacturer(ha, & man_id, & flash_id); switch ((int )man_id) { case 32: ; if ((unsigned int )flash_id == 210U || (unsigned int )flash_id == 227U) { rest_addr = 65535U; sec_mask = 65536U; goto ldv_44121; } else { } rest_addr = 16383U; sec_mask = 114688U; goto ldv_44121; case 64: rest_addr = 511U; sec_mask = 130560U; goto ldv_44121; case 191: rest_addr = 4095U; sec_mask = 126976U; goto ldv_44121; case 218: rest_addr = 127U; sec_mask = 130944U; goto ldv_44121; case 194: ; if ((unsigned int )flash_id == 56U || (unsigned int )flash_id == 79U) { rest_addr = 65535U; sec_mask = 65536U; goto ldv_44121; } else { } case 31: ; if ((unsigned int )flash_id == 19U) { rest_addr = 2147483647U; sec_mask = 2147483648U; goto ldv_44121; } else { } case 1: ; if (((unsigned int )flash_id == 56U || (unsigned int )flash_id == 64U) || (unsigned int )flash_id == 79U) { rest_addr = 65535U; sec_mask = 65536U; goto ldv_44121; } else if ((unsigned int )flash_id == 62U) { rest_addr = 65535U; sec_mask = 65536U; goto ldv_44121; } else if ((unsigned int )flash_id == 32U || (unsigned int )flash_id == 110U) { rest_addr = 16383U; sec_mask = 114688U; goto ldv_44121; } else if ((unsigned int )flash_id == 109U) { rest_addr = 8191U; sec_mask = 122880U; goto ldv_44121; } else { } default: rest_addr = 16383U; sec_mask = 114688U; goto ldv_44121; } ldv_44121: ; update_flash: ; if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { tmp = qla2x00_erase_flash(ha, (int )man_id, (int )flash_id); if (tmp != 0) { rval = 258; goto ldv_44129; } else { } } else { } addr = offset; liter = 0U; goto ldv_44133; ldv_44132: data = *(buf + (unsigned long )liter); if ((addr & rest_addr) == 0U) { if ((ha->device_type & 16U) != 0U || (ha->device_type & 64U) != 0U) { if (addr > 65535U) { if (((addr >> 12) & 240U) != 0U && (((unsigned int )man_id == 1U && (unsigned int )flash_id == 62U) || ((unsigned int )man_id == 32U && (unsigned int )flash_id == 210U))) { sec_number = (uint8_t )((int )sec_number + 1); if ((unsigned int )sec_number == 1U) { rest_addr = 32767U; sec_mask = 98304U; } else if ((unsigned int )sec_number == 2U || (unsigned int )sec_number == 3U) { rest_addr = 8191U; sec_mask = 122880U; } else if ((unsigned int )sec_number == 4U) { rest_addr = 16383U; sec_mask = 114688U; } else { } } else { } } else { } } else if (ha->optrom_size / 2U == addr) { writew(2, (void volatile *)(& reg->nvram)); readw((void const volatile *)(& reg->nvram)); } else { } if ((unsigned int )flash_id == 218U && (unsigned int )man_id == 193U) { qla2x00_write_flash_byte(ha, 21845U, 170); qla2x00_write_flash_byte(ha, 10922U, 85); qla2x00_write_flash_byte(ha, 21845U, 160); } else if ((ha->device_type & 16U) == 0U && (ha->device_type & 64U) == 0U) { tmp___0 = qla2x00_erase_flash_sector(ha, addr, sec_mask, (int )man_id, (int )flash_id); if (tmp___0 != 0) { rval = 258; goto ldv_44130; } else { } if ((unsigned int )man_id == 1U && (unsigned int )flash_id == 109U) { sec_number = (uint8_t )((int )sec_number + 1); } else { } } else { } } else { } if ((unsigned int )man_id == 1U && (unsigned int )flash_id == 109U) { if ((unsigned int )sec_number == 1U && rest_addr - 1U == addr) { rest_addr = 4095U; sec_mask = 126976U; } else if ((unsigned int )sec_number == 3U && (addr & 32766U) != 0U) { rest_addr = 16383U; sec_mask = 114688U; } else { } } else { } tmp___1 = 
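/* Program a single byte, then qla2x00_poll_flash() waits for DQ7 (bit 7) of the read-back data to match the value written before the next byte is attempted. */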
qla2x00_program_flash_address(ha, addr, (int )data, (int )man_id, (int )flash_id); if (tmp___1 != 0) { rval = 258; goto ldv_44130; } else { } __might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_sup.o.c.prepared", 2797, 0); _cond_resched(); liter = liter + 1U; addr = addr + 1U; ldv_44133: ; if (liter < length) { goto ldv_44132; } else { } ldv_44130: ; ldv_44129: qla2x00_flash_disable(ha); qla2x00_resume_hba(vha); return (rval); } } uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { struct qla_hw_data *ha ; { ha = vha->hw; scsi_block_requests(vha->host); set_bit(3L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); qla24xx_read_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2); clear_bit(3L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); scsi_unblock_requests(vha->host); return (buf); } } int qla24xx_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { int rval ; struct qla_hw_data *ha ; { ha = vha->hw; scsi_block_requests(vha->host); set_bit(3L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2); clear_bit(3L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); scsi_unblock_requests(vha->host); return (rval); } } uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { int rval ; dma_addr_t optrom_dma ; void *optrom ; uint8_t *pbuf ; uint32_t faddr ; uint32_t left ; uint32_t burst ; struct qla_hw_data *ha ; uint32_t tmp ; uint32_t tmp___0 ; size_t __len ; void *__ret ; uint8_t *tmp___1 ; { ha = vha->hw; if ((ha->device_type & 2048U) != 0U || (ha->device_type & 8192U) != 0U) { goto try_fast; } else { } if ((offset & 4095U) != 0U) { goto slow_read; } else { } if (length <= 4095U) { goto slow_read; } else { } try_fast: optrom = dma_alloc_attrs(& (ha->pdev)->dev, 4096UL, & optrom_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )optrom == (unsigned long )((void *)0)) { ql_log(1U, vha, 204, "Unable to allocate memory for optrom burst read (%x KB).\n", 4); goto slow_read; } else { } pbuf = buf; faddr = offset >> 2; left = length >> 2; burst = 1024U; goto ldv_44169; ldv_44168: ; if (burst > left) { burst = left; } else { } tmp = flash_data_addr(ha, faddr); rval = qla2x00_dump_ram(vha, optrom_dma, tmp, burst); if (rval != 0) { tmp___0 = flash_data_addr(ha, faddr); ql_log(1U, vha, 245, "Unable to burst-read optrom segment (%x/%x/%llx).\n", rval, tmp___0, optrom_dma); ql_log(1U, vha, 246, "Reverting to slow-read.\n"); dma_free_attrs(& (ha->pdev)->dev, 4096UL, optrom, optrom_dma, (struct dma_attrs *)0); goto slow_read; } else { } __len = (size_t )(burst * 4U); __ret = __builtin_memcpy((void *)pbuf, (void const *)optrom, __len); left = left - burst; faddr = faddr + burst; pbuf = pbuf + (unsigned long )(burst * 4U); ldv_44169: ; if (left != 0U) { goto ldv_44168; } else { } dma_free_attrs(& (ha->pdev)->dev, 4096UL, optrom, optrom_dma, (struct dma_attrs *)0); return (buf); slow_read: tmp___1 = qla24xx_read_optrom_data(vha, buf, offset, length); return (tmp___1); } } static void qla2x00_get_fcode_version(struct qla_hw_data *ha , uint32_t pcids ) { int ret ; uint32_t istart ; uint32_t iend ; uint32_t iter ; uint32_t vend ; uint8_t do_next ; uint8_t rbyte ; 
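/* Scans the FCODE image referenced by the PCI data structure for its version string: search forward for a "/" marker pair, walk back over spaces and control bytes to the start of the string, and copy at most 16 bytes into ha->fcode_revision. */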
uint8_t *vbyte ; uint8_t tmp ; uint8_t tmp___0 ; uint8_t tmp___1 ; uint8_t tmp___2 ; uint8_t tmp___3 ; uint8_t tmp___4 ; uint8_t *tmp___5 ; { ret = 258; memset((void *)(& ha->fcode_revision), 0, 16UL); tmp = qla2x00_read_flash_byte(ha, pcids + 11U); tmp___0 = qla2x00_read_flash_byte(ha, pcids + 10U); istart = (uint32_t )(((int )tmp << 8) | (int )tmp___0) + pcids; iend = istart + 256U; do_next = 0U; iter = istart; goto ldv_44184; ldv_44183: iter = iter + 1U; tmp___3 = qla2x00_read_flash_byte(ha, iter); if ((unsigned int )tmp___3 == 47U) { tmp___2 = qla2x00_read_flash_byte(ha, iter + 2U); if ((unsigned int )tmp___2 == 47U) { do_next = (uint8_t )((int )do_next + 1); } else { tmp___1 = qla2x00_read_flash_byte(ha, iter + 3U); if ((unsigned int )tmp___1 == 47U) { do_next = (uint8_t )((int )do_next + 1); } else { } } } else { } ldv_44184: ; if (iter < iend && (unsigned int )do_next == 0U) { goto ldv_44183; } else { } if ((unsigned int )do_next == 0U) { goto ldv_44186; } else { } do_next = 0U; goto ldv_44188; ldv_44187: iter = iter - 1U; tmp___4 = qla2x00_read_flash_byte(ha, iter); if ((unsigned int )tmp___4 == 32U) { do_next = (uint8_t )((int )do_next + 1); } else { } ldv_44188: ; if (iter > istart && (unsigned int )do_next == 0U) { goto ldv_44187; } else { } if ((unsigned int )do_next == 0U) { goto ldv_44186; } else { } vend = iter - 1U; do_next = 0U; goto ldv_44191; ldv_44190: iter = iter - 1U; rbyte = qla2x00_read_flash_byte(ha, iter); if (((unsigned int )rbyte == 32U || (unsigned int )rbyte == 13U) || (unsigned int )rbyte == 16U) { do_next = (uint8_t )((int )do_next + 1); } else { } ldv_44191: ; if (iter > istart && (unsigned int )do_next == 0U) { goto ldv_44190; } else { } if ((unsigned int )do_next == 0U) { goto ldv_44186; } else { } iter = iter + 1U; if (vend != iter && vend - iter <= 15U) { vbyte = (uint8_t *)(& ha->fcode_revision); goto ldv_44194; ldv_44193: tmp___5 = vbyte; vbyte = vbyte + 1; *tmp___5 = qla2x00_read_flash_byte(ha, iter); iter = iter + 1U; ldv_44194: ; if (iter <= vend) { goto ldv_44193; } else { } ret = 0; } else { } ldv_44186: ; if (ret != 0) { memset((void *)(& ha->fcode_revision), 0, 16UL); } else { } return; } } int qla2x00_get_flash_version(scsi_qla_host_t *vha , void *mbuf ) { int ret ; uint8_t code_type ; uint8_t last_image ; uint32_t pcihdr ; uint32_t pcids ; uint8_t *dbyte ; uint16_t *dcode ; struct qla_hw_data *ha ; uint8_t tmp ; uint8_t tmp___0 ; uint8_t tmp___1 ; uint8_t tmp___2 ; uint8_t tmp___3 ; uint8_t tmp___4 ; uint8_t tmp___5 ; uint8_t tmp___6 ; uint8_t tmp___7 ; uint8_t tmp___8 ; uint8_t tmp___9 ; { ret = 0; ha = vha->hw; if (ha->pio_address == 0ULL || (unsigned long )mbuf == (unsigned long )((void *)0)) { return (258); } else { } memset((void *)(& ha->bios_revision), 0, 2UL); memset((void *)(& ha->efi_revision), 0, 2UL); memset((void *)(& ha->fcode_revision), 0, 16UL); memset((void *)(& ha->fw_revision), 0, 16UL); qla2x00_flash_enable(ha); pcihdr = 0U; last_image = 1U; ldv_44214: tmp = qla2x00_read_flash_byte(ha, pcihdr); if ((unsigned int )tmp != 85U) { ql_log(0U, vha, 80, "No matching ROM signature.\n"); ret = 258; goto ldv_44208; } else { tmp___0 = qla2x00_read_flash_byte(ha, pcihdr + 1U); if ((unsigned int )tmp___0 != 170U) { ql_log(0U, vha, 80, "No matching ROM signature.\n"); ret = 258; goto ldv_44208; } else { } } tmp___1 = qla2x00_read_flash_byte(ha, pcihdr + 25U); tmp___2 = qla2x00_read_flash_byte(ha, pcihdr + 24U); pcids = (uint32_t )(((int )tmp___1 << 8) | (int )tmp___2) + pcihdr; tmp___3 = qla2x00_read_flash_byte(ha, pcids); if ((unsigned 
int )tmp___3 != 80U) { ql_log(0U, vha, 81, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = 258; goto ldv_44208; } else { tmp___4 = qla2x00_read_flash_byte(ha, pcids + 1U); if ((unsigned int )tmp___4 != 67U) { ql_log(0U, vha, 81, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = 258; goto ldv_44208; } else { tmp___5 = qla2x00_read_flash_byte(ha, pcids + 2U); if ((unsigned int )tmp___5 != 73U) { ql_log(0U, vha, 81, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = 258; goto ldv_44208; } else { tmp___6 = qla2x00_read_flash_byte(ha, pcids + 3U); if ((unsigned int )tmp___6 != 82U) { ql_log(0U, vha, 81, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = 258; goto ldv_44208; } else { } } } } code_type = qla2x00_read_flash_byte(ha, pcids + 20U); switch ((int )code_type) { case 0: ha->bios_revision[0] = qla2x00_read_flash_byte(ha, pcids + 18U); ha->bios_revision[1] = qla2x00_read_flash_byte(ha, pcids + 19U); ql_dbg(1073741824U, vha, 82, "Read BIOS %d.%d.\n", (int )ha->bios_revision[1], (int )ha->bios_revision[0]); goto ldv_44210; case 1: qla2x00_get_fcode_version(ha, pcids); goto ldv_44210; case 3: ha->efi_revision[0] = qla2x00_read_flash_byte(ha, pcids + 18U); ha->efi_revision[1] = qla2x00_read_flash_byte(ha, pcids + 19U); ql_dbg(1073741824U, vha, 83, "Read EFI %d.%d.\n", (int )ha->efi_revision[1], (int )ha->efi_revision[0]); goto ldv_44210; default: ql_log(1U, vha, 84, "Unrecognized code type %x at pcids %x.\n", (int )code_type, pcids); goto ldv_44210; } ldv_44210: tmp___7 = qla2x00_read_flash_byte(ha, pcids + 21U); last_image = (unsigned int )tmp___7 & 128U; tmp___8 = qla2x00_read_flash_byte(ha, pcids + 17U); tmp___9 = qla2x00_read_flash_byte(ha, pcids + 16U); pcihdr = (uint32_t )((((int )tmp___8 << 8) | (int )tmp___9) * 512) + pcihdr; if ((unsigned int )last_image == 0U) { goto ldv_44214; } else { } ldv_44208: ; if ((ha->device_type & 16U) != 0U) { memset((void *)(& ha->fw_revision), 0, 16UL); dbyte = (uint8_t *)mbuf; memset((void *)dbyte, 0, 8UL); dcode = (uint16_t *)dbyte; qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4U + 10U, 8U); ql_dbg(1073872896U, vha, 266, "Dumping fw ver from flash:.\n"); ql_dump_buffer(1073872896U, vha, 267, dbyte, 8U); if (((((unsigned int )*dcode == 65535U && (unsigned int )*(dcode + 1UL) == 65535U) && (unsigned int )*(dcode + 2UL) == 65535U) && (unsigned int )*(dcode + 3UL) == 65535U) || ((((unsigned int )*dcode == 0U && (unsigned int )*(dcode + 1UL) == 0U) && (unsigned int )*(dcode + 2UL) == 0U) && (unsigned int )*(dcode + 3UL) == 0U)) { ql_log(1U, vha, 87, "Unrecognized fw revision at %x.\n", ha->flt_region_fw * 4U); } else { ha->fw_revision[0] = (uint32_t )(((int )*dbyte << 16) | (int )*(dbyte + 1UL)); ha->fw_revision[1] = (uint32_t )(((int )*(dbyte + 2UL) << 16) | (int )*(dbyte + 3UL)); ha->fw_revision[2] = (uint32_t )(((int )*(dbyte + 4UL) << 16) | (int )*(dbyte + 5UL)); ql_dbg(1073741824U, vha, 88, "FW Version: %d.%d.%d.\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2]); } } else { } qla2x00_flash_disable(ha); return (ret); } } int qla82xx_get_flash_version(scsi_qla_host_t *vha , void *mbuf ) { int ret ; uint32_t pcihdr ; uint32_t pcids ; uint32_t *dcode ; uint8_t *bcode ; uint8_t code_type ; uint8_t last_image ; struct qla_hw_data *ha ; { ret = 0; ha = vha->hw; if ((unsigned long )mbuf == (unsigned long )((void *)0)) { return (258); } else { } memset((void *)(& ha->bios_revision), 0, 2UL); memset((void *)(& ha->efi_revision), 0, 2UL); memset((void *)(& ha->fcode_revision), 0, 16UL); memset((void *)(& 
ha->fw_revision), 0, 16UL); dcode = (uint32_t *)mbuf; pcihdr = ha->flt_region_boot << 2; last_image = 1U; ldv_44233: (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)dcode, pcihdr, 128U); bcode = (uint8_t *)(mbuf + ((unsigned long )pcihdr & 3UL)); if ((unsigned int )*bcode != 85U || (unsigned int )*(bcode + 1UL) != 170U) { ql_log(0U, vha, 340, "No matching ROM signature.\n"); ret = 258; goto ldv_44227; } else { } pcids = (uint32_t )(((int )*(bcode + 25UL) << 8) | (int )*(bcode + 24UL)) + pcihdr; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)dcode, pcids, 128U); bcode = (uint8_t *)(mbuf + ((unsigned long )pcihdr & 3UL)); if ((((unsigned int )*bcode != 80U || (unsigned int )*(bcode + 1UL) != 67U) || (unsigned int )*(bcode + 2UL) != 73U) || (unsigned int )*(bcode + 3UL) != 82U) { ql_log(0U, vha, 341, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = 258; goto ldv_44227; } else { } code_type = *(bcode + 20UL); switch ((int )code_type) { case 0: ha->bios_revision[0] = *(bcode + 18UL); ha->bios_revision[1] = *(bcode + 19UL); ql_dbg(1073741824U, vha, 342, "Read BIOS %d.%d.\n", (int )ha->bios_revision[1], (int )ha->bios_revision[0]); goto ldv_44229; case 1: ha->fcode_revision[0] = *(bcode + 18UL); ha->fcode_revision[1] = *(bcode + 19UL); ql_dbg(1073741824U, vha, 343, "Read FCODE %d.%d.\n", (int )ha->fcode_revision[1], (int )ha->fcode_revision[0]); goto ldv_44229; case 3: ha->efi_revision[0] = *(bcode + 18UL); ha->efi_revision[1] = *(bcode + 19UL); ql_dbg(1073741824U, vha, 344, "Read EFI %d.%d.\n", (int )ha->efi_revision[1], (int )ha->efi_revision[0]); goto ldv_44229; default: ql_log(1U, vha, 345, "Unrecognized code type %x at pcids %x.\n", (int )code_type, pcids); goto ldv_44229; } ldv_44229: last_image = (unsigned int )*(bcode + 21UL) & 128U; pcihdr = (uint32_t )((((int )*(bcode + 17UL) << 8) | (int )*(bcode + 16UL)) * 512) + pcihdr; if ((unsigned int )last_image == 0U) { goto ldv_44233; } else { } ldv_44227: memset((void *)(& ha->fw_revision), 0, 16UL); dcode = (uint32_t *)mbuf; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)dcode, ha->flt_region_fw << 2, 32U); bcode = (uint8_t *)(mbuf + ((unsigned long )pcihdr & 3UL)); if ((((unsigned int )*bcode == 3U && (unsigned int )*(bcode + 1UL) == 0U) && (unsigned int )*(bcode + 2UL) == 64U) && (unsigned int )*(bcode + 3UL) == 64U) { ha->fw_revision[0] = (uint32_t )*(bcode + 4UL); ha->fw_revision[1] = (uint32_t )*(bcode + 5UL); ha->fw_revision[2] = (uint32_t )*(bcode + 6UL); ql_dbg(1073741824U, vha, 339, "Firmware revision %d.%d.%d\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2]); } else { } return (ret); } } int qla24xx_get_flash_version(scsi_qla_host_t *vha , void *mbuf ) { int ret ; uint32_t pcihdr ; uint32_t pcids ; uint32_t *dcode ; uint8_t *bcode ; uint8_t code_type ; uint8_t last_image ; int i ; struct qla_hw_data *ha ; __u32 tmp ; __u32 tmp___0 ; { ret = 0; ha = vha->hw; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { return (ret); } else { } if ((unsigned long )mbuf == (unsigned long )((void *)0)) { return (258); } else { } memset((void *)(& ha->bios_revision), 0, 2UL); memset((void *)(& ha->efi_revision), 0, 2UL); memset((void *)(& ha->fcode_revision), 0, 16UL); memset((void *)(& ha->fw_revision), 0, 16UL); dcode = (uint32_t *)mbuf; pcihdr = ha->flt_region_boot << 2; last_image = 1U; ldv_44253: qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 32U); bcode = (uint8_t *)(mbuf + ((unsigned long )pcihdr & 3UL)); if ((unsigned int )*bcode != 85U || (unsigned int )*(bcode + 1UL) != 170U) { 
ql_log(0U, vha, 89, "No matching ROM signature.\n"); ret = 258; goto ldv_44247; } else { } pcids = (uint32_t )(((int )*(bcode + 25UL) << 8) | (int )*(bcode + 24UL)) + pcihdr; qla24xx_read_flash_data(vha, dcode, pcids >> 2, 32U); bcode = (uint8_t *)(mbuf + ((unsigned long )pcihdr & 3UL)); if ((((unsigned int )*bcode != 80U || (unsigned int )*(bcode + 1UL) != 67U) || (unsigned int )*(bcode + 2UL) != 73U) || (unsigned int )*(bcode + 3UL) != 82U) { ql_log(0U, vha, 90, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = 258; goto ldv_44247; } else { } code_type = *(bcode + 20UL); switch ((int )code_type) { case 0: ha->bios_revision[0] = *(bcode + 18UL); ha->bios_revision[1] = *(bcode + 19UL); ql_dbg(1073741824U, vha, 91, "Read BIOS %d.%d.\n", (int )ha->bios_revision[1], (int )ha->bios_revision[0]); goto ldv_44249; case 1: ha->fcode_revision[0] = *(bcode + 18UL); ha->fcode_revision[1] = *(bcode + 19UL); ql_dbg(1073741824U, vha, 92, "Read FCODE %d.%d.\n", (int )ha->fcode_revision[1], (int )ha->fcode_revision[0]); goto ldv_44249; case 3: ha->efi_revision[0] = *(bcode + 18UL); ha->efi_revision[1] = *(bcode + 19UL); ql_dbg(1073741824U, vha, 93, "Read EFI %d.%d.\n", (int )ha->efi_revision[1], (int )ha->efi_revision[0]); goto ldv_44249; default: ql_log(1U, vha, 94, "Unrecognized code type %x at pcids %x.\n", (int )code_type, pcids); goto ldv_44249; } ldv_44249: last_image = (unsigned int )*(bcode + 21UL) & 128U; pcihdr = (uint32_t )((((int )*(bcode + 17UL) << 8) | (int )*(bcode + 16UL)) * 512) + pcihdr; if ((unsigned int )last_image == 0U) { goto ldv_44253; } else { } ldv_44247: memset((void *)(& ha->fw_revision), 0, 16UL); dcode = (uint32_t *)mbuf; qla24xx_read_flash_data(vha, dcode, ha->flt_region_fw + 4U, 4U); i = 0; goto ldv_44255; ldv_44254: tmp = __fswab32(*(dcode + (unsigned long )i)); *(dcode + (unsigned long )i) = tmp; i = i + 1; ldv_44255: ; if (i <= 3) { goto ldv_44254; } else { } if ((((*dcode == 4294967295U && *(dcode + 1UL) == 4294967295U) && *(dcode + 2UL) == 4294967295U) && *(dcode + 3UL) == 4294967295U) || (((*dcode == 0U && *(dcode + 1UL) == 0U) && *(dcode + 2UL) == 0U) && *(dcode + 3UL) == 0U)) { ql_log(1U, vha, 95, "Unrecognized fw revision at %x.\n", ha->flt_region_fw * 4U); } else { ha->fw_revision[0] = *dcode; ha->fw_revision[1] = *(dcode + 1UL); ha->fw_revision[2] = *(dcode + 2UL); ha->fw_revision[3] = *(dcode + 3UL); ql_dbg(1073741824U, vha, 96, "Firmware revision %d.%d.%d.%d.\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], ha->fw_revision[3]); } if ((ha->device_type & 8192U) == 0U) { return (ret); } else { } memset((void *)(& ha->gold_fw_version), 0, 16UL); dcode = (uint32_t *)mbuf; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)dcode, ha->flt_region_gold_fw << 2, 32U); if (((*(dcode + 4UL) == 4294967295U && *(dcode + 5UL) == 4294967295U) && *(dcode + 6UL) == 4294967295U) && *(dcode + 7UL) == 4294967295U) { ql_log(1U, vha, 86, "Unrecognized golden fw at 0x%x.\n", ha->flt_region_gold_fw * 4U); return (ret); } else { } i = 4; goto ldv_44258; ldv_44257: tmp___0 = __fswab32(*(dcode + (unsigned long )i)); ha->gold_fw_version[i + -4] = tmp___0; i = i + 1; ldv_44258: ; if (i <= 7) { goto ldv_44257; } else { } return (ret); } } static int qla2xxx_is_vpd_valid(uint8_t *pos , uint8_t *end ) { { if ((unsigned long )pos >= (unsigned long )end || (unsigned int )*pos != 130U) { return (0); } else { } pos = pos + (unsigned long )((int )*(pos + 1UL) + 3); if ((unsigned long )pos >= (unsigned long )end || (unsigned int )*pos != 144U) { return (0); } else { } pos = pos + 
(unsigned long )((int )*(pos + 1UL) + 3); if ((unsigned long )pos >= (unsigned long )end || (unsigned int )*pos != 120U) { return (0); } else { } return (1); } } int qla2xxx_get_vpd_field(scsi_qla_host_t *vha , char *key , char *str , size_t size ) { struct qla_hw_data *ha ; uint8_t *pos ; uint8_t *end ; int len ; int tmp ; size_t tmp___0 ; int tmp___1 ; int tmp___2 ; { ha = vha->hw; pos = (uint8_t *)ha->vpd; end = pos + (unsigned long )ha->vpd_size; len = 0; if ((ha->device_type & 134217728U) == 0U) { return (0); } else { tmp = qla2xxx_is_vpd_valid(pos, end); if (tmp == 0) { return (0); } else { } } goto ldv_44276; ldv_44275: len = (unsigned int )*pos == 130U ? (int )*(pos + 1UL) : (int )*(pos + 2UL); tmp___0 = strlen((char const *)key); tmp___1 = strncmp((char const *)pos, (char const *)key, tmp___0); if (tmp___1 == 0) { goto ldv_44274; } else { } if ((unsigned int )*pos != 144U && (unsigned int )*pos != 145U) { pos = pos + (unsigned long )len; } else { } pos = pos + 3UL; ldv_44276: ; if ((unsigned long )pos < (unsigned long )end && (unsigned int )*pos != 120U) { goto ldv_44275; } else { } ldv_44274: ; if ((unsigned long )(end + - ((unsigned long )len)) > (unsigned long )pos && (unsigned int )*pos != 120U) { tmp___2 = snprintf(str, size, "%.*s", len, pos + 3UL); return (tmp___2); } else { } return (0); } } int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha ) { int len ; int max_len ; uint32_t fcp_prio_addr ; struct qla_hw_data *ha ; void *tmp ; int tmp___0 ; int tmp___1 ; { ha = vha->hw; if ((unsigned long )ha->fcp_prio_cfg == (unsigned long )((struct qla_fcp_prio_cfg *)0)) { tmp = vmalloc(32768UL); ha->fcp_prio_cfg = (struct qla_fcp_prio_cfg *)tmp; if ((unsigned long )ha->fcp_prio_cfg == (unsigned long )((struct qla_fcp_prio_cfg *)0)) { ql_log(1U, vha, 213, "Unable to allocate memory for fcp priorty data (%x).\n", 32768); return (258); } else { } } else { } memset((void *)ha->fcp_prio_cfg, 0, 32768UL); fcp_prio_addr = ha->flt_region_fcp_prio; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->fcp_prio_cfg, fcp_prio_addr << 2, 16U); tmp___0 = qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0); if (tmp___0 == 0) { goto fail; } else { } fcp_prio_addr = fcp_prio_addr + 4U; len = (int )(ha->fcp_prio_cfg)->num_entries * 32; max_len = 32752; (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)(& (ha->fcp_prio_cfg)->entry), fcp_prio_addr << 2, (uint32_t )(len < max_len ? 
len : max_len)); tmp___1 = qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1); if (tmp___1 == 0) { goto fail; } else { } ha->flags.fcp_prio_enabled = 1U; return (0); fail: vfree((void const *)ha->fcp_prio_cfg); ha->fcp_prio_cfg = (struct qla_fcp_prio_cfg *)0; return (258); } } void activate_pending_timer_9(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_9 == (unsigned long )timer) { if (ldv_timer_state_9 == 2 || pending_flag != 0) { ldv_timer_list_9 = timer; ldv_timer_list_9->data = data; ldv_timer_state_9 = 1; } else { } return; } else { } reg_timer_9(timer); ldv_timer_list_9->data = data; return; } } int reg_timer_9(struct timer_list *timer ) { { ldv_timer_list_9 = timer; ldv_timer_state_9 = 1; return (0); } } void choose_timer_9(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_9 = 2; return; } } void disable_suitable_timer_9(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_9) { ldv_timer_state_9 = 0; return; } else { } return; } } int ldv_del_timer_43(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_44(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } extern long simple_strtol(char const * , char ** , unsigned int ) ; extern int sscanf(char const * , char const * , ...) 
; extern ssize_t memory_read_from_buffer(void * , size_t , loff_t * , void const * , size_t ) ; int ldv_del_timer_47(struct timer_list *ldv_func_arg1 ) ; extern int sysfs_create_bin_file(struct kobject * , struct bin_attribute const * ) ; extern void sysfs_remove_bin_file(struct kobject * , struct bin_attribute const * ) ; extern size_t __VERIFIER_nondet_size_t(void) ; extern u32 __VERIFIER_nondet_u32(void) ; int reg_timer_10(struct timer_list *timer ) ; void activate_pending_timer_10(struct timer_list *timer , unsigned long data , int pending_flag ) ; void disable_suitable_timer_10(struct timer_list *timer ) ; void choose_timer_10(struct timer_list *timer ) ; extern bool capable(int ) ; extern int scsi_is_host_device(struct device const * ) ; __inline static struct Scsi_Host *dev_to_shost(struct device *dev ) { int tmp ; struct device const *__mptr ; { goto ldv_26897; ldv_26896: ; if ((unsigned long )dev->parent == (unsigned long )((struct device *)0)) { return ((struct Scsi_Host *)0); } else { } dev = dev->parent; ldv_26897: tmp = scsi_is_host_device((struct device const *)dev); if (tmp == 0) { goto ldv_26896; } else { } __mptr = (struct device const *)dev; return ((struct Scsi_Host *)__mptr + 0xfffffffffffffd38UL); } } int ldv_scsi_add_host_with_dma_48(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; int ldv_scsi_add_host_with_dma_49(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; void ldv_scsi_remove_host_50(struct Scsi_Host *shost ) ; static char const * const port_state_str___2[5U] = { "Unknown", "UNCONFIGURED", "DEAD", "LOST", "ONLINE"}; void qla24xx_deallocate_vp_id(scsi_qla_host_t *vha ) ; int qla24xx_disable_vp(scsi_qla_host_t *vha ) ; int qla24xx_enable_vp(scsi_qla_host_t *vha ) ; void qla2x00_vp_stop_timer(scsi_qla_host_t *vha ) ; int qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport ) ; scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *fc_vport ) ; int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha ) ; int qla25xx_delete_req_que(struct scsi_qla_host *vha , struct req_que *req ) ; uint32_t qlafx00_fw_state_show(struct device *dev , struct device_attribute *attr , char *buf ) ; void qlafx00_get_host_speed(struct Scsi_Host *shost ) ; int qla24xx_bsg_request(struct fc_bsg_job *bsg_job ) ; int qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job ) ; void qla82xx_md_prep(scsi_qla_host_t *vha ) ; void qla82xx_set_reset_owner(scsi_qla_host_t *vha ) ; uint32_t qla8044_rd_reg(struct qla_hw_data *ha , ulong addr ) ; void qla8044_wr_reg(struct qla_hw_data *ha , ulong addr , uint32_t val ) ; __inline static void qla2x00_set_fcport_state___1(fc_port_t *fcport , int state ) { int old_state ; { old_state = atomic_read((atomic_t const *)(& fcport->state)); atomic_set(& fcport->state, state); if (old_state != 0 && old_state != state) { ql_dbg(268435456U, fcport->vha, 8317, "FCPort state transitioned from %s to %s - portid=%02x%02x%02x.\n", port_state_str___2[old_state], port_state_str___2[state], (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { } return; } } __inline static int qla2x00_reset_active(scsi_qla_host_t *vha ) { scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; { tmp = pci_get_drvdata((vha->hw)->pdev); base_vha = (scsi_qla_host_t *)tmp; tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___0 != 0) { tmp___5 = 1; } else { tmp___1 = constant_test_bit(3L, 
(unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___1 != 0) { tmp___5 = 1; } else { tmp___2 = constant_test_bit(10L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___2 != 0) { tmp___5 = 1; } else { tmp___3 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___3 != 0) { tmp___5 = 1; } else { tmp___4 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___4 != 0) { tmp___5 = 1; } else { tmp___5 = 0; } } } } } return (tmp___5); } } void qlt_vport_create(struct scsi_qla_host *vha , struct qla_hw_data *ha ) ; static int qla24xx_vport_disable(struct fc_vport *fc_vport , bool disable ) ; static ssize_t qla2x00_sysfs_read_fw_dump(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; int rval ; ssize_t tmp___1 ; ssize_t tmp___2 ; ssize_t tmp___3 ; ssize_t tmp___4 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; rval = 0; if (ha->fw_dump_reading == 0 && ha->mctp_dump_reading == 0) { return (0L); } else { } if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { if ((loff_t )ha->md_template_size > off) { tmp___1 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->md_tmplt_hdr, (size_t )ha->md_template_size); rval = (int )tmp___1; return ((ssize_t )rval); } else { } off = off - (loff_t )ha->md_template_size; tmp___2 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->md_dump, (size_t )ha->md_dump_size); rval = (int )tmp___2; return ((ssize_t )rval); } else if (ha->mctp_dumped != 0 && ha->mctp_dump_reading != 0) { tmp___3 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->mctp_dump, 548964UL); return (tmp___3); } else if (ha->fw_dump_reading != 0) { tmp___4 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->fw_dump, (size_t )ha->fw_dump_len); return (tmp___4); } else { return (0L); } } } static ssize_t qla2x00_sysfs_write_fw_dump(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; int reading ; long tmp___1 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; if (off != 0LL) { return (0L); } else { } tmp___1 = simple_strtol((char const *)buf, (char **)0, 10U); reading = (int )tmp___1; switch (reading) { case 0: ; if (ha->fw_dump_reading == 0) { goto ldv_60823; } else { } ql_log(2U, vha, 28765, "Firmware dump cleared on (%ld).\n", vha->host_no); if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { qla82xx_md_free(vha); qla82xx_md_prep(vha); } else { } ha->fw_dump_reading = 0; ha->fw_dumped = 0; goto ldv_60823; case 1: ; if (ha->fw_dumped != 0 && ha->fw_dump_reading == 0) { ha->fw_dump_reading = 1; ql_log(2U, vha, 28766, "Raw firmware dump ready for read on (%ld).\n", vha->host_no); } else { } goto ldv_60823; case 2: qla2x00_alloc_fw_dump(vha); goto ldv_60823; case 3: ; if ((ha->device_type & 16384U) != 0U) { qla82xx_idc_lock(ha); 
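/* ISP82xx path: claim reset ownership while the IDC lock is held. */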
qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); qla82xx_set_reset_owner(vha); qla8044_idc_unlock(ha); } else { qla2x00_system_error(vha); } goto ldv_60823; case 4: ; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { if ((unsigned long )ha->md_tmplt_hdr != (unsigned long )((void *)0)) { ql_dbg(8388608U, vha, 28763, "MiniDump supported with this firmware.\n"); } else { ql_dbg(8388608U, vha, 28829, "MiniDump not supported with this firmware.\n"); } } else { } goto ldv_60823; case 5: ; if ((ha->device_type & 16384U) != 0U || (ha->device_type & 262144U) != 0U) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } goto ldv_60823; case 6: ; if (ha->mctp_dump_reading == 0) { goto ldv_60823; } else { } ql_log(2U, vha, 28865, "MCTP dump cleared on (%ld).\n", vha->host_no); ha->mctp_dump_reading = 0; ha->mctp_dumped = 0; goto ldv_60823; case 7: ; if (ha->mctp_dumped != 0 && ha->mctp_dump_reading == 0) { ha->mctp_dump_reading = 1; ql_log(2U, vha, 28866, "Raw mctp dump ready for read on (%ld).\n", vha->host_no); } else { } goto ldv_60823; } ldv_60823: ; return ((ssize_t )count); } } static struct bin_attribute sysfs_fw_dump_attr = {{"fw_dump", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, & qla2x00_sysfs_read_fw_dump, & qla2x00_sysfs_write_fw_dump, 0}; static ssize_t qla2x00_sysfs_read_nvram(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; bool tmp___1 ; int tmp___2 ; ssize_t tmp___3 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = capable(21); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0L); } else { } if ((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->nvram, ha->flt_region_nvram << 2, (uint32_t )ha->nvram_size); } else { } tmp___3 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->nvram, (size_t )ha->nvram_size); return (tmp___3); } } static ssize_t qla2x00_sysfs_write_nvram(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; uint16_t cnt ; bool tmp___1 ; int tmp___2 ; uint32_t *iter ; uint32_t chksum ; uint32_t *tmp___3 ; uint8_t *iter___0 ; uint8_t chksum___0 ; uint8_t *tmp___4 ; int tmp___5 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = capable(21); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (((tmp___2 || off != 0LL) || (size_t )ha->nvram_size != count) || (unsigned long )(ha->isp_ops)->write_nvram == (unsigned long )((int (*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0)) { return (-22L); } else { } if ((ha->device_type & 134217728U) != 0U) { iter = (uint32_t *)buf; chksum = 0U; cnt = 0U; goto ldv_60860; ldv_60859: tmp___3 = iter; iter = 
iter + 1; chksum = *tmp___3 + chksum; cnt = (uint16_t )((int )cnt + 1); ldv_60860: ; if ((size_t )cnt < (count >> 2) - 1UL) { goto ldv_60859; } else { } chksum = - chksum; *iter = chksum; } else { iter___0 = (uint8_t *)buf; chksum___0 = 0U; cnt = 0U; goto ldv_60865; ldv_60864: tmp___4 = iter___0; iter___0 = iter___0 + 1; chksum___0 = (int )*tmp___4 + (int )chksum___0; cnt = (uint16_t )((int )cnt + 1); ldv_60865: ; if ((size_t )cnt < count - 1UL) { goto ldv_60864; } else { } chksum___0 = - ((int )chksum___0); *iter___0 = chksum___0; } tmp___5 = qla2x00_wait_for_hba_online(vha); if (tmp___5 != 0) { ql_log(1U, vha, 28767, "HBA not online, failing NVRAM update.\n"); return (-11L); } else { } (*((ha->isp_ops)->write_nvram))(vha, (uint8_t *)buf, (uint32_t )ha->nvram_base, (uint32_t )count); (*((ha->isp_ops)->read_nvram))(vha, (uint8_t *)ha->nvram, (uint32_t )ha->nvram_base, (uint32_t )count); ql_dbg(8388608U, vha, 28768, "Setting ISP_ABORT_NEEDED\n"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); return ((ssize_t )count); } } static struct bin_attribute sysfs_nvram_attr = {{"nvram", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 512UL, 0, & qla2x00_sysfs_read_nvram, & qla2x00_sysfs_write_nvram, 0}; static ssize_t qla2x00_sysfs_read_optrom(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; ssize_t tmp___1 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; if (ha->optrom_state != 1) { return (0L); } else { } tmp___1 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->optrom_buffer, (size_t )ha->optrom_region_size); return (tmp___1); } } static ssize_t qla2x00_sysfs_write_optrom(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; size_t __len ; void *__ret ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; if (ha->optrom_state != 2) { return (-22L); } else { } if ((loff_t )ha->optrom_region_size < off) { return (-34L); } else { } if ((unsigned long long )off + (unsigned long long )count > (unsigned long long )ha->optrom_region_size) { count = (size_t )((loff_t )ha->optrom_region_size - off); } else { } __len = count; __ret = __builtin_memcpy((void *)ha->optrom_buffer + (unsigned long )off, (void const *)buf, __len); return ((ssize_t )count); } } static struct bin_attribute sysfs_optrom_attr = {{"optrom", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, & qla2x00_sysfs_read_optrom, & qla2x00_sysfs_write_optrom, 0}; static ssize_t qla2x00_sysfs_write_optrom_ctl(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; uint32_t start ; uint32_t size ; 
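/* val, start and size are parsed below from the user-written "%d:%x:%x" control string; val selects the optrom operation (0 = free buffer, 1 = read region, 2 = stage a write, 3 = commit the staged write to flash). */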
int val ; int valid ; int tmp___1 ; long tmp___2 ; int tmp___3 ; void *tmp___4 ; int tmp___5 ; void *tmp___6 ; int tmp___7 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; start = 0U; size = ha->optrom_size; if (off != 0LL) { return (-22L); } else { } tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { return (-11L); } else { } tmp___3 = sscanf((char const *)buf, "%d:%x:%x", & val, & start, & size); if (tmp___3 <= 0) { return (-22L); } else { } if (ha->optrom_size < start) { return (-22L); } else { } switch (val) { case 0: ; if (ha->optrom_state != 1 && ha->optrom_state != 2) { return (-22L); } else { } ha->optrom_state = 0; ql_dbg(8388608U, vha, 28769, "Freeing flash region allocation -- 0x%x bytes.\n", ha->optrom_region_size); vfree((void const *)ha->optrom_buffer); ha->optrom_buffer = (char *)0; goto ldv_60913; case 1: ; if (ha->optrom_state != 0) { return (-22L); } else { } ha->optrom_region_start = start; ha->optrom_region_size = start + size > ha->optrom_size ? ha->optrom_size - start : size; ha->optrom_state = 1; tmp___4 = vmalloc((unsigned long )ha->optrom_region_size); ha->optrom_buffer = (char *)tmp___4; if ((unsigned long )ha->optrom_buffer == (unsigned long )((char *)0)) { ql_log(1U, vha, 28770, "Unable to allocate memory for optrom retrieval (%x).\n", ha->optrom_region_size); ha->optrom_state = 0; return (-12L); } else { } tmp___5 = qla2x00_wait_for_hba_online(vha); if (tmp___5 != 0) { ql_log(1U, vha, 28771, "HBA not online, failing NVRAM update.\n"); return (-11L); } else { } ql_dbg(8388608U, vha, 28772, "Reading flash region -- 0x%x/0x%x.\n", ha->optrom_region_start, ha->optrom_region_size); memset((void *)ha->optrom_buffer, 0, (size_t )ha->optrom_region_size); (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); goto ldv_60913; case 2: ; if (ha->optrom_state != 0) { return (-22L); } else { } valid = 0; if (ha->optrom_size == 131072U && start == 0U) { valid = 1; } else if (ha->flt_region_boot * 4U == start || ha->flt_region_fw * 4U == start) { valid = 1; } else if (((((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) || ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U)) || (ha->device_type & 32768U) != 0U) { valid = 1; } else { } if (valid == 0) { ql_log(1U, vha, 28773, "Invalid start region 0x%x/0x%x.\n", start, size); return (-22L); } else { } ha->optrom_region_start = start; ha->optrom_region_size = start + size > ha->optrom_size ? 
ha->optrom_size - start : size; ha->optrom_state = 2; tmp___6 = vmalloc((unsigned long )ha->optrom_region_size); ha->optrom_buffer = (char *)tmp___6; if ((unsigned long )ha->optrom_buffer == (unsigned long )((char *)0)) { ql_log(1U, vha, 28774, "Unable to allocate memory for optrom update (%x)\n", ha->optrom_region_size); ha->optrom_state = 0; return (-12L); } else { } ql_dbg(8388608U, vha, 28775, "Staging flash region write -- 0x%x/0x%x.\n", ha->optrom_region_start, ha->optrom_region_size); memset((void *)ha->optrom_buffer, 0, (size_t )ha->optrom_region_size); goto ldv_60913; case 3: ; if (ha->optrom_state != 2) { return (-22L); } else { } tmp___7 = qla2x00_wait_for_hba_online(vha); if (tmp___7 != 0) { ql_log(1U, vha, 28776, "HBA not online, failing flash update.\n"); return (-11L); } else { } ql_dbg(8388608U, vha, 28777, "Writing flash region -- 0x%x/0x%x.\n", ha->optrom_region_start, ha->optrom_region_size); (*((ha->isp_ops)->write_optrom))(vha, (uint8_t *)ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); goto ldv_60913; default: ; return (-22L); } ldv_60913: ; return ((ssize_t )count); } } static struct bin_attribute sysfs_optrom_ctl_attr = {{"optrom_ctl", 128U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, 0, & qla2x00_sysfs_write_optrom_ctl, 0}; static ssize_t qla2x00_sysfs_read_vpd(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; ssize_t tmp___5 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { return (-11L); } else { } tmp___3 = capable(21); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { return (-22L); } else { } if ((ha->device_type & 8192U) != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->vpd, ha->flt_region_vpd << 2, (uint32_t )ha->vpd_size); } else { } tmp___5 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->vpd, (size_t )ha->vpd_size); return (tmp___5); } } static ssize_t qla2x00_sysfs_write_vpd(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; uint8_t *tmp_data ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; int tmp___5 ; void *tmp___6 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { return (0L); } else { } tmp___3 = capable(21); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (((tmp___4 || off != 0LL) || (size_t )ha->vpd_size != count) || (unsigned long )(ha->isp_ops)->write_nvram == (unsigned long )((int (*)(struct scsi_qla_host * , uint8_t * , uint32_t , uint32_t ))0)) { return (0L); } else { } tmp___5 = 
qla2x00_wait_for_hba_online(vha); if (tmp___5 != 0) { ql_log(1U, vha, 28778, "HBA not online, failing VPD update.\n"); return (-11L); } else { } (*((ha->isp_ops)->write_nvram))(vha, (uint8_t *)buf, (uint32_t )ha->vpd_base, (uint32_t )count); (*((ha->isp_ops)->read_nvram))(vha, (uint8_t *)ha->vpd, (uint32_t )ha->vpd_base, (uint32_t )count); if ((ha->device_type & 134217728U) == 0U) { return (-22L); } else { } tmp___6 = vmalloc(256UL); tmp_data = (uint8_t *)tmp___6; if ((unsigned long )tmp_data == (unsigned long )((uint8_t *)0U)) { ql_log(1U, vha, 28779, "Unable to allocate memory for VPD information update.\n"); return (-12L); } else { } (*((ha->isp_ops)->get_flash_version))(vha, (void *)tmp_data); vfree((void const *)tmp_data); return ((ssize_t )count); } } static struct bin_attribute sysfs_vpd_attr = {{"vpd", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, & qla2x00_sysfs_read_vpd, & qla2x00_sysfs_write_vpd, 0}; static ssize_t qla2x00_sysfs_read_sfp(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; uint16_t iter ; uint16_t addr ; uint16_t offset ; int rval ; bool tmp___1 ; int tmp___2 ; size_t __len ; void *__ret ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = capable(21); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if ((tmp___2 || off != 0LL) || count != 512UL) { return (0L); } else { } if ((unsigned long )ha->sfp_data != (unsigned long )((void *)0)) { goto do_read; } else { } ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, 208U, & ha->sfp_data_dma); if ((unsigned long )ha->sfp_data == (unsigned long )((void *)0)) { ql_log(1U, vha, 28780, "Unable to allocate memory for SFP read-data.\n"); return (0L); } else { } do_read: memset(ha->sfp_data, 0, 64UL); addr = 160U; iter = 0U; offset = 0U; goto ldv_60966; ldv_60965: ; if ((unsigned int )iter == 4U) { addr = 162U; offset = 0U; } else { } rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, (uint8_t *)ha->sfp_data, (int )addr, (int )offset, 64, 0); if (rval != 0) { ql_log(1U, vha, 28781, "Unable to read SFP data (%x/%x/%x).\n", rval, (int )addr, (int )offset); return (-5L); } else { } __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)buf, (void const *)ha->sfp_data, __len); } else { __ret = __builtin_memcpy((void *)buf, (void const *)ha->sfp_data, __len); } buf = buf + 64UL; iter = (uint16_t )((int )iter + 1); offset = (unsigned int )offset + 64U; ldv_60966: ; if ((unsigned int )iter <= 7U) { goto ldv_60965; } else { } return ((ssize_t )count); } } static struct bin_attribute sysfs_sfp_attr = {{"sfp", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 512UL, 0, & qla2x00_sysfs_read_sfp, 0, 0}; static ssize_t qla2x00_sysfs_write_reset(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp___1 ; int type ; uint32_t idc_control ; uint8_t *tmp_data ; long tmp___2 ; uint32_t idc_control___0 ; int tmp___3 ; void *tmp___4 ; { __mptr = (struct 
kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp___1; tmp_data = (uint8_t *)0U; if (off != 0LL) { return (-22L); } else { } tmp___2 = simple_strtol((char const *)buf, (char **)0, 10U); type = (int )tmp___2; switch (type) { case 131676: ql_log(2U, vha, 28782, "Issuing ISP reset.\n"); scsi_block_requests(vha->host); if ((ha->device_type & 16384U) != 0U) { ha->flags.isp82xx_no_md_cap = 1U; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); idc_control = qla8044_rd_reg(ha, 14224UL); qla8044_wr_reg(ha, 14224UL, idc_control | 2U); qla82xx_set_reset_owner(vha); qla8044_idc_unlock(ha); } else { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); goto ldv_60986; case 131677: ; if ((ha->device_type & 8192U) == 0U && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) { return (-1L); } else { } ql_log(2U, vha, 28783, "Issuing MPI reset.\n"); if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { qla83xx_idc_lock(vha, 0); __qla83xx_get_idc_control(vha, & idc_control___0); idc_control___0 = idc_control___0 | 2U; __qla83xx_set_idc_control(vha, idc_control___0); qla83xx_wr_reg(vha, 571483012U, 4U); qla83xx_idc_audit(vha, 0); qla83xx_idc_unlock(vha, 0); goto ldv_60986; } else { qla2x00_wait_for_hba_online(vha); scsi_block_requests(vha->host); tmp___3 = qla81xx_restart_mpi_firmware(vha); if (tmp___3 != 0) { ql_log(1U, vha, 28784, "MPI reset failed.\n"); } else { } scsi_unblock_requests(vha->host); goto ldv_60986; } case 131678: ; if (((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U) || (unsigned long )vha != (unsigned long )base_vha) { ql_log(2U, vha, 28785, "FCoE ctx reset no supported.\n"); return (-1L); } else { } ql_log(2U, vha, 28786, "Issuing FCoE ctx reset.\n"); set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); qla2x00_wait_for_fcoe_ctx_reset(vha); goto ldv_60986; case 131679: ; if ((ha->device_type & 65536U) == 0U) { return (-1L); } else { } ql_log(2U, vha, 28860, "Disabling Reset by IDC control\n"); qla83xx_idc_lock(vha, 0); __qla83xx_get_idc_control(vha, & idc_control); idc_control = idc_control | 1U; __qla83xx_set_idc_control(vha, idc_control); qla83xx_idc_unlock(vha, 0); goto ldv_60986; case 131680: ; if ((ha->device_type & 65536U) == 0U) { return (-1L); } else { } ql_log(2U, vha, 28861, "Enabling Reset by IDC control\n"); qla83xx_idc_lock(vha, 0); __qla83xx_get_idc_control(vha, & idc_control); idc_control = idc_control & 4294967294U; __qla83xx_set_idc_control(vha, idc_control); qla83xx_idc_unlock(vha, 0); goto ldv_60986; case 131681: ql_dbg(8388608U, vha, 28896, "Updating cache versions without reset "); tmp___4 = vmalloc(256UL); tmp_data = (uint8_t *)tmp___4; if ((unsigned long )tmp_data == (unsigned long )((uint8_t *)0U)) { ql_log(1U, vha, 28897, "Unable to allocate memory for VPD information update.\n"); return (-12L); } else { } (*((ha->isp_ops)->get_flash_version))(vha, (void *)tmp_data); vfree((void const *)tmp_data); goto ldv_60986; } ldv_60986: ; return ((ssize_t )count); } } static struct bin_attribute sysfs_reset_attr = {{"reset", 128U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, 
{(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, 0, & qla2x00_sysfs_write_reset, 0}; static ssize_t qla2x00_sysfs_read_xgmac_stats(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; int rval ; uint16_t actual_size ; bool tmp___1 ; int tmp___2 ; size_t __len ; void *__ret ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = capable(21); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if ((tmp___2 || off != 0LL) || count > 4096UL) { return (0L); } else { } if ((unsigned long )ha->xgmac_data != (unsigned long )((void *)0)) { goto do_read; } else { } ha->xgmac_data = dma_alloc_attrs(& (ha->pdev)->dev, 4096UL, & ha->xgmac_data_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )ha->xgmac_data == (unsigned long )((void *)0)) { ql_log(1U, vha, 28790, "Unable to allocate memory for XGMAC read-data.\n"); return (0L); } else { } do_read: actual_size = 0U; memset(ha->xgmac_data, 0, 4096UL); rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, 4096, & actual_size); if (rval != 0) { ql_log(1U, vha, 28791, "Unable to read XGMAC data (%x).\n", rval); count = 0UL; } else { } count = count < (size_t )actual_size ? count : (size_t )actual_size; __len = count; __ret = __builtin_memcpy((void *)buf, (void const *)ha->xgmac_data, __len); return ((ssize_t )count); } } static struct bin_attribute sysfs_xgmac_stats_attr = {{"xgmac_stats", 256U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, & qla2x00_sysfs_read_xgmac_stats, 0, 0}; static ssize_t qla2x00_sysfs_read_dcbx_tlv(struct file *filp , struct kobject *kobj , struct bin_attribute *bin_attr , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *vha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; void *tmp___0 ; struct qla_hw_data *ha ; int rval ; uint16_t actual_size ; bool tmp___1 ; int tmp___2 ; size_t __len ; void *__ret ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = shost_priv(tmp); vha = (struct scsi_qla_host *)tmp___0; ha = vha->hw; tmp___1 = capable(21); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if ((tmp___2 || off != 0LL) || count > 4096UL) { return (0L); } else { } if ((unsigned long )ha->dcbx_tlv != (unsigned long )((void *)0)) { goto do_read; } else { } ha->dcbx_tlv = dma_alloc_attrs(& (ha->pdev)->dev, 4096UL, & ha->dcbx_tlv_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )ha->dcbx_tlv == (unsigned long )((void *)0)) { ql_log(1U, vha, 28792, "Unable to allocate memory for DCBX TLV read-data.\n"); return (-12L); } else { } do_read: actual_size = 0U; memset(ha->dcbx_tlv, 0, 4096UL); rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, 4096); if (rval != 0) { ql_log(1U, vha, 28793, "Unable to read DCBX TLV (%x).\n", rval); return (-5L); } else { } __len = count; __ret = __builtin_memcpy((void *)buf, (void const *)ha->dcbx_tlv, __len); return ((ssize_t )count); } } static struct bin_attribute sysfs_dcbx_tlv_attr = {{"dcbx_tlv", 256U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, & qla2x00_sysfs_read_dcbx_tlv, 0, 0}; static struct sysfs_entry bin_file_entries[10U] = { {(char 
*)"fw_dump", & sysfs_fw_dump_attr, 0}, {(char *)"nvram", & sysfs_nvram_attr, 0}, {(char *)"optrom", & sysfs_optrom_attr, 0}, {(char *)"optrom_ctl", & sysfs_optrom_ctl_attr, 0}, {(char *)"vpd", & sysfs_vpd_attr, 1}, {(char *)"sfp", & sysfs_sfp_attr, 1}, {(char *)"reset", & sysfs_reset_attr, 0}, {(char *)"xgmac_stats", & sysfs_xgmac_stats_attr, 3}, {(char *)"dcbx_tlv", & sysfs_dcbx_tlv_attr, 3}, {(char *)0, 0, 0}}; void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha ) { struct Scsi_Host *host ; struct sysfs_entry *iter ; int ret ; { host = vha->host; iter = (struct sysfs_entry *)(& bin_file_entries); goto ldv_61045; ldv_61044: ; if (iter->is4GBp_only != 0 && ((vha->hw)->device_type & 134217728U) == 0U) { goto ldv_61043; } else { } if (iter->is4GBp_only == 2 && ((vha->hw)->device_type & 2048U) == 0U) { goto ldv_61043; } else { } if (iter->is4GBp_only == 3 && (((((vha->hw)->device_type & 8192U) == 0U && ((vha->hw)->device_type & 16384U) == 0U) && ((vha->hw)->device_type & 65536U) == 0U) && ((vha->hw)->device_type & 262144U) == 0U)) { goto ldv_61043; } else { } ret = sysfs_create_bin_file(& host->shost_gendev.kobj, (struct bin_attribute const *)iter->attr); if (ret != 0) { ql_log(1U, vha, 243, "Unable to create sysfs %s binary attribute (%d).\n", iter->name, ret); } else { ql_dbg(1073741824U, vha, 244, "Successfully created sysfs %s binary attribure.\n", iter->name); } ldv_61043: iter = iter + 1; ldv_61045: ; if ((unsigned long )iter->name != (unsigned long )((char *)0)) { goto ldv_61044; } else { } return; } } void qla2x00_free_sysfs_attr(scsi_qla_host_t *vha ) { struct Scsi_Host *host ; struct sysfs_entry *iter ; struct qla_hw_data *ha ; { host = vha->host; ha = vha->hw; iter = (struct sysfs_entry *)(& bin_file_entries); goto ldv_61055; ldv_61054: ; if (iter->is4GBp_only != 0 && (ha->device_type & 134217728U) == 0U) { goto ldv_61053; } else { } if (iter->is4GBp_only == 2 && (ha->device_type & 2048U) == 0U) { goto ldv_61053; } else { } if (iter->is4GBp_only == 3 && (((((vha->hw)->device_type & 8192U) == 0U && ((vha->hw)->device_type & 16384U) == 0U) && ((vha->hw)->device_type & 65536U) == 0U) && ((vha->hw)->device_type & 262144U) == 0U)) { goto ldv_61053; } else { } sysfs_remove_bin_file(& host->shost_gendev.kobj, (struct bin_attribute const *)iter->attr); ldv_61053: iter = iter + 1; ldv_61055: ; if ((unsigned long )iter->name != (unsigned long )((char *)0)) { goto ldv_61054; } else { } if ((unsigned int )ha->beacon_blink_led == 1U) { (*((ha->isp_ops)->beacon_off))(vha); } else { } return; } } static ssize_t qla2x00_drvr_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { int tmp ; { tmp = snprintf(buf, 4096UL, "%s\n", (char *)(& qla2x00_version_str)); return ((ssize_t )tmp); } } static ssize_t qla2x00_fw_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; char fw_str[128U] ; char *tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = (*((ha->isp_ops)->fw_version_str))(vha, (char *)(& fw_str)); tmp___1 = snprintf(buf, 4096UL, "%s\n", tmp___0); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_serial_num_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; uint32_t sn ; int tmp___0 ; int tmp___1 ; int tmp___2 ; 
{ __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; if (((vha->hw)->device_type & 131072U) != 0U) { tmp___0 = snprintf(buf, 4096UL, "%s\n", (uint8_t *)(& (vha->hw)->mr.serial_num)); return ((ssize_t )tmp___0); } else if ((ha->device_type & 134217728U) != 0U) { qla2xxx_get_vpd_field(vha, (char *)"SN", buf, 4096UL); tmp___1 = snprintf(buf, 4096UL, "%s\n", buf); return ((ssize_t )tmp___1); } else { } sn = (uint32_t )(((((int )ha->serial0 & 31) << 16) | ((int )ha->serial2 << 8)) | (int )ha->serial1); tmp___2 = snprintf(buf, 4096UL, "%c%05d\n", sn / 100000U + 65U, sn % 100000U); return ((ssize_t )tmp___2); } } static ssize_t qla2x00_isp_name_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; tmp___0 = snprintf(buf, 4096UL, "ISP%04X\n", (int )((vha->hw)->pdev)->device); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_isp_id_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; if (((vha->hw)->device_type & 131072U) != 0U) { tmp___0 = snprintf(buf, 4096UL, "%s\n", (uint8_t *)(& (vha->hw)->mr.hw_version)); return ((ssize_t )tmp___0); } else { } tmp___1 = snprintf(buf, 4096UL, "%04x %04x %04x %04x\n", (int )ha->product_id[0], (int )ha->product_id[1], (int )ha->product_id[2], (int )ha->product_id[3]); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_model_name_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; if (((vha->hw)->device_type & 131072U) != 0U) { tmp___0 = snprintf(buf, 4096UL, "%s\n", (uint8_t *)(& (vha->hw)->mr.product_name)); return ((ssize_t )tmp___0); } else { } tmp___1 = snprintf(buf, 4096UL, "%s\n", (uint8_t *)(& (vha->hw)->model_number)); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_model_desc_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; tmp___0 = snprintf(buf, 4096UL, "%s\n", (unsigned long )(& (vha->hw)->model_desc) != (unsigned long )((char (*)[80])0) ? 
(char *)(& (vha->hw)->model_desc) : (char *)""); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_pci_info_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; char pci_info[30U] ; char *tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; tmp___0 = (*(((vha->hw)->isp_ops)->pci_info_str))(vha, (char *)(& pci_info)); tmp___1 = snprintf(buf, 4096UL, "%s\n", tmp___0); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_link_state_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int len ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; len = 0; tmp___7 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___7 == 2) { len = snprintf(buf, 4096UL, "Link Down\n"); } else { tmp___8 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___8 == 6) { len = snprintf(buf, 4096UL, "Link Down\n"); } else if ((vha->device_flags & 2U) != 0U) { len = snprintf(buf, 4096UL, "Link Down\n"); } else { tmp___5 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___5 != 5) { len = snprintf(buf, 4096UL, "Unknown Link State\n"); } else { tmp___6 = qla2x00_reset_active(vha); if (tmp___6 != 0) { len = snprintf(buf, 4096UL, "Unknown Link State\n"); } else { len = snprintf(buf, 4096UL, "Link Up - "); switch ((int )ha->current_topology) { case 1: tmp___0 = snprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "Loop\n"); len = tmp___0 + len; goto ldv_61135; case 4: tmp___1 = snprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "FL_Port\n"); len = tmp___1 + len; goto ldv_61135; case 2: tmp___2 = snprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "N_Port to N_Port\n"); len = tmp___2 + len; goto ldv_61135; case 8: tmp___3 = snprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "F_Port\n"); len = tmp___3 + len; goto ldv_61135; default: tmp___4 = snprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "Loop\n"); len = tmp___4 + len; goto ldv_61135; } ldv_61135: ; } } } } return ((ssize_t )len); } } static ssize_t qla2x00_zio_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int len ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; len = 0; switch ((int )(vha->hw)->zio_mode) { case 6: tmp___0 = snprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "Mode 6\n"); len = tmp___0 + len; goto ldv_61150; case 0: tmp___1 = snprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "Disabled\n"); len = tmp___1 + len; goto ldv_61150; } ldv_61150: ; return ((ssize_t )len); } } static ssize_t qla2x00_zio_store(struct device *dev , struct device_attribute *attr , char const *buf , size_t count ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int val ; uint16_t zio_mode ; int tmp___0 ; size_t tmp___1 ; { __mptr = (struct device const *)dev; tmp = 
shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; val = 0; if ((ha->device_type & 268435456U) == 0U) { return (-524L); } else { } tmp___0 = sscanf(buf, "%d", & val); if (tmp___0 != 1) { return (-22L); } else { } if (val != 0) { zio_mode = 6U; } else { zio_mode = 0U; } if ((unsigned int )zio_mode != 0U || (unsigned int )ha->zio_mode != 0U) { ha->zio_mode = zio_mode; set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } tmp___1 = strlen(buf); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_zio_timer_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; tmp___0 = snprintf(buf, 4096UL, "%d us\n", (int )(vha->hw)->zio_timer * 100); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_zio_timer_store(struct device *dev , struct device_attribute *attr , char const *buf , size_t count ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int val ; uint16_t zio_timer ; int tmp___0 ; size_t tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; val = 0; tmp___0 = sscanf(buf, "%d", & val); if (tmp___0 != 1) { return (-22L); } else { } if (val > 25500 || val <= 99) { return (-34L); } else { } zio_timer = (unsigned short )(val / 100); (vha->hw)->zio_timer = zio_timer; tmp___1 = strlen(buf); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_beacon_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int len ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; len = 0; if ((unsigned int )(vha->hw)->beacon_blink_led != 0U) { tmp___0 = snprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "Enabled\n"); len = tmp___0 + len; } else { tmp___1 = snprintf(buf + (unsigned long )len, 4096UL - (unsigned long )len, "Disabled\n"); len = tmp___1 + len; } return ((ssize_t )len); } } static ssize_t qla2x00_beacon_store(struct device *dev , struct device_attribute *attr , char const *buf , size_t count ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int val ; int rval ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; val = 0; if ((int )ha->device_type & 1 || (ha->device_type & 2U) != 0U) { return (-1L); } else { } tmp___0 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { ql_log(1U, vha, 28794, "Abort ISP active -- ignoring beacon request.\n"); return (-16L); } else { } tmp___1 = sscanf(buf, "%d", & val); if (tmp___1 != 1) { return (-22L); } else { } if (val != 0) { rval = (*((ha->isp_ops)->beacon_on))(vha); } else { rval = (*((ha->isp_ops)->beacon_off))(vha); } if (rval != 0) { count = 0UL; } else { } return ((ssize_t )count); } } static ssize_t qla2x00_optrom_bios_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { __mptr = (struct device 
const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = snprintf(buf, 4096UL, "%d.%02d\n", (int )ha->bios_revision[1], (int )ha->bios_revision[0]); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_optrom_efi_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = snprintf(buf, 4096UL, "%d.%02d\n", (int )ha->efi_revision[1], (int )ha->efi_revision[0]); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_optrom_fcode_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = snprintf(buf, 4096UL, "%d.%02d\n", (int )ha->fcode_revision[1], (int )ha->fcode_revision[0]); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_optrom_fw_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = snprintf(buf, 4096UL, "%d.%02d.%02d %d\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], ha->fw_revision[3]); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_optrom_gold_fw_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; if ((ha->device_type & 8192U) == 0U && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) { tmp___0 = snprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } tmp___1 = snprintf(buf, 4096UL, "%d.%02d.%02d (%d)\n", ha->gold_fw_version[0], ha->gold_fw_version[1], ha->gold_fw_version[2], ha->gold_fw_version[3]); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_total_isp_aborts_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; tmp___0 = snprintf(buf, 4096UL, "%d\n", vha->qla_stats.total_isp_aborts); return ((ssize_t )tmp___0); } } static ssize_t qla24xx_84xx_fw_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { int rval ; uint16_t status[2U] ; scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { rval = 0; status[0] = 0U; status[1] = 0U; __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; if ((ha->device_type & 4096U) == 0U) { tmp___0 = snprintf(buf, 4096UL, "\n"); 
return ((ssize_t )tmp___0); } else { } if ((ha->cs84xx)->op_fw_version == 0U) { rval = qla84xx_verify_chip(vha, (uint16_t *)(& status)); } else { } if (rval == 0 && (unsigned int )status[0] == 0U) { tmp___1 = snprintf(buf, 4096UL, "%u\n", (ha->cs84xx)->op_fw_version); return ((ssize_t )tmp___1); } else { } tmp___2 = snprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___2); } } static ssize_t qla2x00_mpi_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; if (((ha->device_type & 8192U) == 0U && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U) { tmp___0 = snprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } tmp___1 = snprintf(buf, 4096UL, "%d.%02d.%02d (%x)\n", (int )ha->mpi_version[0], (int )ha->mpi_version[1], (int )ha->mpi_version[2], ha->mpi_capabilities); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_phy_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; if ((ha->device_type & 8192U) == 0U && (ha->device_type & 65536U) == 0U) { tmp___0 = snprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } tmp___1 = snprintf(buf, 4096UL, "%d.%02d.%02d\n", (int )ha->phy_version[0], (int )ha->phy_version[1], (int )ha->phy_version[2]); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_flash_block_size_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = snprintf(buf, 4096UL, "0x%x\n", ha->fdt_block_size); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_vlan_id_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; if (((((vha->hw)->device_type & 8192U) == 0U && ((vha->hw)->device_type & 16384U) == 0U) && ((vha->hw)->device_type & 65536U) == 0U) && ((vha->hw)->device_type & 262144U) == 0U) { tmp___0 = snprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } tmp___1 = snprintf(buf, 4096UL, "%d\n", (int )vha->fcoe_vlan_id); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_vn_port_mac_address_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; if (((((vha->hw)->device_type & 8192U) == 0U && ((vha->hw)->device_type & 16384U) == 0U) && ((vha->hw)->device_type & 65536U) == 0U) && ((vha->hw)->device_type & 262144U) == 0U) { tmp___0 = snprintf(buf, 4096UL, "\n"); return 
((ssize_t )tmp___0); } else { } tmp___1 = snprintf(buf, 4096UL, "%pMR\n", (uint8_t *)(& vha->fcoe_vn_port_mac)); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_fabric_param_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; tmp___0 = snprintf(buf, 4096UL, "%d\n", (int )(vha->hw)->switch_cap); return ((ssize_t )tmp___0); } } static ssize_t qla2x00_thermal_temp_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; uint16_t temp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; temp = 0U; tmp___0 = qla2x00_reset_active(vha); if (tmp___0 != 0) { ql_log(1U, vha, 28892, "ISP reset active.\n"); goto done; } else { } if (*((unsigned long *)vha->hw + 2UL) != 0UL) { ql_log(1U, vha, 28893, "PCI EEH busy.\n"); goto done; } else { } tmp___2 = qla2x00_get_thermal_temp(vha, & temp); if (tmp___2 == 0) { tmp___1 = snprintf(buf, 4096UL, "%d\n", (int )temp); return ((ssize_t )tmp___1); } else { } done: tmp___3 = snprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___3); } } static ssize_t qla2x00_fw_state_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int rval ; uint16_t state[5U] ; uint32_t pstate ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; rval = 258; if (((vha->hw)->device_type & 131072U) != 0U) { pstate = qlafx00_fw_state_show(dev, attr, buf); tmp___0 = snprintf(buf, 4096UL, "0x%x\n", pstate); return ((ssize_t )tmp___0); } else { } tmp___1 = qla2x00_reset_active(vha); if (tmp___1 != 0) { ql_log(1U, vha, 28796, "ISP reset active.\n"); } else if (*((unsigned long *)vha->hw + 2UL) == 0UL) { rval = qla2x00_get_firmware_state(vha, (uint16_t *)(& state)); } else { } if (rval != 0) { memset((void *)(& state), -1, 10UL); } else { } tmp___2 = snprintf(buf, 4096UL, "0x%x 0x%x 0x%x 0x%x 0x%x\n", (int )state[0], (int )state[1], (int )state[2], (int )state[3], (int )state[4]); return ((ssize_t )tmp___2); } } static ssize_t qla2x00_diag_requests_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; if (((vha->hw)->device_type & 2048U) == 0U && ((vha->hw)->device_type & 32768U) == 0U) { tmp___0 = snprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } tmp___1 = snprintf(buf, 4096UL, "%llu\n", vha->bidi_stats.io_count); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_diag_megabytes_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; if (((vha->hw)->device_type & 2048U) == 0U && ((vha->hw)->device_type & 32768U) == 0U) { 
tmp___0 = snprintf(buf, 4096UL, "\n"); return ((ssize_t )tmp___0); } else { } tmp___1 = snprintf(buf, 4096UL, "%llu\n", vha->bidi_stats.transfer_bytes >> 20); return ((ssize_t )tmp___1); } } static ssize_t qla2x00_fw_dump_size_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; struct qla_hw_data *ha ; uint32_t size ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; if (ha->fw_dumped == 0) { size = 0U; } else if ((ha->device_type & 16384U) != 0U) { size = ha->md_template_size + ha->md_dump_size; } else { size = ha->fw_dump_len; } tmp___0 = snprintf(buf, 4096UL, "%d\n", size); return ((ssize_t )tmp___0); } } static struct device_attribute dev_attr_driver_version = {{"driver_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_drvr_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fw_version = {{"fw_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_fw_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_serial_num = {{"serial_num", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_serial_num_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_isp_name = {{"isp_name", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_isp_name_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_isp_id = {{"isp_id", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_isp_id_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_model_name = {{"model_name", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_model_name_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_model_desc = {{"model_desc", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_model_desc_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_pci_info = {{"pci_info", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_pci_info_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_link_state = {{"link_state", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_link_state_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_zio = {{"zio", 420U, (_Bool)0, 0, {{{(char)0}, 
{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_zio_show, & qla2x00_zio_store}; static struct device_attribute dev_attr_zio_timer = {{"zio_timer", 420U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_zio_timer_show, & qla2x00_zio_timer_store}; static struct device_attribute dev_attr_beacon = {{"beacon", 420U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_beacon_show, & qla2x00_beacon_store}; static struct device_attribute dev_attr_optrom_bios_version = {{"optrom_bios_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_optrom_bios_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_optrom_efi_version = {{"optrom_efi_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_optrom_efi_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_optrom_fcode_version = {{"optrom_fcode_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_optrom_fcode_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_optrom_fw_version = {{"optrom_fw_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_optrom_fw_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_optrom_gold_fw_version = {{"optrom_gold_fw_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_optrom_gold_fw_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_84xx_fw_version = {{"84xx_fw_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla24xx_84xx_fw_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_total_isp_aborts = {{"total_isp_aborts", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_total_isp_aborts_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_mpi_version = {{"mpi_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_mpi_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_phy_version = {{"phy_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_phy_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_flash_block_size = {{"flash_block_size", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, 
{(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_flash_block_size_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_vlan_id = {{"vlan_id", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_vlan_id_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_vn_port_mac_address = {{"vn_port_mac_address", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_vn_port_mac_address_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fabric_param = {{"fabric_param", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_fabric_param_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fw_state = {{"fw_state", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_fw_state_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_thermal_temp = {{"thermal_temp", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_thermal_temp_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_diag_requests = {{"diag_requests", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_diag_requests_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_diag_megabytes = {{"diag_megabytes", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_diag_megabytes_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fw_dump_size = {{"fw_dump_size", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla2x00_fw_dump_size_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; struct device_attribute *qla2x00_host_attrs[31U] = { & dev_attr_driver_version, & dev_attr_fw_version, & dev_attr_serial_num, & dev_attr_isp_name, & dev_attr_isp_id, & dev_attr_model_name, & dev_attr_model_desc, & dev_attr_pci_info, & dev_attr_link_state, & dev_attr_zio, & dev_attr_zio_timer, & dev_attr_beacon, & dev_attr_optrom_bios_version, & dev_attr_optrom_efi_version, & dev_attr_optrom_fcode_version, & dev_attr_optrom_fw_version, & dev_attr_84xx_fw_version, & dev_attr_total_isp_aborts, & dev_attr_mpi_version, & dev_attr_phy_version, & dev_attr_flash_block_size, & dev_attr_vlan_id, & dev_attr_vn_port_mac_address, & dev_attr_fabric_param, & dev_attr_fw_state, & dev_attr_optrom_gold_fw_version, & dev_attr_thermal_temp, & dev_attr_diag_requests, & dev_attr_diag_megabytes, & dev_attr_fw_dump_size, (struct device_attribute *)0}; static void qla2x00_get_host_port_id(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = shost_priv(shost); vha = 
(scsi_qla_host_t *)tmp; ((struct fc_host_attrs *)shost->shost_data)->port_id = (u32 )((((int )vha->d_id.b.domain << 16) | ((int )vha->d_id.b.area << 8)) | (int )vha->d_id.b.al_pa); return; } } static void qla2x00_get_host_speed(struct Scsi_Host *shost ) { struct qla_hw_data *ha ; void *tmp ; u32 speed ; { tmp = shost_priv(shost); ha = ((struct scsi_qla_host *)tmp)->hw; speed = 0U; if ((ha->device_type & 131072U) != 0U) { qlafx00_get_host_speed(shost); return; } else { } switch ((int )ha->link_data_rate) { case 0: speed = 1U; goto ldv_61407; case 1: speed = 2U; goto ldv_61407; case 3: speed = 8U; goto ldv_61407; case 4: speed = 16U; goto ldv_61407; case 19: speed = 4U; goto ldv_61407; case 5: speed = 32U; goto ldv_61407; } ldv_61407: ((struct fc_host_attrs *)shost->shost_data)->speed = speed; return; } } static void qla2x00_get_host_port_type(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; uint32_t port_type ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; port_type = 0U; if ((unsigned int )vha->vp_idx != 0U) { ((struct fc_host_attrs *)shost->shost_data)->port_type = 7; return; } else { } switch ((int )(vha->hw)->current_topology) { case 1: port_type = 5U; goto ldv_61419; case 4: port_type = 4U; goto ldv_61419; case 2: port_type = 6U; goto ldv_61419; case 8: port_type = 3U; goto ldv_61419; } ldv_61419: ((struct fc_host_attrs *)shost->shost_data)->port_type = (enum fc_port_type )port_type; return; } } static void qla2x00_get_starget_node_name(struct scsi_target *starget ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; scsi_qla_host_t *vha ; void *tmp___0 ; fc_port_t *fcport ; u64 node_name ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { tmp = dev_to_shost(starget->dev.parent); host = tmp; tmp___0 = shost_priv(host); vha = (scsi_qla_host_t *)tmp___0; node_name = 0ULL; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_61436; ldv_61435: ; if ((unsigned long )fcport->rport != (unsigned long )((struct fc_rport *)0) && starget->id == (fcport->rport)->scsi_target_id) { node_name = wwn_to_u64((u8 *)(& fcport->node_name)); goto ldv_61434; } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_61436: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_61435; } else { } ldv_61434: ((struct fc_starget_attrs *)(& starget->starget_data))->node_name = node_name; return; } } static void qla2x00_get_starget_port_name(struct scsi_target *starget ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; scsi_qla_host_t *vha ; void *tmp___0 ; fc_port_t *fcport ; u64 port_name ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { tmp = dev_to_shost(starget->dev.parent); host = tmp; tmp___0 = shost_priv(host); vha = (scsi_qla_host_t *)tmp___0; port_name = 0ULL; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_61450; ldv_61449: ; if ((unsigned long )fcport->rport != (unsigned long )((struct fc_rport *)0) && starget->id == (fcport->rport)->scsi_target_id) { port_name = wwn_to_u64((u8 *)(& fcport->port_name)); goto ldv_61448; } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_61450: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_61449; } else { } ldv_61448: ((struct fc_starget_attrs *)(& starget->starget_data))->port_name = port_name; return; } } static void 
qla2x00_get_starget_port_id(struct scsi_target *starget ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; scsi_qla_host_t *vha ; void *tmp___0 ; fc_port_t *fcport ; uint32_t port_id ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { tmp = dev_to_shost(starget->dev.parent); host = tmp; tmp___0 = shost_priv(host); vha = (scsi_qla_host_t *)tmp___0; port_id = 4294967295U; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_61464; ldv_61463: ; if ((unsigned long )fcport->rport != (unsigned long )((struct fc_rport *)0) && starget->id == (fcport->rport)->scsi_target_id) { port_id = (uint32_t )((((int )fcport->d_id.b.domain << 16) | ((int )fcport->d_id.b.area << 8)) | (int )fcport->d_id.b.al_pa); goto ldv_61462; } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_61464: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_61463; } else { } ldv_61462: ((struct fc_starget_attrs *)(& starget->starget_data))->port_id = port_id; return; } } static void qla2x00_set_rport_loss_tmo(struct fc_rport *rport , uint32_t timeout ) { { if (timeout != 0U) { rport->dev_loss_tmo = timeout; } else { rport->dev_loss_tmo = 1U; } return; } } static void qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; fc_port_t *fcport ; unsigned long flags ; raw_spinlock_t *tmp___0 ; struct fc_rport *tmp___1 ; int tmp___2 ; int tmp___3 ; long tmp___4 ; { tmp = dev_to_shost(rport->dev.parent); host = tmp; fcport = *((fc_port_t **)rport->dd_data); if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { return; } else { } qla2x00_set_fcport_state___1(fcport, 2); tmp___0 = spinlock_check(host->host_lock); flags = _raw_spin_lock_irqsave(tmp___0); tmp___1 = (struct fc_rport *)0; fcport->drport = tmp___1; fcport->rport = tmp___1; *((fc_port_t **)rport->dd_data) = (fc_port_t *)0; spin_unlock_irqrestore(host->host_lock, flags); tmp___2 = constant_test_bit(3L, (unsigned long const volatile *)(& (fcport->vha)->dpc_flags)); if (tmp___2 != 0) { return; } else { } tmp___3 = pci_channel_offline(((fcport->vha)->hw)->pdev); tmp___4 = ldv__builtin_expect(tmp___3 != 0, 0L); if (tmp___4 != 0L) { qla2x00_abort_all_cmds(fcport->vha, 65536); return; } else { } return; } } static void qla2x00_terminate_rport_io(struct fc_rport *rport ) { fc_port_t *fcport ; int tmp ; int tmp___0 ; long tmp___1 ; { fcport = *((fc_port_t **)rport->dd_data); if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { return; } else { } tmp = constant_test_bit(3L, (unsigned long const volatile *)(& (fcport->vha)->dpc_flags)); if (tmp != 0) { return; } else { } tmp___0 = pci_channel_offline(((fcport->vha)->hw)->pdev); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 0L); if (tmp___1 != 0L) { qla2x00_abort_all_cmds(fcport->vha, 65536); return; } else { } if ((unsigned int )fcport->loop_id != 4096U) { if ((((fcport->vha)->hw)->device_type & 134217728U) != 0U) { (*((((fcport->vha)->hw)->isp_ops)->fabric_logout))(fcport->vha, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { qla2x00_port_logout(fcport->vha, fcport); } } else { } return; } } static int qla2x00_issue_lip(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; if (((vha->hw)->device_type & 131072U) != 0U) { return (0); } else { } qla2x00_loop_reset(vha); return (0); } 
} static struct fc_host_statistics *qla2x00_get_fc_host_stats(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; struct scsi_qla_host *base_vha ; void *tmp___0 ; int rval ; struct link_statistics *stats ; dma_addr_t stats_dma ; struct fc_host_statistics *pfc_host_stat ; int tmp___1 ; int tmp___2 ; long tmp___3 ; int tmp___4 ; void *tmp___5 ; int tmp___6 ; u64 tmp___7 ; uint32_t __base ; uint32_t __rem ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp___0; pfc_host_stat = & vha->fc_host_stat; memset((void *)pfc_host_stat, -1, 232UL); if (((vha->hw)->device_type & 131072U) != 0U) { goto done; } else { } tmp___1 = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { goto done; } else { } tmp___2 = pci_channel_offline(ha->pdev); tmp___3 = ldv__builtin_expect(tmp___2 != 0, 0L); if (tmp___3 != 0L) { goto done; } else { } tmp___4 = qla2x00_reset_active(vha); if (tmp___4 != 0) { goto done; } else { } tmp___5 = dma_pool_alloc(ha->s_dma_pool, 208U, & stats_dma); stats = (struct link_statistics *)tmp___5; if ((unsigned long )stats == (unsigned long )((struct link_statistics *)0)) { ql_log(1U, vha, 28797, "Failed to allocate memory for stats.\n"); goto done; } else { } memset((void *)stats, 0, 256UL); rval = 258; if ((ha->device_type & 134217728U) != 0U) { rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma); } else { tmp___6 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___6 == 5 && (unsigned int )ha->dpc_active == 0U) { rval = qla2x00_get_link_status(base_vha, (int )base_vha->loop_id, stats, stats_dma); } else { } } if (rval != 0) { goto done_free; } else { } pfc_host_stat->link_failure_count = (u64 )stats->link_fail_cnt; pfc_host_stat->loss_of_sync_count = (u64 )stats->loss_sync_cnt; pfc_host_stat->loss_of_signal_count = (u64 )stats->loss_sig_cnt; pfc_host_stat->prim_seq_protocol_err_count = (u64 )stats->prim_seq_err_cnt; pfc_host_stat->invalid_tx_word_count = (u64 )stats->inval_xmit_word_cnt; pfc_host_stat->invalid_crc_count = (u64 )stats->inval_crc_cnt; if ((ha->device_type & 134217728U) != 0U) { pfc_host_stat->lip_count = (u64 )stats->lip_cnt; pfc_host_stat->tx_frames = (u64 )stats->tx_frames; pfc_host_stat->rx_frames = (u64 )stats->rx_frames; pfc_host_stat->dumped_frames = (u64 )stats->discarded_frames; pfc_host_stat->nos_count = (u64 )stats->nos_rcvd; pfc_host_stat->error_frames = (u64 )(stats->dropped_frames + stats->discarded_frames); pfc_host_stat->rx_words = vha->qla_stats.input_bytes; pfc_host_stat->tx_words = vha->qla_stats.output_bytes; } else { } pfc_host_stat->fcp_control_requests = (u64 )vha->qla_stats.control_requests; pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests; pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests; pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20; pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20; tmp___7 = get_jiffies_64(); pfc_host_stat->seconds_since_last_reset = tmp___7 - vha->qla_stats.jiffies_at_last_reset; __base = 250U; __rem = (uint32_t )(pfc_host_stat->seconds_since_last_reset % (u64 )__base); pfc_host_stat->seconds_since_last_reset = pfc_host_stat->seconds_since_last_reset / (u64 )__base; done_free: dma_pool_free(ha->s_dma_pool, (void *)stats, stats_dma); done: ; return (pfc_host_stat); } } static void qla2x00_reset_host_stats(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; 
void *tmp ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; memset((void *)(& vha->fc_host_stat), 0, 232UL); vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); return; } } static void qla2x00_get_host_symbolic_name(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; qla2x00_get_sym_node_name(vha, (uint8_t *)(& ((struct fc_host_attrs *)shost->shost_data)->symbolic_name)); return; } } static void qla2x00_set_host_system_hostname(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; set_bit(12L, (unsigned long volatile *)(& vha->dpc_flags)); return; } } static void qla2x00_get_host_fabric_name(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; uint8_t node_name[8U] ; u64 fabric_name ; u64 tmp___0 ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; node_name[0] = 255U; node_name[1] = 255U; node_name[2] = 255U; node_name[3] = 255U; node_name[4] = 255U; node_name[5] = 255U; node_name[6] = 255U; node_name[7] = 255U; tmp___0 = wwn_to_u64((u8 *)(& node_name)); fabric_name = tmp___0; if ((int )vha->device_flags & 1) { fabric_name = wwn_to_u64((u8 *)(& vha->fabric_node_name)); } else { } ((struct fc_host_attrs *)shost->shost_data)->fabric_name = fabric_name; return; } } static void qla2x00_get_host_port_state(struct Scsi_Host *shost ) { scsi_qla_host_t *vha ; void *tmp ; struct scsi_qla_host *base_vha ; void *tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = shost_priv(shost); vha = (scsi_qla_host_t *)tmp; tmp___0 = pci_get_drvdata((vha->hw)->pdev); base_vha = (struct scsi_qla_host *)tmp___0; if (*((unsigned long *)base_vha + 19UL) == 0UL) { ((struct fc_host_attrs *)shost->shost_data)->port_state = 3; return; } else { } tmp___1 = atomic_read((atomic_t const *)(& base_vha->loop_state)); switch (tmp___1) { case 4: ((struct fc_host_attrs *)shost->shost_data)->port_state = 6; goto ldv_61525; case 2: tmp___2 = constant_test_bit(4L, (unsigned long const volatile *)(& base_vha->dpc_flags)); if (tmp___2 != 0) { ((struct fc_host_attrs *)shost->shost_data)->port_state = 6; } else { ((struct fc_host_attrs *)shost->shost_data)->port_state = 7; } goto ldv_61525; case 6: ((struct fc_host_attrs *)shost->shost_data)->port_state = 7; goto ldv_61525; case 5: ((struct fc_host_attrs *)shost->shost_data)->port_state = 2; goto ldv_61525; default: ((struct fc_host_attrs *)shost->shost_data)->port_state = 0; goto ldv_61525; } ldv_61525: ; return; } } static int qla24xx_vport_create(struct fc_vport *fc_vport , bool disable ) { int ret ; uint8_t qos ; scsi_qla_host_t *base_vha ; void *tmp ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; uint16_t options ; int cnt ; struct req_que *req ; int tmp___0 ; int tmp___1 ; int prot ; int guard ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { ret = 0; qos = 0U; tmp = shost_priv(fc_vport->shost); base_vha = (scsi_qla_host_t *)tmp; vha = (scsi_qla_host_t *)0; ha = base_vha->hw; options = 0U; req = *(ha->req_q_map); ret = qla24xx_vport_create_req_sanity_check(fc_vport); if (ret != 0) { ql_log(1U, vha, 28798, "Vport sanity check failed, status %x\n", ret); return (ret); } else { } vha = qla24xx_create_vhost(fc_vport); if ((unsigned long )vha == (unsigned long )((scsi_qla_host_t *)0)) { ql_log(1U, vha, 28799, "Vport create host failed.\n"); return (9); } else { } if ((int )disable) { atomic_set(& vha->vp_state, 0); fc_vport_set_state(fc_vport, 2); } else { atomic_set(& vha->vp_state, 2); } ql_log(2U, vha, 28800, "VP entry id %d 
assigned.\n", (int )vha->vp_idx); atomic_set(& vha->loop_state, 2); vha->vp_err_state = 1U; vha->vp_prev_err_state = 0U; tmp___0 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___0 == 2) { goto _L; } else { tmp___1 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___1 == 6) { _L: /* CIL Label */ ql_dbg(8388608U, vha, 28801, "Vport loop state is not UP.\n"); atomic_set(& vha->loop_state, 6); if (! disable) { fc_vport_set_state(fc_vport, 3); } else { } } else { } } if ((ha->device_type & 33554432U) != 0U && ql2xenabledif != 0) { if (((int )ha->fw_attributes & 16) != 0) { prot = 0; vha->flags.difdix_supported = 1U; ql_dbg(8388608U, vha, 28802, "Registered for DIF/DIX type 1 and 3 protection.\n"); if (ql2xenabledif == 1) { prot = 8; } else { } scsi_host_set_prot(vha->host, (unsigned int )(prot | 119)); guard = 1; if (((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) && ql2xenabledif > 1) { guard = guard | 2; } else { } scsi_host_set_guard(vha->host, (int )((unsigned char )guard)); } else { vha->flags.difdix_supported = 0U; } } else { } tmp___2 = ldv_scsi_add_host_with_dma_49(vha->host, & fc_vport->dev, & (ha->pdev)->dev); if (tmp___2 != 0) { ql_dbg(8388608U, vha, 28803, "scsi_add_host failure for VP[%d].\n", (int )vha->vp_idx); goto vport_create_failed_2; } else { } ((struct fc_host_attrs *)(vha->host)->shost_data)->dev_loss_tmo = (u32 )ha->port_down_retry_count; ((struct fc_host_attrs *)(vha->host)->shost_data)->node_name = wwn_to_u64((u8 *)(& vha->node_name)); ((struct fc_host_attrs *)(vha->host)->shost_data)->port_name = wwn_to_u64((u8 *)(& vha->port_name)); ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_classes = ((struct fc_host_attrs *)(base_vha->host)->shost_data)->supported_classes; ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_speeds = ((struct fc_host_attrs *)(base_vha->host)->shost_data)->supported_speeds; qlt_vport_create(vha, ha); qla24xx_vport_disable(fc_vport, (int )disable); if (*((unsigned long *)ha + 2UL) != 0UL) { req = *(ha->req_q_map + 1UL); ql_dbg(1048576U, vha, 49152, "Request queue %p attached with VP[%d], cpu affinity =%d\n", req, (int )vha->vp_idx, (int )ha->flags.cpu_affinity_enabled); goto vport_queue; } else if (ql2xmaxqueues == 1 || (unsigned long )ha->npiv_info == (unsigned long )((struct qla_npiv_entry *)0)) { goto vport_queue; } else { } cnt = 0; goto ldv_61548; ldv_61547: tmp___3 = memcmp((void const *)(& (ha->npiv_info + (unsigned long )cnt)->port_name), (void const *)(& vha->port_name), 8UL); if (tmp___3 == 0) { tmp___4 = memcmp((void const *)(& (ha->npiv_info + (unsigned long )cnt)->node_name), (void const *)(& vha->node_name), 8UL); if (tmp___4 == 0) { qos = (ha->npiv_info + (unsigned long )cnt)->q_qos; goto ldv_61546; } else { } } else { } cnt = cnt + 1; ldv_61548: ; if ((int )ha->nvram_npiv_size > cnt) { goto ldv_61547; } else { } ldv_61546: ; if ((unsigned int )qos != 0U) { ret = qla25xx_create_req_que(ha, (int )options, (int )((uint8_t )vha->vp_idx), 0, 0, (int )qos); if (ret == 0) { ql_log(1U, vha, 28804, "Can\'t create request queue for VP[%d]\n", (int )vha->vp_idx); } else { ql_dbg(1048576U, vha, 49153, "Request Que:%d Q0s: %d) created for VP[%d]\n", ret, (int )qos, (int )vha->vp_idx); ql_dbg(8388608U, vha, 28805, "Request Que:%d Q0s: %d) created for VP[%d]\n", ret, (int )qos, (int )vha->vp_idx); req = *(ha->req_q_map + (unsigned long )ret); } } else { } vport_queue: vha->req = req; return (0); vport_create_failed_2: qla24xx_disable_vp(vha); 
qla24xx_deallocate_vp_id(vha); scsi_host_put(vha->host); return (9); } } static int qla24xx_vport_delete(struct fc_vport *fc_vport ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; uint16_t id ; int tmp ; int tmp___0 ; int tmp___1 ; long tmp___2 ; int tmp___3 ; { vha = (scsi_qla_host_t *)fc_vport->dd_data; ha = vha->hw; id = vha->vp_idx; goto ldv_61556; ldv_61555: msleep(1000U); ldv_61556: tmp = constant_test_bit(5L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp != 0) { goto ldv_61555; } else { tmp___0 = constant_test_bit(13L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { goto ldv_61555; } else { goto ldv_61557; } } ldv_61557: qla24xx_disable_vp(vha); vha->flags.delete_progress = 1U; fc_remove_host(vha->host); ldv_scsi_remove_host_50(vha->host); qla24xx_deallocate_vp_id(vha); if (vha->timer_active != 0U) { qla2x00_vp_stop_timer(vha); ql_dbg(8388608U, vha, 28806, "Timer for the VP[%d] has stopped\n", (int )vha->vp_idx); } else { } tmp___1 = atomic_read((atomic_t const *)(& vha->vref_count)); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_attr.o.c.prepared"), "i" (2309), "i" (12UL)); ldv_61558: ; goto ldv_61558; } else { } qla2x00_free_fcports(vha); mutex_lock_nested(& ha->vport_lock, 0U); ha->cur_vport_count = ha->cur_vport_count - 1; clear_bit((long )vha->vp_idx, (unsigned long volatile *)(& ha->vp_idx_map)); mutex_unlock(& ha->vport_lock); if ((unsigned int )(vha->req)->id != 0U && *((unsigned long *)ha + 2UL) == 0UL) { tmp___3 = qla25xx_delete_req_que(vha, vha->req); if (tmp___3 != 0) { ql_log(1U, vha, 28807, "Queue delete failed.\n"); } else { } } else { } ql_log(2U, vha, 28808, "VP[%d] deleted.\n", (int )id); scsi_host_put(vha->host); return (0); } } static int qla24xx_vport_disable(struct fc_vport *fc_vport , bool disable ) { scsi_qla_host_t *vha ; { vha = (scsi_qla_host_t *)fc_vport->dd_data; if ((int )disable) { qla24xx_disable_vp(vha); } else { qla24xx_enable_vp(vha); } return (0); } } struct fc_function_template qla2xxx_transport_functions = {0, & qla2x00_set_rport_loss_tmo, & qla2x00_get_starget_node_name, & qla2x00_get_starget_port_name, & qla2x00_get_starget_port_id, & qla2x00_get_host_port_id, & qla2x00_get_host_port_type, & qla2x00_get_host_port_state, 0, & qla2x00_get_host_speed, & qla2x00_get_host_fabric_name, & qla2x00_get_host_symbolic_name, & qla2x00_set_host_system_hostname, & qla2x00_get_fc_host_stats, & qla2x00_reset_host_stats, & qla2x00_issue_lip, & qla2x00_dev_loss_tmo_callbk, & qla2x00_terminate_rport_io, 0, & qla24xx_vport_create, & qla24xx_vport_disable, & qla24xx_vport_delete, 0, 0, & qla24xx_bsg_request, & qla24xx_bsg_timeout, 8U, 0U, 0U, (unsigned char)0, 1U, 1U, 1U, 1U, 1U, 1U, 1U, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, (unsigned char)0, 1U, 1U, 1U, 1U, (unsigned char)0}; struct fc_function_template qla2xxx_transport_vport_functions = {0, & qla2x00_set_rport_loss_tmo, & qla2x00_get_starget_node_name, & qla2x00_get_starget_port_name, & 
qla2x00_get_starget_port_id, & qla2x00_get_host_port_id, & qla2x00_get_host_port_type, & qla2x00_get_host_port_state, 0, & qla2x00_get_host_speed, & qla2x00_get_host_fabric_name, & qla2x00_get_host_symbolic_name, & qla2x00_set_host_system_hostname, & qla2x00_get_fc_host_stats, & qla2x00_reset_host_stats, & qla2x00_issue_lip, & qla2x00_dev_loss_tmo_callbk, & qla2x00_terminate_rport_io, 0, 0, 0, 0, 0, 0, & qla24xx_bsg_request, & qla24xx_bsg_timeout, 8U, 0U, 0U, (unsigned char)0, 1U, 1U, 1U, 1U, 1U, 1U, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, (unsigned char)0, 1U, 1U, 1U, 1U, (unsigned char)0}; void qla2x00_init_host_attr(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; u32 speed ; { ha = vha->hw; speed = 0U; ((struct fc_host_attrs *)(vha->host)->shost_data)->dev_loss_tmo = (u32 )ha->port_down_retry_count; ((struct fc_host_attrs *)(vha->host)->shost_data)->node_name = wwn_to_u64((u8 *)(& vha->node_name)); ((struct fc_host_attrs *)(vha->host)->shost_data)->port_name = wwn_to_u64((u8 *)(& vha->port_name)); ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_classes = (unsigned int )*((unsigned char *)ha + 3808UL) != 0U ? 12U : 8U; ((struct fc_host_attrs *)(vha->host)->shost_data)->max_npiv_vports = ha->max_npiv_vports; ((struct fc_host_attrs *)(vha->host)->shost_data)->npiv_vports_inuse = (u16 )ha->cur_vport_count; if ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { speed = 4U; } else if ((ha->device_type & 32768U) != 0U) { speed = 56U; } else if ((ha->device_type & 2048U) != 0U) { speed = 27U; } else if ((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) { speed = 11U; } else if (((((ha->device_type & 4U) != 0U || (ha->device_type & 8U) != 0U) || (ha->device_type & 16U) != 0U) || (ha->device_type & 32U) != 0U) || (ha->device_type & 64U) != 0U) { speed = 3U; } else if ((ha->device_type & 131072U) != 0U) { speed = 27U; } else { speed = 1U; } ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_speeds = speed; return; } } extern int ldv_release_52(void) ; extern int ldv_release_58(void) ; extern int ldv_probe_56(void) ; extern int ldv_release_55(void) ; extern int ldv_probe_59(void) ; extern int ldv_probe_54(void) ; extern int ldv_release_54(void) ; extern int ldv_release_57(void) ; extern int ldv_probe_53(void) ; extern int ldv_release_53(void) ; extern int ldv_probe_51(void) ; extern int ldv_release_56(void) ; extern int ldv_probe_52(void) ; extern int ldv_release_59(void) ; extern int ldv_probe_55(void) ; extern int ldv_probe_58(void) ; extern int ldv_probe_57(void) ; extern int ldv_release_51(void) ; void ldv_initialize_bin_attribute_55(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(360UL); sysfs_vpd_attr_group0 = (struct file *)tmp; tmp___0 = ldv_zalloc(296UL); sysfs_vpd_attr_group2 = (struct kobject *)tmp___0; tmp___1 = ldv_zalloc(72UL); sysfs_vpd_attr_group1 = (struct bin_attribute *)tmp___1; return; } } int reg_timer_10(struct timer_list *timer ) { { ldv_timer_list_10 = timer; ldv_timer_state_10 = 1; return (0); } } void ldv_initialize_bin_attribute_58(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(360UL); 
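/* Annotation (added for readability): LDV environment stub, zero-allocated placeholder file, kobject and bin_attribute objects are wired to the sysfs_nvram_attr_group* globals below so that the harness can invoke the driver's nvram sysfs read/write callbacks against them. */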
sysfs_nvram_attr_group0 = (struct file *)tmp; tmp___0 = ldv_zalloc(296UL); sysfs_nvram_attr_group2 = (struct kobject *)tmp___0; tmp___1 = ldv_zalloc(72UL); sysfs_nvram_attr_group1 = (struct bin_attribute *)tmp___1; return; } } void ldv_initialize_bin_attribute_59(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(360UL); sysfs_fw_dump_attr_group0 = (struct file *)tmp; tmp___0 = ldv_zalloc(296UL); sysfs_fw_dump_attr_group2 = (struct kobject *)tmp___0; tmp___1 = ldv_zalloc(72UL); sysfs_fw_dump_attr_group1 = (struct bin_attribute *)tmp___1; return; } } void ldv_initialize_fc_function_template_19(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; { tmp = ldv_zalloc(3496UL); qla2xxx_transport_vport_functions_group0 = (struct Scsi_Host *)tmp; tmp___0 = ldv_zalloc(2168UL); qla2xxx_transport_vport_functions_group2 = (struct fc_rport *)tmp___0; tmp___1 = ldv_zalloc(184UL); qla2xxx_transport_vport_functions_group1 = (struct fc_bsg_job *)tmp___1; tmp___2 = ldv_zalloc(1552UL); qla2xxx_transport_vport_functions_group3 = (struct scsi_target *)tmp___2; return; } } void activate_pending_timer_10(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_10 == (unsigned long )timer) { if (ldv_timer_state_10 == 2 || pending_flag != 0) { ldv_timer_list_10 = timer; ldv_timer_list_10->data = data; ldv_timer_state_10 = 1; } else { } return; } else { } reg_timer_10(timer); ldv_timer_list_10->data = data; return; } } void ldv_initialize_device_attribute_41(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_zalloc(1376UL); dev_attr_zio_group0 = (struct device *)tmp; tmp___0 = ldv_zalloc(48UL); dev_attr_zio_group1 = (struct device_attribute *)tmp___0; return; } } void ldv_initialize_fc_function_template_20(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; void *tmp___3 ; { tmp = ldv_zalloc(3496UL); qla2xxx_transport_functions_group0 = (struct Scsi_Host *)tmp; tmp___0 = ldv_zalloc(2168UL); qla2xxx_transport_functions_group2 = (struct fc_rport *)tmp___0; tmp___1 = ldv_zalloc(184UL); qla2xxx_transport_functions_group1 = (struct fc_bsg_job *)tmp___1; tmp___2 = ldv_zalloc(1552UL); qla2xxx_transport_functions_group3 = (struct scsi_target *)tmp___2; tmp___3 = ldv_zalloc(1608UL); qla2xxx_transport_functions_group4 = (struct fc_vport *)tmp___3; return; } } void disable_suitable_timer_10(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_10) { ldv_timer_state_10 = 0; return; } else { } return; } } void ldv_initialize_device_attribute_40(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_zalloc(1376UL); dev_attr_zio_timer_group0 = (struct device *)tmp; tmp___0 = ldv_zalloc(48UL); dev_attr_zio_timer_group1 = (struct device_attribute *)tmp___0; return; } } void ldv_initialize_device_attribute_39(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_zalloc(1376UL); dev_attr_beacon_group0 = (struct device *)tmp; tmp___0 = ldv_zalloc(48UL); dev_attr_beacon_group1 = (struct device_attribute *)tmp___0; return; } } void choose_timer_10(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_10 = 2; return; } } void ldv_initialize_bin_attribute_57(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_zalloc(360UL); sysfs_optrom_attr_group0 = (struct file *)tmp; tmp___0 = ldv_zalloc(296UL); sysfs_optrom_attr_group2 = (struct kobject *)tmp___0; tmp___1 = ldv_zalloc(72UL); sysfs_optrom_attr_group1 = (struct bin_attribute *)tmp___1; 
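/* Annotation (added for readability): the ldv_main_exported_57() dispatcher further below drives qla2x00_sysfs_read_optrom() and qla2x00_sysfs_write_optrom() against these optrom stubs, modelling open and close with ldv_state_variable_57, ldv_probe_57() and ldv_release_57(). */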
return; } } void ldv_main_exported_33(void) { struct device_attribute *ldvarg2 ; void *tmp ; struct device *ldvarg0 ; void *tmp___0 ; char *ldvarg1 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(48UL); ldvarg2 = (struct device_attribute *)tmp; tmp___0 = ldv_zalloc(1376UL); ldvarg0 = (struct device *)tmp___0; tmp___1 = ldv_zalloc(1UL); ldvarg1 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_33 == 1) { qla24xx_84xx_fw_version_show(ldvarg0, ldvarg2, ldvarg1); ldv_state_variable_33 = 1; } else { } goto ldv_61655; default: ldv_stop(); } ldv_61655: ; return; } } void ldv_main_exported_32(void) { struct device_attribute *ldvarg5 ; void *tmp ; struct device *ldvarg3 ; void *tmp___0 ; char *ldvarg4 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(48UL); ldvarg5 = (struct device_attribute *)tmp; tmp___0 = ldv_zalloc(1376UL); ldvarg3 = (struct device *)tmp___0; tmp___1 = ldv_zalloc(1UL); ldvarg4 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_32 == 1) { qla2x00_total_isp_aborts_show(ldvarg3, ldvarg5, ldvarg4); ldv_state_variable_32 = 1; } else { } goto ldv_61664; default: ldv_stop(); } ldv_61664: ; return; } } void ldv_main_exported_21(void) { struct device *ldvarg33 ; void *tmp ; char *ldvarg34 ; void *tmp___0 ; struct device_attribute *ldvarg35 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1376UL); ldvarg33 = (struct device *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg34 = (char *)tmp___0; tmp___1 = ldv_zalloc(48UL); ldvarg35 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_21 == 1) { qla2x00_fw_dump_size_show(ldvarg33, ldvarg35, ldvarg34); ldv_state_variable_21 = 1; } else { } goto ldv_61673; default: ldv_stop(); } ldv_61673: ; return; } } void ldv_main_exported_26(void) { char *ldvarg80 ; void *tmp ; struct device *ldvarg79 ; void *tmp___0 ; struct device_attribute *ldvarg81 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1UL); ldvarg80 = (char *)tmp; tmp___0 = ldv_zalloc(1376UL); ldvarg79 = (struct device *)tmp___0; tmp___1 = ldv_zalloc(48UL); ldvarg81 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_26 == 1) { qla2x00_fabric_param_show(ldvarg79, ldvarg81, ldvarg80); ldv_state_variable_26 = 1; } else { } goto ldv_61682; default: ldv_stop(); } ldv_61682: ; return; } } void ldv_main_exported_30(void) { char *ldvarg386 ; void *tmp ; struct device_attribute *ldvarg387 ; void *tmp___0 ; struct device *ldvarg385 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1UL); ldvarg386 = (char *)tmp; tmp___0 = ldv_zalloc(48UL); ldvarg387 = (struct device_attribute *)tmp___0; tmp___1 = ldv_zalloc(1376UL); ldvarg385 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_30 == 1) { qla2x00_phy_version_show(ldvarg385, ldvarg387, ldvarg386); ldv_state_variable_30 = 1; } else { } goto ldv_61691; default: ldv_stop(); } ldv_61691: ; return; } } void ldv_main_exported_44(void) { char *ldvarg92 ; void *tmp ; struct device *ldvarg91 ; void *tmp___0 ; struct device_attribute *ldvarg93 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1UL); ldvarg92 = (char *)tmp; tmp___0 = ldv_zalloc(1376UL); ldvarg91 = (struct device *)tmp___0; tmp___1 = ldv_zalloc(48UL); ldvarg93 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if 
(ldv_state_variable_44 == 1) { qla2x00_model_desc_show(ldvarg91, ldvarg93, ldvarg92); ldv_state_variable_44 = 1; } else { } goto ldv_61700; default: ldv_stop(); } ldv_61700: ; return; } } void ldv_main_exported_55(void) { loff_t ldvarg99 ; loff_t tmp ; loff_t ldvarg96 ; loff_t tmp___0 ; size_t ldvarg94 ; size_t tmp___1 ; size_t ldvarg97 ; size_t tmp___2 ; char *ldvarg95 ; void *tmp___3 ; char *ldvarg98 ; void *tmp___4 ; int tmp___5 ; { tmp = __VERIFIER_nondet_loff_t(); ldvarg99 = tmp; tmp___0 = __VERIFIER_nondet_loff_t(); ldvarg96 = tmp___0; tmp___1 = __VERIFIER_nondet_size_t(); ldvarg94 = tmp___1; tmp___2 = __VERIFIER_nondet_size_t(); ldvarg97 = tmp___2; tmp___3 = ldv_zalloc(1UL); ldvarg95 = (char *)tmp___3; tmp___4 = ldv_zalloc(1UL); ldvarg98 = (char *)tmp___4; tmp___5 = __VERIFIER_nondet_int(); switch (tmp___5) { case 0: ; if (ldv_state_variable_55 == 2) { qla2x00_sysfs_write_vpd(sysfs_vpd_attr_group0, sysfs_vpd_attr_group2, sysfs_vpd_attr_group1, ldvarg98, ldvarg99, ldvarg97); ldv_state_variable_55 = 2; } else { } goto ldv_61712; case 1: ; if (ldv_state_variable_55 == 2) { qla2x00_sysfs_read_vpd(sysfs_vpd_attr_group0, sysfs_vpd_attr_group2, sysfs_vpd_attr_group1, ldvarg95, ldvarg96, ldvarg94); ldv_state_variable_55 = 2; } else { } goto ldv_61712; case 2: ; if (ldv_state_variable_55 == 2) { ldv_release_55(); ldv_state_variable_55 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_61712; case 3: ; if (ldv_state_variable_55 == 1) { ldv_probe_55(); ldv_state_variable_55 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_61712; default: ldv_stop(); } ldv_61712: ; return; } } void ldv_main_exported_25(void) { char *ldvarg389 ; void *tmp ; struct device *ldvarg388 ; void *tmp___0 ; struct device_attribute *ldvarg390 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1UL); ldvarg389 = (char *)tmp; tmp___0 = ldv_zalloc(1376UL); ldvarg388 = (struct device *)tmp___0; tmp___1 = ldv_zalloc(48UL); ldvarg390 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_25 == 1) { qla2x00_fw_state_show(ldvarg388, ldvarg390, ldvarg389); ldv_state_variable_25 = 1; } else { } goto ldv_61724; default: ldv_stop(); } ldv_61724: ; return; } } void ldv_main_exported_27(void) { struct device *ldvarg100 ; void *tmp ; char *ldvarg101 ; void *tmp___0 ; struct device_attribute *ldvarg102 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1376UL); ldvarg100 = (struct device *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg101 = (char *)tmp___0; tmp___1 = ldv_zalloc(48UL); ldvarg102 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_27 == 1) { qla2x00_vn_port_mac_address_show(ldvarg100, ldvarg102, ldvarg101); ldv_state_variable_27 = 1; } else { } goto ldv_61733; default: ldv_stop(); } ldv_61733: ; return; } } void ldv_main_exported_28(void) { struct device *ldvarg391 ; void *tmp ; char *ldvarg392 ; void *tmp___0 ; struct device_attribute *ldvarg393 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1376UL); ldvarg391 = (struct device *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg392 = (char *)tmp___0; tmp___1 = ldv_zalloc(48UL); ldvarg393 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_28 == 1) { qla2x00_vlan_id_show(ldvarg391, ldvarg393, ldvarg392); ldv_state_variable_28 = 1; } else { } goto ldv_61742; default: ldv_stop(); } ldv_61742: ; return; } } void ldv_main_exported_57(void) { size_t ldvarg103 ; size_t tmp ; char 
*ldvarg107 ; void *tmp___0 ; loff_t ldvarg105 ; loff_t tmp___1 ; size_t ldvarg106 ; size_t tmp___2 ; loff_t ldvarg108 ; loff_t tmp___3 ; char *ldvarg104 ; void *tmp___4 ; int tmp___5 ; { tmp = __VERIFIER_nondet_size_t(); ldvarg103 = tmp; tmp___0 = ldv_zalloc(1UL); ldvarg107 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_loff_t(); ldvarg105 = tmp___1; tmp___2 = __VERIFIER_nondet_size_t(); ldvarg106 = tmp___2; tmp___3 = __VERIFIER_nondet_loff_t(); ldvarg108 = tmp___3; tmp___4 = ldv_zalloc(1UL); ldvarg104 = (char *)tmp___4; tmp___5 = __VERIFIER_nondet_int(); switch (tmp___5) { case 0: ; if (ldv_state_variable_57 == 2) { qla2x00_sysfs_write_optrom(sysfs_optrom_attr_group0, sysfs_optrom_attr_group2, sysfs_optrom_attr_group1, ldvarg107, ldvarg108, ldvarg106); ldv_state_variable_57 = 2; } else { } goto ldv_61754; case 1: ; if (ldv_state_variable_57 == 2) { qla2x00_sysfs_read_optrom(sysfs_optrom_attr_group0, sysfs_optrom_attr_group2, sysfs_optrom_attr_group1, ldvarg104, ldvarg105, ldvarg103); ldv_state_variable_57 = 2; } else { } goto ldv_61754; case 2: ; if (ldv_state_variable_57 == 2) { ldv_release_57(); ldv_state_variable_57 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_61754; case 3: ; if (ldv_state_variable_57 == 1) { ldv_probe_57(); ldv_state_variable_57 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_61754; default: ldv_stop(); } ldv_61754: ; return; } } void ldv_main_exported_40(void) { char *ldvarg394 ; void *tmp ; char *ldvarg396 ; void *tmp___0 ; size_t ldvarg395 ; size_t tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1UL); ldvarg394 = (char *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg396 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_size_t(); ldvarg395 = tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_40 == 1) { qla2x00_zio_timer_store(dev_attr_zio_timer_group0, dev_attr_zio_timer_group1, (char const *)ldvarg396, ldvarg395); ldv_state_variable_40 = 1; } else { } goto ldv_61766; case 1: ; if (ldv_state_variable_40 == 1) { qla2x00_zio_timer_show(dev_attr_zio_timer_group0, dev_attr_zio_timer_group1, ldvarg394); ldv_state_variable_40 = 1; } else { } goto ldv_61766; default: ldv_stop(); } ldv_61766: ; return; } } void ldv_main_exported_20(void) { u32 ldvarg112 ; u32 tmp ; bool ldvarg110 ; bool ldvarg111 ; int tmp___0 ; { tmp = __VERIFIER_nondet_u32(); ldvarg112 = tmp; memset((void *)(& ldvarg110), 0, 1UL); memset((void *)(& ldvarg111), 0, 1UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_20 == 1) { qla2x00_issue_lip(qla2xxx_transport_functions_group0); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 1: ; if (ldv_state_variable_20 == 1) { qla2x00_set_rport_loss_tmo(qla2xxx_transport_functions_group2, ldvarg112); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 2: ; if (ldv_state_variable_20 == 1) { qla2x00_get_fc_host_stats(qla2xxx_transport_functions_group0); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 3: ; if (ldv_state_variable_20 == 1) { qla2x00_get_host_port_type(qla2xxx_transport_functions_group0); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 4: ; if (ldv_state_variable_20 == 1) { qla24xx_bsg_timeout(qla2xxx_transport_functions_group1); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 5: ; if (ldv_state_variable_20 == 1) { qla24xx_vport_delete(qla2xxx_transport_functions_group4); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 6: ; if (ldv_state_variable_20 == 1) { qla24xx_bsg_request(qla2xxx_transport_functions_group1); 
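/* Annotation (added for readability): each case of this nondeterministic switch invokes one qla2xxx_transport_functions callback with the stub host, rport, bsg-job, target and vport objects prepared in ldv_initialize_fc_function_template_20(), leaving the harness in state 1; the remaining entry points follow in the cases below. */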
ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 7: ; if (ldv_state_variable_20 == 1) { qla24xx_vport_disable(qla2xxx_transport_functions_group4, (int )ldvarg111); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 8: ; if (ldv_state_variable_20 == 1) { qla2x00_terminate_rport_io(qla2xxx_transport_functions_group2); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 9: ; if (ldv_state_variable_20 == 1) { qla2x00_get_host_port_state(qla2xxx_transport_functions_group0); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 10: ; if (ldv_state_variable_20 == 1) { qla2x00_get_starget_node_name(qla2xxx_transport_functions_group3); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 11: ; if (ldv_state_variable_20 == 1) { qla2x00_get_host_speed(qla2xxx_transport_functions_group0); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 12: ; if (ldv_state_variable_20 == 1) { qla2x00_get_starget_port_id(qla2xxx_transport_functions_group3); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 13: ; if (ldv_state_variable_20 == 1) { qla2x00_get_starget_port_name(qla2xxx_transport_functions_group3); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 14: ; if (ldv_state_variable_20 == 1) { qla2x00_dev_loss_tmo_callbk(qla2xxx_transport_functions_group2); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 15: ; if (ldv_state_variable_20 == 1) { qla2x00_reset_host_stats(qla2xxx_transport_functions_group0); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 16: ; if (ldv_state_variable_20 == 1) { qla2x00_get_host_symbolic_name(qla2xxx_transport_functions_group0); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 17: ; if (ldv_state_variable_20 == 1) { qla24xx_vport_create(qla2xxx_transport_functions_group4, (int )ldvarg110); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 18: ; if (ldv_state_variable_20 == 1) { qla2x00_set_host_system_hostname(qla2xxx_transport_functions_group0); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 19: ; if (ldv_state_variable_20 == 1) { qla2x00_get_host_fabric_name(qla2xxx_transport_functions_group0); ldv_state_variable_20 = 1; } else { } goto ldv_61776; case 20: ; if (ldv_state_variable_20 == 1) { qla2x00_get_host_port_id(qla2xxx_transport_functions_group0); ldv_state_variable_20 = 1; } else { } goto ldv_61776; default: ldv_stop(); } ldv_61776: ; return; } } void ldv_main_exported_59(void) { char *ldvarg437 ; void *tmp ; size_t ldvarg436 ; size_t tmp___0 ; char *ldvarg440 ; void *tmp___1 ; loff_t ldvarg438 ; loff_t tmp___2 ; size_t ldvarg439 ; size_t tmp___3 ; loff_t ldvarg441 ; loff_t tmp___4 ; int tmp___5 ; { tmp = ldv_zalloc(1UL); ldvarg437 = (char *)tmp; tmp___0 = __VERIFIER_nondet_size_t(); ldvarg436 = tmp___0; tmp___1 = ldv_zalloc(1UL); ldvarg440 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_loff_t(); ldvarg438 = tmp___2; tmp___3 = __VERIFIER_nondet_size_t(); ldvarg439 = tmp___3; tmp___4 = __VERIFIER_nondet_loff_t(); ldvarg441 = tmp___4; tmp___5 = __VERIFIER_nondet_int(); switch (tmp___5) { case 0: ; if (ldv_state_variable_59 == 2) { qla2x00_sysfs_write_fw_dump(sysfs_fw_dump_attr_group0, sysfs_fw_dump_attr_group2, sysfs_fw_dump_attr_group1, ldvarg440, ldvarg441, ldvarg439); ldv_state_variable_59 = 2; } else { } goto ldv_61808; case 1: ; if (ldv_state_variable_59 == 2) { qla2x00_sysfs_read_fw_dump(sysfs_fw_dump_attr_group0, sysfs_fw_dump_attr_group2, sysfs_fw_dump_attr_group1, ldvarg437, ldvarg438, ldvarg436); ldv_state_variable_59 = 2; } else { } goto ldv_61808; case 
2: ; if (ldv_state_variable_59 == 2) { ldv_release_59(); ldv_state_variable_59 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_61808; case 3: ; if (ldv_state_variable_59 == 1) { ldv_probe_59(); ldv_state_variable_59 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_61808; default: ldv_stop(); } ldv_61808: ; return; } } void ldv_main_exported_49(void) { struct device *ldvarg442 ; void *tmp ; char *ldvarg443 ; void *tmp___0 ; struct device_attribute *ldvarg444 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1376UL); ldvarg442 = (struct device *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg443 = (char *)tmp___0; tmp___1 = ldv_zalloc(48UL); ldvarg444 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_49 == 1) { qla2x00_fw_version_show(ldvarg442, ldvarg444, ldvarg443); ldv_state_variable_49 = 1; } else { } goto ldv_61820; default: ldv_stop(); } ldv_61820: ; return; } } void ldv_main_exported_24(void) { char *ldvarg446 ; void *tmp ; struct device *ldvarg445 ; void *tmp___0 ; struct device_attribute *ldvarg447 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1UL); ldvarg446 = (char *)tmp; tmp___0 = ldv_zalloc(1376UL); ldvarg445 = (struct device *)tmp___0; tmp___1 = ldv_zalloc(48UL); ldvarg447 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_24 == 1) { qla2x00_thermal_temp_show(ldvarg445, ldvarg447, ldvarg446); ldv_state_variable_24 = 1; } else { } goto ldv_61829; default: ldv_stop(); } ldv_61829: ; return; } } void ldv_main_exported_31(void) { struct device_attribute *ldvarg115 ; void *tmp ; char *ldvarg114 ; void *tmp___0 ; struct device *ldvarg113 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(48UL); ldvarg115 = (struct device_attribute *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg114 = (char *)tmp___0; tmp___1 = ldv_zalloc(1376UL); ldvarg113 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_31 == 1) { qla2x00_mpi_version_show(ldvarg113, ldvarg115, ldvarg114); ldv_state_variable_31 = 1; } else { } goto ldv_61838; default: ldv_stop(); } ldv_61838: ; return; } } void ldv_main_exported_35(void) { struct device_attribute *ldvarg118 ; void *tmp ; char *ldvarg117 ; void *tmp___0 ; struct device *ldvarg116 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(48UL); ldvarg118 = (struct device_attribute *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg117 = (char *)tmp___0; tmp___1 = ldv_zalloc(1376UL); ldvarg116 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_35 == 1) { qla2x00_optrom_fw_version_show(ldvarg116, ldvarg118, ldvarg117); ldv_state_variable_35 = 1; } else { } goto ldv_61847; default: ldv_stop(); } ldv_61847: ; return; } } void ldv_main_exported_53(void) { struct bin_attribute *ldvarg451 ; void *tmp ; struct file *ldvarg450 ; void *tmp___0 ; char *ldvarg449 ; void *tmp___1 ; struct kobject *ldvarg452 ; void *tmp___2 ; loff_t ldvarg453 ; loff_t tmp___3 ; size_t ldvarg448 ; size_t tmp___4 ; int tmp___5 ; { tmp = ldv_zalloc(72UL); ldvarg451 = (struct bin_attribute *)tmp; tmp___0 = ldv_zalloc(360UL); ldvarg450 = (struct file *)tmp___0; tmp___1 = ldv_zalloc(1UL); ldvarg449 = (char *)tmp___1; tmp___2 = ldv_zalloc(296UL); ldvarg452 = (struct kobject *)tmp___2; tmp___3 = __VERIFIER_nondet_loff_t(); ldvarg453 = tmp___3; tmp___4 = __VERIFIER_nondet_size_t(); ldvarg448 = tmp___4; tmp___5 = __VERIFIER_nondet_int(); switch (tmp___5) { case 0: 
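/* Case 0 exercises qla2x00_sysfs_write_reset(), but only while the LDV model is in state 2, i.e. the corresponding sysfs binary attribute is registered; cases 1 and 2 below model unregistering and re-registering it through ldv_release_53()/ldv_probe_53() while keeping ref_cnt balanced. */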
; if (ldv_state_variable_53 == 2) { qla2x00_sysfs_write_reset(ldvarg450, ldvarg452, ldvarg451, ldvarg449, ldvarg453, ldvarg448); ldv_state_variable_53 = 2; } else { } goto ldv_61859; case 1: ; if (ldv_state_variable_53 == 2) { ldv_release_53(); ldv_state_variable_53 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_61859; case 2: ; if (ldv_state_variable_53 == 1) { ldv_probe_53(); ldv_state_variable_53 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_61859; default: ldv_stop(); } ldv_61859: ; return; } } void ldv_main_exported_48(void) { struct device *ldvarg119 ; void *tmp ; char *ldvarg120 ; void *tmp___0 ; struct device_attribute *ldvarg121 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1376UL); ldvarg119 = (struct device *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg120 = (char *)tmp___0; tmp___1 = ldv_zalloc(48UL); ldvarg121 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_48 == 1) { qla2x00_serial_num_show(ldvarg119, ldvarg121, ldvarg120); ldv_state_variable_48 = 1; } else { } goto ldv_61870; default: ldv_stop(); } ldv_61870: ; return; } } void ldv_main_exported_22(void) { struct device *ldvarg454 ; void *tmp ; char *ldvarg455 ; void *tmp___0 ; struct device_attribute *ldvarg456 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1376UL); ldvarg454 = (struct device *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg455 = (char *)tmp___0; tmp___1 = ldv_zalloc(48UL); ldvarg456 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_22 == 1) { qla2x00_diag_megabytes_show(ldvarg454, ldvarg456, ldvarg455); ldv_state_variable_22 = 1; } else { } goto ldv_61879; default: ldv_stop(); } ldv_61879: ; return; } } void ldv_main_exported_42(void) { struct device *ldvarg457 ; void *tmp ; struct device_attribute *ldvarg459 ; void *tmp___0 ; char *ldvarg458 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1376UL); ldvarg457 = (struct device *)tmp; tmp___0 = ldv_zalloc(48UL); ldvarg459 = (struct device_attribute *)tmp___0; tmp___1 = ldv_zalloc(1UL); ldvarg458 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_42 == 1) { qla2x00_link_state_show(ldvarg457, ldvarg459, ldvarg458); ldv_state_variable_42 = 1; } else { } goto ldv_61888; default: ldv_stop(); } ldv_61888: ; return; } } void ldv_main_exported_46(void) { struct device_attribute *ldvarg462 ; void *tmp ; char *ldvarg461 ; void *tmp___0 ; struct device *ldvarg460 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(48UL); ldvarg462 = (struct device_attribute *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg461 = (char *)tmp___0; tmp___1 = ldv_zalloc(1376UL); ldvarg460 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_46 == 1) { qla2x00_isp_id_show(ldvarg460, ldvarg462, ldvarg461); ldv_state_variable_46 = 1; } else { } goto ldv_61897; default: ldv_stop(); } ldv_61897: ; return; } } void ldv_main_exported_23(void) { char *ldvarg464 ; void *tmp ; struct device *ldvarg463 ; void *tmp___0 ; struct device_attribute *ldvarg465 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1UL); ldvarg464 = (char *)tmp; tmp___0 = ldv_zalloc(1376UL); ldvarg463 = (struct device *)tmp___0; tmp___1 = ldv_zalloc(48UL); ldvarg465 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_23 == 1) { qla2x00_diag_requests_show(ldvarg463, ldvarg465, ldvarg464); 
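/* Like the other device_attribute harnesses in this file, this entry point allocated a dummy struct device (1376 bytes), a struct device_attribute (48 bytes) and a one-byte output buffer via ldv_zalloc() before invoking the show handler above; read-only attributes have no probe/release transitions, so the state variable simply stays at 1. */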
ldv_state_variable_23 = 1; } else { } goto ldv_61906; default: ldv_stop(); } ldv_61906: ; return; } } void ldv_main_exported_29(void) { struct device *ldvarg155 ; void *tmp ; struct device_attribute *ldvarg157 ; void *tmp___0 ; char *ldvarg156 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1376UL); ldvarg155 = (struct device *)tmp; tmp___0 = ldv_zalloc(48UL); ldvarg157 = (struct device_attribute *)tmp___0; tmp___1 = ldv_zalloc(1UL); ldvarg156 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_29 == 1) { qla2x00_flash_block_size_show(ldvarg155, ldvarg157, ldvarg156); ldv_state_variable_29 = 1; } else { } goto ldv_61915; default: ldv_stop(); } ldv_61915: ; return; } } void ldv_main_exported_50(void) { struct device *ldvarg158 ; void *tmp ; struct device_attribute *ldvarg160 ; void *tmp___0 ; char *ldvarg159 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1376UL); ldvarg158 = (struct device *)tmp; tmp___0 = ldv_zalloc(48UL); ldvarg160 = (struct device_attribute *)tmp___0; tmp___1 = ldv_zalloc(1UL); ldvarg159 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_50 == 1) { qla2x00_drvr_version_show(ldvarg158, ldvarg160, ldvarg159); ldv_state_variable_50 = 1; } else { } goto ldv_61924; default: ldv_stop(); } ldv_61924: ; return; } } void ldv_main_exported_39(void) { char *ldvarg161 ; void *tmp ; size_t ldvarg162 ; size_t tmp___0 ; char *ldvarg163 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1UL); ldvarg161 = (char *)tmp; tmp___0 = __VERIFIER_nondet_size_t(); ldvarg162 = tmp___0; tmp___1 = ldv_zalloc(1UL); ldvarg163 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_39 == 1) { qla2x00_beacon_store(dev_attr_beacon_group0, dev_attr_beacon_group1, (char const *)ldvarg163, ldvarg162); ldv_state_variable_39 = 1; } else { } goto ldv_61933; case 1: ; if (ldv_state_variable_39 == 1) { qla2x00_beacon_show(dev_attr_beacon_group0, dev_attr_beacon_group1, ldvarg161); ldv_state_variable_39 = 1; } else { } goto ldv_61933; default: ldv_stop(); } ldv_61933: ; return; } } void ldv_main_exported_36(void) { struct device *ldvarg466 ; void *tmp ; struct device_attribute *ldvarg468 ; void *tmp___0 ; char *ldvarg467 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1376UL); ldvarg466 = (struct device *)tmp; tmp___0 = ldv_zalloc(48UL); ldvarg468 = (struct device_attribute *)tmp___0; tmp___1 = ldv_zalloc(1UL); ldvarg467 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_36 == 1) { qla2x00_optrom_fcode_version_show(ldvarg466, ldvarg468, ldvarg467); ldv_state_variable_36 = 1; } else { } goto ldv_61943; default: ldv_stop(); } ldv_61943: ; return; } } void ldv_main_exported_51(void) { struct kobject *ldvarg473 ; void *tmp ; char *ldvarg470 ; void *tmp___0 ; struct bin_attribute *ldvarg472 ; void *tmp___1 ; struct file *ldvarg471 ; void *tmp___2 ; loff_t ldvarg474 ; loff_t tmp___3 ; size_t ldvarg469 ; size_t tmp___4 ; int tmp___5 ; { tmp = ldv_zalloc(296UL); ldvarg473 = (struct kobject *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg470 = (char *)tmp___0; tmp___1 = ldv_zalloc(72UL); ldvarg472 = (struct bin_attribute *)tmp___1; tmp___2 = ldv_zalloc(360UL); ldvarg471 = (struct file *)tmp___2; tmp___3 = __VERIFIER_nondet_loff_t(); ldvarg474 = tmp___3; tmp___4 = __VERIFIER_nondet_size_t(); ldvarg469 = tmp___4; tmp___5 = __VERIFIER_nondet_int(); switch (tmp___5) { case 0: ; if (ldv_state_variable_51 == 2) { 
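/* State 2 means the corresponding binary attribute is currently registered, so the harness may invoke qla2x00_sysfs_read_dcbx_tlv() with a nondeterministic offset (ldvarg474) and length (ldvarg469); the remaining cases model attribute removal and re-creation. */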
qla2x00_sysfs_read_dcbx_tlv(ldvarg471, ldvarg473, ldvarg472, ldvarg470, ldvarg474, ldvarg469); ldv_state_variable_51 = 2; } else { } goto ldv_61955; case 1: ; if (ldv_state_variable_51 == 2) { ldv_release_51(); ldv_state_variable_51 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_61955; case 2: ; if (ldv_state_variable_51 == 1) { ldv_probe_51(); ldv_state_variable_51 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_61955; default: ldv_stop(); } ldv_61955: ; return; } } void ldv_main_exported_58(void) { loff_t ldvarg202 ; loff_t tmp ; char *ldvarg201 ; void *tmp___0 ; loff_t ldvarg199 ; loff_t tmp___1 ; char *ldvarg198 ; void *tmp___2 ; size_t ldvarg197 ; size_t tmp___3 ; size_t ldvarg200 ; size_t tmp___4 ; int tmp___5 ; { tmp = __VERIFIER_nondet_loff_t(); ldvarg202 = tmp; tmp___0 = ldv_zalloc(1UL); ldvarg201 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_loff_t(); ldvarg199 = tmp___1; tmp___2 = ldv_zalloc(1UL); ldvarg198 = (char *)tmp___2; tmp___3 = __VERIFIER_nondet_size_t(); ldvarg197 = tmp___3; tmp___4 = __VERIFIER_nondet_size_t(); ldvarg200 = tmp___4; tmp___5 = __VERIFIER_nondet_int(); switch (tmp___5) { case 0: ; if (ldv_state_variable_58 == 2) { qla2x00_sysfs_write_nvram(sysfs_nvram_attr_group0, sysfs_nvram_attr_group2, sysfs_nvram_attr_group1, ldvarg201, ldvarg202, ldvarg200); ldv_state_variable_58 = 2; } else { } goto ldv_61969; case 1: ; if (ldv_state_variable_58 == 2) { qla2x00_sysfs_read_nvram(sysfs_nvram_attr_group0, sysfs_nvram_attr_group2, sysfs_nvram_attr_group1, ldvarg198, ldvarg199, ldvarg197); ldv_state_variable_58 = 2; } else { } goto ldv_61969; case 2: ; if (ldv_state_variable_58 == 2) { ldv_release_58(); ldv_state_variable_58 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_61969; case 3: ; if (ldv_state_variable_58 == 1) { ldv_probe_58(); ldv_state_variable_58 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_61969; default: ldv_stop(); } ldv_61969: ; return; } } void ldv_main_exported_41(void) { char *ldvarg205 ; void *tmp ; char *ldvarg203 ; void *tmp___0 ; size_t ldvarg204 ; size_t tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1UL); ldvarg205 = (char *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg203 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_size_t(); ldvarg204 = tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_41 == 1) { qla2x00_zio_store(dev_attr_zio_group0, dev_attr_zio_group1, (char const *)ldvarg205, ldvarg204); ldv_state_variable_41 = 1; } else { } goto ldv_61981; case 1: ; if (ldv_state_variable_41 == 1) { qla2x00_zio_show(dev_attr_zio_group0, dev_attr_zio_group1, ldvarg203); ldv_state_variable_41 = 1; } else { } goto ldv_61981; default: ldv_stop(); } ldv_61981: ; return; } } void ldv_main_exported_47(void) { char *ldvarg476 ; void *tmp ; struct device *ldvarg475 ; void *tmp___0 ; struct device_attribute *ldvarg477 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1UL); ldvarg476 = (char *)tmp; tmp___0 = ldv_zalloc(1376UL); ldvarg475 = (struct device *)tmp___0; tmp___1 = ldv_zalloc(48UL); ldvarg477 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_47 == 1) { qla2x00_isp_name_show(ldvarg475, ldvarg477, ldvarg476); ldv_state_variable_47 = 1; } else { } goto ldv_61991; default: ldv_stop(); } ldv_61991: ; return; } } void ldv_main_exported_38(void) { char *ldvarg479 ; void *tmp ; struct device_attribute *ldvarg480 ; void *tmp___0 ; struct device *ldvarg478 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1UL); ldvarg479 = (char *)tmp; tmp___0 = 
ldv_zalloc(48UL); ldvarg480 = (struct device_attribute *)tmp___0; tmp___1 = ldv_zalloc(1376UL); ldvarg478 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_38 == 1) { qla2x00_optrom_bios_version_show(ldvarg478, ldvarg480, ldvarg479); ldv_state_variable_38 = 1; } else { } goto ldv_62000; default: ldv_stop(); } ldv_62000: ; return; } } void ldv_main_exported_52(void) { struct kobject *ldvarg210 ; void *tmp ; size_t ldvarg206 ; size_t tmp___0 ; char *ldvarg207 ; void *tmp___1 ; struct file *ldvarg208 ; void *tmp___2 ; loff_t ldvarg211 ; loff_t tmp___3 ; struct bin_attribute *ldvarg209 ; void *tmp___4 ; int tmp___5 ; { tmp = ldv_zalloc(296UL); ldvarg210 = (struct kobject *)tmp; tmp___0 = __VERIFIER_nondet_size_t(); ldvarg206 = tmp___0; tmp___1 = ldv_zalloc(1UL); ldvarg207 = (char *)tmp___1; tmp___2 = ldv_zalloc(360UL); ldvarg208 = (struct file *)tmp___2; tmp___3 = __VERIFIER_nondet_loff_t(); ldvarg211 = tmp___3; tmp___4 = ldv_zalloc(72UL); ldvarg209 = (struct bin_attribute *)tmp___4; tmp___5 = __VERIFIER_nondet_int(); switch (tmp___5) { case 0: ; if (ldv_state_variable_52 == 2) { qla2x00_sysfs_read_xgmac_stats(ldvarg208, ldvarg210, ldvarg209, ldvarg207, ldvarg211, ldvarg206); ldv_state_variable_52 = 2; } else { } goto ldv_62012; case 1: ; if (ldv_state_variable_52 == 2) { ldv_release_52(); ldv_state_variable_52 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_62012; case 2: ; if (ldv_state_variable_52 == 1) { ldv_probe_52(); ldv_state_variable_52 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_62012; default: ldv_stop(); } ldv_62012: ; return; } } void ldv_main_exported_34(void) { struct device_attribute *ldvarg483 ; void *tmp ; char *ldvarg482 ; void *tmp___0 ; struct device *ldvarg481 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(48UL); ldvarg483 = (struct device_attribute *)tmp; tmp___0 = ldv_zalloc(1UL); ldvarg482 = (char *)tmp___0; tmp___1 = ldv_zalloc(1376UL); ldvarg481 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_34 == 1) { qla2x00_optrom_gold_fw_version_show(ldvarg481, ldvarg483, ldvarg482); ldv_state_variable_34 = 1; } else { } goto ldv_62023; default: ldv_stop(); } ldv_62023: ; return; } } void ldv_main_exported_56(void) { struct file *ldvarg216 ; void *tmp ; loff_t ldvarg219 ; loff_t tmp___0 ; size_t ldvarg214 ; size_t tmp___1 ; struct bin_attribute *ldvarg217 ; void *tmp___2 ; struct kobject *ldvarg218 ; void *tmp___3 ; char *ldvarg215 ; void *tmp___4 ; int tmp___5 ; { tmp = ldv_zalloc(360UL); ldvarg216 = (struct file *)tmp; tmp___0 = __VERIFIER_nondet_loff_t(); ldvarg219 = tmp___0; tmp___1 = __VERIFIER_nondet_size_t(); ldvarg214 = tmp___1; tmp___2 = ldv_zalloc(72UL); ldvarg217 = (struct bin_attribute *)tmp___2; tmp___3 = ldv_zalloc(296UL); ldvarg218 = (struct kobject *)tmp___3; tmp___4 = ldv_zalloc(1UL); ldvarg215 = (char *)tmp___4; tmp___5 = __VERIFIER_nondet_int(); switch (tmp___5) { case 0: ; if (ldv_state_variable_56 == 2) { qla2x00_sysfs_write_optrom_ctl(ldvarg216, ldvarg218, ldvarg217, ldvarg215, ldvarg219, ldvarg214); ldv_state_variable_56 = 2; } else { } goto ldv_62035; case 1: ; if (ldv_state_variable_56 == 2) { ldv_release_56(); ldv_state_variable_56 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_62035; case 2: ; if (ldv_state_variable_56 == 1) { ldv_probe_56(); ldv_state_variable_56 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_62035; default: ldv_stop(); } ldv_62035: ; return; } } void ldv_main_exported_37(void) { struct device 
*ldvarg484 ; void *tmp ; struct device_attribute *ldvarg486 ; void *tmp___0 ; char *ldvarg485 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1376UL); ldvarg484 = (struct device *)tmp; tmp___0 = ldv_zalloc(48UL); ldvarg486 = (struct device_attribute *)tmp___0; tmp___1 = ldv_zalloc(1UL); ldvarg485 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_37 == 1) { qla2x00_optrom_efi_version_show(ldvarg484, ldvarg486, ldvarg485); ldv_state_variable_37 = 1; } else { } goto ldv_62046; default: ldv_stop(); } ldv_62046: ; return; } } void ldv_main_exported_45(void) { char *ldvarg260 ; void *tmp ; struct device_attribute *ldvarg261 ; void *tmp___0 ; struct device *ldvarg259 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1UL); ldvarg260 = (char *)tmp; tmp___0 = ldv_zalloc(48UL); ldvarg261 = (struct device_attribute *)tmp___0; tmp___1 = ldv_zalloc(1376UL); ldvarg259 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_45 == 1) { qla2x00_model_name_show(ldvarg259, ldvarg261, ldvarg260); ldv_state_variable_45 = 1; } else { } goto ldv_62055; default: ldv_stop(); } ldv_62055: ; return; } } void ldv_main_exported_19(void) { u32 ldvarg262 ; u32 tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_u32(); ldvarg262 = tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_19 == 1) { qla2x00_issue_lip(qla2xxx_transport_vport_functions_group0); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 1: ; if (ldv_state_variable_19 == 1) { qla2x00_set_rport_loss_tmo(qla2xxx_transport_vport_functions_group2, ldvarg262); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 2: ; if (ldv_state_variable_19 == 1) { qla2x00_get_fc_host_stats(qla2xxx_transport_vport_functions_group0); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 3: ; if (ldv_state_variable_19 == 1) { qla2x00_get_host_port_type(qla2xxx_transport_vport_functions_group0); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 4: ; if (ldv_state_variable_19 == 1) { qla24xx_bsg_timeout(qla2xxx_transport_vport_functions_group1); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 5: ; if (ldv_state_variable_19 == 1) { qla24xx_bsg_request(qla2xxx_transport_vport_functions_group1); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 6: ; if (ldv_state_variable_19 == 1) { qla2x00_terminate_rport_io(qla2xxx_transport_vport_functions_group2); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 7: ; if (ldv_state_variable_19 == 1) { qla2x00_get_host_port_state(qla2xxx_transport_vport_functions_group0); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 8: ; if (ldv_state_variable_19 == 1) { qla2x00_get_starget_node_name(qla2xxx_transport_vport_functions_group3); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 9: ; if (ldv_state_variable_19 == 1) { qla2x00_get_host_speed(qla2xxx_transport_vport_functions_group0); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 10: ; if (ldv_state_variable_19 == 1) { qla2x00_get_starget_port_id(qla2xxx_transport_vport_functions_group3); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 11: ; if (ldv_state_variable_19 == 1) { qla2x00_get_starget_port_name(qla2xxx_transport_vport_functions_group3); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 12: ; if (ldv_state_variable_19 == 1) { qla2x00_dev_loss_tmo_callbk(qla2xxx_transport_vport_functions_group2); ldv_state_variable_19 = 1; } else { } 
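/* ldv_main_exported_19() drives the vport transport callback table (qla2xxx_transport_vport_functions): each call nondeterministically selects one callback (issue_lip, host/starget attribute getters, bsg request/timeout, dev_loss_tmo handling, ...) and invokes it with the preallocated group arguments; there is no probe/release pair for this interface, so its state variable never leaves 1. */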
goto ldv_62062; case 13: ; if (ldv_state_variable_19 == 1) { qla2x00_reset_host_stats(qla2xxx_transport_vport_functions_group0); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 14: ; if (ldv_state_variable_19 == 1) { qla2x00_get_host_symbolic_name(qla2xxx_transport_vport_functions_group0); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 15: ; if (ldv_state_variable_19 == 1) { qla2x00_set_host_system_hostname(qla2xxx_transport_vport_functions_group0); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 16: ; if (ldv_state_variable_19 == 1) { qla2x00_get_host_fabric_name(qla2xxx_transport_vport_functions_group0); ldv_state_variable_19 = 1; } else { } goto ldv_62062; case 17: ; if (ldv_state_variable_19 == 1) { qla2x00_get_host_port_id(qla2xxx_transport_vport_functions_group0); ldv_state_variable_19 = 1; } else { } goto ldv_62062; default: ldv_stop(); } ldv_62062: ; return; } } void ldv_main_exported_43(void) { struct device *ldvarg487 ; void *tmp ; struct device_attribute *ldvarg489 ; void *tmp___0 ; char *ldvarg488 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_zalloc(1376UL); ldvarg487 = (struct device *)tmp; tmp___0 = ldv_zalloc(48UL); ldvarg489 = (struct device_attribute *)tmp___0; tmp___1 = ldv_zalloc(1UL); ldvarg488 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_43 == 1) { qla2x00_pci_info_show(ldvarg487, ldvarg489, ldvarg488); ldv_state_variable_43 = 1; } else { } goto ldv_62088; default: ldv_stop(); } ldv_62088: ; return; } } void ldv_main_exported_54(void) { char *ldvarg265 ; void *tmp ; struct kobject *ldvarg268 ; void *tmp___0 ; struct bin_attribute *ldvarg267 ; void *tmp___1 ; loff_t ldvarg269 ; loff_t tmp___2 ; size_t ldvarg264 ; size_t tmp___3 ; struct file *ldvarg266 ; void *tmp___4 ; int tmp___5 ; { tmp = ldv_zalloc(1UL); ldvarg265 = (char *)tmp; tmp___0 = ldv_zalloc(296UL); ldvarg268 = (struct kobject *)tmp___0; tmp___1 = ldv_zalloc(72UL); ldvarg267 = (struct bin_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_loff_t(); ldvarg269 = tmp___2; tmp___3 = __VERIFIER_nondet_size_t(); ldvarg264 = tmp___3; tmp___4 = ldv_zalloc(360UL); ldvarg266 = (struct file *)tmp___4; tmp___5 = __VERIFIER_nondet_int(); switch (tmp___5) { case 0: ; if (ldv_state_variable_54 == 2) { qla2x00_sysfs_read_sfp(ldvarg266, ldvarg268, ldvarg267, ldvarg265, ldvarg269, ldvarg264); ldv_state_variable_54 = 2; } else { } goto ldv_62100; case 1: ; if (ldv_state_variable_54 == 2) { ldv_release_54(); ldv_state_variable_54 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_62100; case 2: ; if (ldv_state_variable_54 == 1) { ldv_probe_54(); ldv_state_variable_54 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_62100; default: ldv_stop(); } ldv_62100: ; return; } } int ldv_del_timer_47(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_48(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_49(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___1 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); 
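/* On success (return value 0) the wrapper advances the scsi_host_template state machine (ldv_state_variable_72 = 1) and calls ldv_initialize_scsi_host_template_72(), mirroring the pattern used by the other registration wrappers in this harness. */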
ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } void ldv_scsi_remove_host_50(struct Scsi_Host *shost ) { { scsi_remove_host(shost); ldv_state_variable_72 = 0; return; } } int ldv_del_timer_55(struct timer_list *ldv_func_arg1 ) ; int ldv_del_timer_sync_57(struct timer_list *ldv_func_arg1 ) ; void disable_suitable_timer_11(struct timer_list *timer ) ; void choose_timer_11(struct timer_list *timer ) ; int reg_timer_11(struct timer_list *timer ) ; void activate_pending_timer_11(struct timer_list *timer , unsigned long data , int pending_flag ) ; int ldv_scsi_add_host_with_dma_56(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; __inline static void u64_to_wwn(u64 inm , u8 *wwn ) { { *wwn = (u8 )(inm >> 56); *(wwn + 1UL) = (u8 )(inm >> 48); *(wwn + 2UL) = (u8 )(inm >> 40); *(wwn + 3UL) = (u8 )(inm >> 32); *(wwn + 4UL) = (u8 )(inm >> 24); *(wwn + 5UL) = (u8 )(inm >> 16); *(wwn + 6UL) = (u8 )(inm >> 8); *(wwn + 7UL) = (u8 )inm; return; } } static char const * const port_state_str___3[5U] = { "Unknown", "UNCONFIGURED", "DEAD", "LOST", "ONLINE"}; __inline static void qla2x00_set_fcport_state___2(fc_port_t *fcport , int state ) { int old_state ; { old_state = atomic_read((atomic_t const *)(& fcport->state)); atomic_set(& fcport->state, state); if (old_state != 0 && old_state != state) { ql_dbg(268435456U, fcport->vha, 8317, "FCPort state transitioned from %s to %s - portid=%02x%02x%02x.\n", port_state_str___3[old_state], port_state_str___3[state], (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { } return; } } void qla2x00_vp_stop_timer(scsi_qla_host_t *vha ) { { if ((unsigned int )vha->vp_idx != 0U && vha->timer_active != 0U) { ldv_del_timer_sync_57(& vha->timer); vha->timer_active = 0U; } else { } return; } } static uint32_t qla24xx_allocate_vp_id(scsi_qla_host_t *vha ) { uint32_t vp_id ; struct qla_hw_data *ha ; unsigned long flags ; unsigned long tmp ; raw_spinlock_t *tmp___0 ; { ha = vha->hw; mutex_lock_nested(& ha->vport_lock, 0U); tmp = find_first_zero_bit((unsigned long const *)(& ha->vp_idx_map), (unsigned long )((int )ha->max_npiv_vports + 1)); vp_id = (uint32_t )tmp; if ((uint32_t )ha->max_npiv_vports < vp_id) { ql_dbg(262144U, vha, 40960, "vp_id %d is bigger than max-supported %d.\n", vp_id, (int )ha->max_npiv_vports); mutex_unlock(& ha->vport_lock); return (vp_id); } else { } set_bit((long )vp_id, (unsigned long volatile *)(& ha->vp_idx_map)); ha->num_vhosts = (uint16_t )((int )ha->num_vhosts + 1); vha->vp_idx = (uint16_t )vp_id; tmp___0 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___0); list_add_tail(& vha->list, & ha->vp_list); qlt_update_vp_map(vha, 1); spin_unlock_irqrestore(& ha->vport_slock, flags); mutex_unlock(& ha->vport_lock); return (vp_id); } } void qla24xx_deallocate_vp_id(scsi_qla_host_t *vha ) { uint16_t vp_id ; struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; int tmp___1 ; { ha = vha->hw; flags = 0UL; mutex_lock_nested(& ha->vport_lock, 0U); tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); goto ldv_60798; ldv_60797: spin_unlock_irqrestore(& ha->vport_slock, flags); msleep(500U); tmp___0 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___0); ldv_60798: tmp___1 = atomic_read((atomic_t const *)(& vha->vref_count)); if (tmp___1 != 0) { goto ldv_60797; } else { } list_del(& vha->list); 
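/* With all references drained (vref_count reached 0) the vport has been unlinked from ha->vp_list; below, its target-mode vp map entry is cleared (the argument 3 appears to correspond to RESET_VP_IDX), num_vhosts is decremented and the vp_idx bit is released so the id can be reused. */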
qlt_update_vp_map(vha, 3); spin_unlock_irqrestore(& ha->vport_slock, flags); vp_id = vha->vp_idx; ha->num_vhosts = (uint16_t )((int )ha->num_vhosts - 1); clear_bit((long )vp_id, (unsigned long volatile *)(& ha->vp_idx_map)); mutex_unlock(& ha->vport_lock); return; } } static scsi_qla_host_t *qla24xx_find_vhost_by_name(struct qla_hw_data *ha , uint8_t *port_name ) { scsi_qla_host_t *vha ; struct scsi_qla_host *tvha ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp___0 ; struct list_head const *__mptr___1 ; { tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)ha->vp_list.next; vha = (scsi_qla_host_t *)__mptr; __mptr___0 = (struct list_head const *)vha->list.next; tvha = (scsi_qla_host_t *)__mptr___0; goto ldv_60817; ldv_60816: tmp___0 = memcmp((void const *)port_name, (void const *)(& vha->port_name), 8UL); if (tmp___0 == 0) { spin_unlock_irqrestore(& ha->vport_slock, flags); return (vha); } else { } vha = tvha; __mptr___1 = (struct list_head const *)tvha->list.next; tvha = (struct scsi_qla_host *)__mptr___1; ldv_60817: ; if ((unsigned long )(& vha->list) != (unsigned long )(& ha->vp_list)) { goto ldv_60816; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); return ((scsi_qla_host_t *)0); } } static void qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha ) { fc_port_t *fcport ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_60828; ldv_60827: ql_dbg(262144U, vha, 40961, "Marking port dead, loop_id=0x%04x : %x.\n", (int )fcport->loop_id, (int )(fcport->vha)->vp_idx); qla2x00_mark_device_lost(vha, fcport, 0, 0); qla2x00_set_fcport_state___2(fcport, 1); __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_60828: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_60827; } else { } return; } } int qla24xx_disable_vp(scsi_qla_host_t *vha ) { unsigned long flags ; int ret ; raw_spinlock_t *tmp ; { ret = qla24xx_control_vp(vha, 11); atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); tmp = spinlock_check(& (vha->hw)->vport_slock); flags = _raw_spin_lock_irqsave(tmp); qlt_update_vp_map(vha, 4); spin_unlock_irqrestore(& (vha->hw)->vport_slock, flags); qla2x00_mark_vp_devices_dead(vha); atomic_set(& vha->vp_state, 2); vha->flags.management_server_logged_in = 0U; if (ret == 0) { fc_vport_set_state(vha->fc_vport, 2); } else { fc_vport_set_state(vha->fc_vport, 9); return (-1); } return (0); } } int qla24xx_enable_vp(scsi_qla_host_t *vha ) { int ret ; struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; int tmp___1 ; { ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; tmp___0 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___0 == 2) { vha->vp_err_state = 1U; fc_vport_set_state(vha->fc_vport, 3); goto enable_failed; } else { tmp___1 = atomic_read((atomic_t const *)(& base_vha->loop_state)); if (tmp___1 == 6) { vha->vp_err_state = 1U; fc_vport_set_state(vha->fc_vport, 3); goto enable_failed; } else if (((int )ha->current_topology & 8) == 0) { vha->vp_err_state = 1U; fc_vport_set_state(vha->fc_vport, 3); goto enable_failed; } else { } } mutex_lock_nested(& ha->vport_lock, 0U); ret = qla24xx_modify_vp_config(vha); mutex_unlock(& ha->vport_lock); if (ret != 0) { 
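/* qla24xx_modify_vp_config() failed: mark the FC vport as failed (the literal 9 appears to correspond to FC_VPORT_FAILED) and take the enable_failed exit path. */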
fc_vport_set_state(vha->fc_vport, 9); goto enable_failed; } else { } ql_dbg(4194304U, vha, 32794, "Virtual port with id: %d - Enabled.\n", (int )vha->vp_idx); return (0); enable_failed: ql_dbg(4194304U, vha, 32795, "Virtual port with id: %d - Disabled.\n", (int )vha->vp_idx); return (1); } } static void qla24xx_configure_vp(scsi_qla_host_t *vha ) { struct fc_vport *fc_vport ; int ret ; int tmp ; { fc_vport = vha->fc_vport; ql_dbg(262144U, vha, 40962, "%s: change request #3.\n", "qla24xx_configure_vp"); ret = qla2x00_send_change_request(vha, 3, (int )vha->vp_idx); if (ret != 0) { ql_dbg(262144U, vha, 40963, "Failed to enable receiving of RSCN requests: 0x%x.\n", ret); return; } else { clear_bit(4L, (unsigned long volatile *)(& vha->vp_flags)); } vha->flags.online = 1U; tmp = qla24xx_configure_vhba(vha); if (tmp != 0) { return; } else { } atomic_set(& vha->vp_state, 1); fc_vport_set_state(fc_vport, 1); return; } } void qla2x00_alert_all_vps(struct rsp_que *rsp , uint16_t *mb ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; int i ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; raw_spinlock_t *tmp___0 ; struct list_head const *__mptr___0 ; { ha = rsp->hw; i = 0; tmp = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)ha->vp_list.next; vha = (scsi_qla_host_t *)__mptr; goto ldv_60879; ldv_60878: ; if ((unsigned int )vha->vp_idx != 0U) { atomic_inc(& vha->vref_count); spin_unlock_irqrestore(& ha->vport_slock, flags); switch ((int )*mb) { case 32784: ; case 32785: ; case 32786: ; case 32787: ; case 32816: ; case 32822: ; case 32788: ; case 32789: ql_dbg(33554432U, vha, 20516, "Async_event for VP[%d], mb=0x%x vha=%p.\n", i, (int )*mb, vha); qla2x00_async_event(vha, rsp, mb); goto ldv_60874; } ldv_60874: tmp___0 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___0); atomic_dec(& vha->vref_count); } else { } i = i + 1; __mptr___0 = (struct list_head const *)vha->list.next; vha = (scsi_qla_host_t *)__mptr___0; ldv_60879: ; if ((unsigned long )(& vha->list) != (unsigned long )(& ha->vp_list)) { goto ldv_60878; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); return; } } int qla2x00_vp_abort_isp(scsi_qla_host_t *vha ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp___0 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___0 != 2) { atomic_set(& vha->loop_state, 2); qla2x00_mark_all_devices_lost(vha, 0); } else { tmp = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp == 0) { atomic_set(& vha->loop_down_timer, 255); } else { } } tmp___1 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 == 0) { qla24xx_control_vp(vha, 11); } else { } ql_dbg(4194304U, vha, 32797, "Scheduling enable of Vport %d.\n", (int )vha->vp_idx); tmp___2 = qla24xx_enable_vp(vha); return (tmp___2); } } static int qla2x00_do_dpc_vp(scsi_qla_host_t *vha ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; { ql_dbg(67141632U, vha, 16402, "Entering %s vp_flags: 0x%lx.\n", "qla2x00_do_dpc_vp", vha->vp_flags); qla2x00_do_work(vha); tmp = test_and_clear_bit(0L, (unsigned long volatile *)(& vha->vp_flags)); if (tmp != 0) { ql_dbg(67108864U, vha, 16404, "Configure VP scheduled.\n"); qla24xx_configure_vp(vha); ql_dbg(67108864U, vha, 16405, "Configure VP end.\n"); return (0); } else { } tmp___0 = constant_test_bit(13L, (unsigned long const volatile *)(& vha->dpc_flags)); if 
(tmp___0 != 0) { ql_dbg(67108864U, vha, 16406, "FCPort update scheduled.\n"); qla2x00_update_fcports(vha); clear_bit(13L, (unsigned long volatile *)(& vha->dpc_flags)); ql_dbg(67108864U, vha, 16407, "FCPort update end.\n"); } else { } tmp___1 = test_and_clear_bit(8L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { tmp___2 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___2 == 0) { tmp___3 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___3 != 2) { ql_dbg(67108864U, vha, 16408, "Relogin needed scheduled.\n"); qla2x00_relogin(vha); ql_dbg(67108864U, vha, 16409, "Relogin needed end.\n"); } else { } } else { } } else { } tmp___4 = test_and_clear_bit(0L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___4 != 0) { tmp___5 = test_and_set_bit(1L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___5 == 0) { clear_bit(1L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } } else { } tmp___7 = test_and_clear_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___7 != 0) { tmp___6 = test_and_set_bit(5L, (unsigned long volatile *)(& vha->dpc_flags)); if (tmp___6 == 0) { ql_dbg(67108864U, vha, 16410, "Loop resync scheduled.\n"); qla2x00_loop_resync(vha); clear_bit(5L, (unsigned long volatile *)(& vha->dpc_flags)); ql_dbg(67108864U, vha, 16411, "Loop resync end.\n"); } else { } } else { } ql_dbg(67141632U, vha, 16412, "Exiting %s.\n", "qla2x00_do_dpc_vp"); return (0); } } void qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha ) { int ret ; struct qla_hw_data *ha ; scsi_qla_host_t *vp ; unsigned long flags ; int tmp ; raw_spinlock_t *tmp___0 ; struct list_head const *__mptr ; raw_spinlock_t *tmp___1 ; struct list_head const *__mptr___0 ; { ha = vha->hw; flags = 0UL; if ((unsigned int )vha->vp_idx != 0U) { return; } else { } tmp = list_empty((struct list_head const *)(& ha->vp_list)); if (tmp != 0) { return; } else { } clear_bit(14L, (unsigned long volatile *)(& vha->dpc_flags)); if (((int )ha->current_topology & 8) == 0) { return; } else { } tmp___0 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___0); __mptr = (struct list_head const *)ha->vp_list.next; vp = (scsi_qla_host_t *)__mptr; goto ldv_60906; ldv_60905: ; if ((unsigned int )vp->vp_idx != 0U) { atomic_inc(& vp->vref_count); spin_unlock_irqrestore(& ha->vport_slock, flags); ret = qla2x00_do_dpc_vp(vp); tmp___1 = spinlock_check(& ha->vport_slock); flags = _raw_spin_lock_irqsave(tmp___1); atomic_dec(& vp->vref_count); } else { } __mptr___0 = (struct list_head const *)vp->list.next; vp = (scsi_qla_host_t *)__mptr___0; ldv_60906: ; if ((unsigned long )(& vp->list) != (unsigned long )(& ha->vp_list)) { goto ldv_60905; } else { } spin_unlock_irqrestore(& ha->vport_slock, flags); return; } } int qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport ) { scsi_qla_host_t *base_vha ; void *tmp ; struct qla_hw_data *ha ; scsi_qla_host_t *vha ; uint8_t port_name[8U] ; int tmp___0 ; { tmp = shost_priv(fc_vport->shost); base_vha = (scsi_qla_host_t *)tmp; ha = base_vha->hw; if (fc_vport->roles != 2U) { return (-38); } else { } if (*((unsigned long *)ha + 2UL) == 0UL) { return (-38); } else { } if (((int )ha->switch_cap & 1024) == 0) { return (-95); } else { } u64_to_wwn(fc_vport->port_name, (u8 *)(& port_name)); tmp___0 = memcmp((void const *)(& port_name), (void const *)(& base_vha->port_name), 8UL); if (tmp___0 == 0) { return (-76); } else { } vha = qla24xx_find_vhost_by_name(ha, (uint8_t *)(& port_name)); if ((unsigned long )vha != 
(unsigned long )((scsi_qla_host_t *)0)) { return (-76); } else { } if ((int )ha->num_vhosts > (int )ha->max_npiv_vports) { ql_dbg(262144U, vha, 40964, "num_vhosts %ud is bigger than max_npiv_vports %ud.\n", (int )ha->num_vhosts, (int )ha->max_npiv_vports); return (-38); } else { } return (0); } } scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *fc_vport ) { scsi_qla_host_t *base_vha ; void *tmp ; struct qla_hw_data *ha ; scsi_qla_host_t *vha ; struct scsi_host_template *sht ; struct Scsi_Host *host ; struct scsi_qla_host *tmp___0 ; uint32_t tmp___1 ; { tmp = shost_priv(fc_vport->shost); base_vha = (scsi_qla_host_t *)tmp; ha = base_vha->hw; sht = & qla2xxx_driver_template; tmp___0 = qla2x00_create_host(sht, ha); vha = tmp___0; if ((unsigned long )vha == (unsigned long )((scsi_qla_host_t *)0)) { ql_log(1U, vha, 40965, "scsi_host_alloc() failed for vport.\n"); return ((scsi_qla_host_t *)0); } else { } host = vha->host; fc_vport->dd_data = (void *)vha; u64_to_wwn(fc_vport->node_name, (u8 *)(& vha->node_name)); u64_to_wwn(fc_vport->port_name, (u8 *)(& vha->port_name)); vha->fc_vport = fc_vport; vha->device_flags = 0U; tmp___1 = qla24xx_allocate_vp_id(vha); vha->vp_idx = (uint16_t )tmp___1; if ((int )vha->vp_idx > (int )ha->max_npiv_vports) { ql_dbg(262144U, vha, 40966, "Couldn\'t allocate vp_id.\n"); goto create_vhost_failed; } else { } vha->mgmt_svr_loop_id = (unsigned int )vha->vp_idx + 10U; vha->dpc_flags = 0UL; set_bit(4L, (unsigned long volatile *)(& vha->vp_flags)); atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 255); qla2x00_start_timer(vha, (void *)(& qla2x00_timer), 1UL); vha->req = base_vha->req; host->can_queue = (int )(base_vha->req)->length + 128; host->cmd_per_lun = 3; if ((ha->device_type & 33554432U) != 0U && ql2xenabledif != 0) { host->max_cmd_len = 32U; } else { host->max_cmd_len = 16U; } host->max_channel = 0U; host->max_lun = ql2xmaxlun; host->unique_id = host->host_no; host->max_id = (unsigned int )ha->max_fibre_devices; host->transportt = qla2xxx_transport_vport_template; ql_dbg(262144U, vha, 40967, "Detect vport hba %ld at address = %p.\n", vha->host_no, vha); vha->flags.init_done = 1U; mutex_lock_nested(& ha->vport_lock, 0U); set_bit((long )vha->vp_idx, (unsigned long volatile *)(& ha->vp_idx_map)); ha->cur_vport_count = ha->cur_vport_count + 1; mutex_unlock(& ha->vport_lock); return (vha); create_vhost_failed: ; return ((scsi_qla_host_t *)0); } } static void qla25xx_free_req_que(struct scsi_qla_host *vha , struct req_que *req ) { struct qla_hw_data *ha ; uint16_t que_id ; { ha = vha->hw; que_id = req->id; dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )req->length + 1) * 64UL, (void *)req->ring, req->dma, (struct dma_attrs *)0); req->ring = (request_t *)0; req->dma = 0ULL; if ((unsigned int )que_id != 0U) { *(ha->req_q_map + (unsigned long )que_id) = (struct req_que *)0; mutex_lock_nested(& ha->vport_lock, 0U); clear_bit((long )que_id, (unsigned long volatile *)(& ha->req_qid_map)); mutex_unlock(& ha->vport_lock); } else { } kfree((void const *)req->outstanding_cmds); kfree((void const *)req); req = (struct req_que *)0; return; } } static void qla25xx_free_rsp_que(struct scsi_qla_host *vha , struct rsp_que *rsp ) { struct qla_hw_data *ha ; uint16_t que_id ; { ha = vha->hw; que_id = rsp->id; if ((unsigned long )rsp->msix != (unsigned long )((struct qla_msix_entry *)0) && (rsp->msix)->have_irq != 0) { free_irq((rsp->msix)->vector, (void *)rsp); (rsp->msix)->have_irq = 0; (rsp->msix)->rsp = (struct rsp_que *)0; } else { } 
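/* After releasing the queue's MSI-X vector, the DMA-coherent response ring ((length + 1) entries of 64 bytes) is freed, the queue's slot in rsp_q_map is reset, its id bit in rsp_qid_map is cleared under vport_lock, and the rsp_que structure itself is kfree()d. */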
dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )rsp->length + 1) * 64UL, (void *)rsp->ring, rsp->dma, (struct dma_attrs *)0); rsp->ring = (response_t *)0; rsp->dma = 0ULL; if ((unsigned int )que_id != 0U) { *(ha->rsp_q_map + (unsigned long )que_id) = (struct rsp_que *)0; mutex_lock_nested(& ha->vport_lock, 0U); clear_bit((long )que_id, (unsigned long volatile *)(& ha->rsp_qid_map)); mutex_unlock(& ha->vport_lock); } else { } kfree((void const *)rsp); rsp = (struct rsp_que *)0; return; } } int qla25xx_delete_req_que(struct scsi_qla_host *vha , struct req_que *req ) { int ret ; { ret = -1; if ((unsigned long )req != (unsigned long )((struct req_que *)0)) { req->options = (uint16_t )((unsigned int )req->options | 1U); ret = qla25xx_init_req_que(vha, req); } else { } if (ret == 0) { qla25xx_free_req_que(vha, req); } else { } return (ret); } } static int qla25xx_delete_rsp_que(struct scsi_qla_host *vha , struct rsp_que *rsp ) { int ret ; { ret = -1; if ((unsigned long )rsp != (unsigned long )((struct rsp_que *)0)) { rsp->options = (uint16_t )((unsigned int )rsp->options | 1U); ret = qla25xx_init_rsp_que(vha, rsp); } else { } if (ret == 0) { qla25xx_free_rsp_que(vha, rsp); } else { } return (ret); } } int qla25xx_delete_queues(struct scsi_qla_host *vha ) { int cnt ; int ret ; struct req_que *req ; struct rsp_que *rsp ; struct qla_hw_data *ha ; { ret = 0; req = (struct req_que *)0; rsp = (struct rsp_que *)0; ha = vha->hw; cnt = 1; goto ldv_60955; ldv_60954: req = *(ha->req_q_map + (unsigned long )cnt); if ((unsigned long )req != (unsigned long )((struct req_que *)0)) { ret = qla25xx_delete_req_que(vha, req); if (ret != 0) { ql_log(1U, vha, 234, "Couldn\'t delete req que %d.\n", (int )req->id); return (ret); } else { } } else { } cnt = cnt + 1; ldv_60955: ; if ((int )ha->max_req_queues > cnt) { goto ldv_60954; } else { } cnt = 1; goto ldv_60958; ldv_60957: rsp = *(ha->rsp_q_map + (unsigned long )cnt); if ((unsigned long )rsp != (unsigned long )((struct rsp_que *)0)) { ret = qla25xx_delete_rsp_que(vha, rsp); if (ret != 0) { ql_log(1U, vha, 235, "Couldn\'t delete rsp que %d.\n", (int )rsp->id); return (ret); } else { } } else { } cnt = cnt + 1; ldv_60958: ; if ((int )ha->max_rsp_queues > cnt) { goto ldv_60957; } else { } return (ret); } } int qla25xx_create_req_que(struct qla_hw_data *ha , uint16_t options , uint8_t vp_idx , uint16_t rid , int rsp_que , uint8_t qos ) { int ret ; struct req_que *req ; struct scsi_qla_host *base_vha ; void *tmp ; uint16_t que_id ; device_reg_t *reg ; uint32_t cnt ; void *tmp___0 ; void *tmp___1 ; unsigned long tmp___2 ; { ret = 0; req = (struct req_que *)0; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; que_id = 0U; tmp___0 = kzalloc(184UL, 208U); req = (struct req_que *)tmp___0; if ((unsigned long )req == (unsigned long )((struct req_que *)0)) { ql_log(0U, base_vha, 217, "Failed to allocate memory for request queue.\n"); goto failed; } else { } req->length = 2048U; tmp___1 = dma_alloc_attrs(& (ha->pdev)->dev, (unsigned long )((int )req->length + 1) * 64UL, & req->dma, 208U, (struct dma_attrs *)0); req->ring = (request_t *)tmp___1; if ((unsigned long )req->ring == (unsigned long )((request_t *)0)) { ql_log(0U, base_vha, 218, "Failed to allocate memory for request_ring.\n"); goto que_failed; } else { } ret = qla2x00_alloc_outstanding_cmds(ha, req); if (ret != 0) { goto que_failed; } else { } mutex_lock_nested(& ha->vport_lock, 0U); tmp___2 = find_first_zero_bit((unsigned long const *)(& ha->req_qid_map), (unsigned long 
)ha->max_req_queues); que_id = (uint16_t )tmp___2; if ((int )((unsigned short )ha->max_req_queues) <= (int )que_id) { mutex_unlock(& ha->vport_lock); ql_log(1U, base_vha, 219, "No resources to create additional request queue.\n"); goto que_failed; } else { } set_bit((long )que_id, (unsigned long volatile *)(& ha->req_qid_map)); *(ha->req_q_map + (unsigned long )que_id) = req; req->rid = rid; req->vp_idx = (uint16_t )vp_idx; req->qos = (uint16_t )qos; ql_dbg(1048576U, base_vha, 49154, "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", (int )que_id, (int )req->rid, (int )req->vp_idx, (int )req->qos); ql_dbg(1073741824U, base_vha, 220, "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", (int )que_id, (int )req->rid, (int )req->vp_idx, (int )req->qos); if (rsp_que < 0) { req->rsp = (struct rsp_que *)0; } else { req->rsp = *(ha->rsp_q_map + (unsigned long )rsp_que); } if ((unsigned int )((unsigned char )((int )req->rid >> 8)) != 0U) { options = (uint16_t )((unsigned int )options | 16U); } else { } if ((unsigned int )((unsigned char )req->rid) != 0U) { options = (uint16_t )((unsigned int )options | 32U); } else { } req->options = options; ql_dbg(1048576U, base_vha, 49155, "options=0x%x.\n", (int )req->options); ql_dbg(1073741824U, base_vha, 221, "options=0x%x.\n", (int )req->options); cnt = 1U; goto ldv_60977; ldv_60976: *(req->outstanding_cmds + (unsigned long )cnt) = (srb_t *)0; cnt = cnt + 1U; ldv_60977: ; if ((uint32_t )req->num_outstanding_cmds > cnt) { goto ldv_60976; } else { } req->current_outstanding_cmd = 1U; req->ring_ptr = req->ring; req->ring_index = 0U; req->cnt = req->length; req->id = que_id; reg = (unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) ? ha->mqiobase + (unsigned long )((int )que_id * 4096) : ha->iobase; req->req_q_in = & reg->isp25mq.req_q_in; req->req_q_out = & reg->isp25mq.req_q_out; req->max_q_depth = (*(ha->req_q_map))->max_q_depth; mutex_unlock(& ha->vport_lock); ql_dbg(1048576U, base_vha, 49156, "ring_ptr=%p ring_index=%d, cnt=%d id=%d max_q_depth=%d.\n", req->ring_ptr, (int )req->ring_index, (int )req->cnt, (int )req->id, req->max_q_depth); ql_dbg(1073741824U, base_vha, 222, "ring_ptr=%p ring_index=%d, cnt=%d id=%d max_q_depth=%d.\n", req->ring_ptr, (int )req->ring_index, (int )req->cnt, (int )req->id, req->max_q_depth); ret = qla25xx_init_req_que(base_vha, req); if (ret != 0) { ql_log(0U, base_vha, 223, "%s failed.\n", "qla25xx_create_req_que"); mutex_lock_nested(& ha->vport_lock, 0U); clear_bit((long )que_id, (unsigned long volatile *)(& ha->req_qid_map)); mutex_unlock(& ha->vport_lock); goto que_failed; } else { } return ((int )req->id); que_failed: qla25xx_free_req_que(base_vha, req); failed: ; return (0); } } static void qla_do_work(struct work_struct *work ) { unsigned long flags ; struct rsp_que *rsp ; struct work_struct const *__mptr ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; raw_spinlock_t *tmp ; void *tmp___0 ; { __mptr = (struct work_struct const *)work; rsp = (struct rsp_que *)__mptr + 0xffffffffffffffa8UL; ha = rsp->hw; tmp = spinlock_check(& (rsp->hw)->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (struct scsi_qla_host *)tmp___0; qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(& (rsp->hw)->hardware_lock, flags); return; } } int qla25xx_create_rsp_que(struct qla_hw_data *ha , uint16_t options , uint8_t vp_idx , uint16_t rid , int req ) { int ret ; struct rsp_que *rsp ; struct scsi_qla_host *base_vha ; void *tmp ; uint16_t que_id ; 
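/* qla25xx_create_rsp_que(): allocates a response queue with a 128-entry DMA ring, picks the first free id in rsp_qid_map, binds an MSI-X entry when MSI-X is enabled, maps the per-queue in/out pointer registers and brings the queue up through qla25xx_request_irq()/qla25xx_init_rsp_que(); any failure tears the queue down again via qla25xx_free_rsp_que(). */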
device_reg_t *reg ; void *tmp___0 ; void *tmp___1 ; unsigned long tmp___2 ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; { ret = 0; rsp = (struct rsp_que *)0; tmp = pci_get_drvdata(ha->pdev); base_vha = (struct scsi_qla_host *)tmp; que_id = 0U; tmp___0 = kzalloc(256UL, 208U); rsp = (struct rsp_que *)tmp___0; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(1U, base_vha, 102, "Failed to allocate memory for response queue.\n"); goto failed; } else { } rsp->length = 128U; tmp___1 = dma_alloc_attrs(& (ha->pdev)->dev, (unsigned long )((int )rsp->length + 1) * 64UL, & rsp->dma, 208U, (struct dma_attrs *)0); rsp->ring = (response_t *)tmp___1; if ((unsigned long )rsp->ring == (unsigned long )((response_t *)0)) { ql_log(1U, base_vha, 225, "Failed to allocate memory for response ring.\n"); goto que_failed; } else { } mutex_lock_nested(& ha->vport_lock, 0U); tmp___2 = find_first_zero_bit((unsigned long const *)(& ha->rsp_qid_map), (unsigned long )ha->max_rsp_queues); que_id = (uint16_t )tmp___2; if ((int )((unsigned short )ha->max_rsp_queues) <= (int )que_id) { mutex_unlock(& ha->vport_lock); ql_log(1U, base_vha, 226, "No resources to create additional request queue.\n"); goto que_failed; } else { } set_bit((long )que_id, (unsigned long volatile *)(& ha->rsp_qid_map)); if (*((unsigned long *)ha + 2UL) != 0UL) { rsp->msix = ha->msix_entries + ((unsigned long )que_id + 1UL); } else { ql_log(1U, base_vha, 227, "MSIX not enalbled.\n"); } *(ha->rsp_q_map + (unsigned long )que_id) = rsp; rsp->rid = rid; rsp->vp_idx = (uint16_t )vp_idx; rsp->hw = ha; ql_dbg(1073741824U, base_vha, 228, "queue_id=%d rid=%d vp_idx=%d hw=%p.\n", (int )que_id, (int )rsp->rid, (int )rsp->vp_idx, rsp->hw); if ((unsigned int )((unsigned char )((int )rsp->rid >> 8)) != 0U) { options = (uint16_t )((unsigned int )options | 16U); } else { } if ((unsigned int )((unsigned char )rsp->rid) != 0U) { options = (uint16_t )((unsigned int )options | 32U); } else { } if ((ha->device_type & 8192U) == 0U && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) { options = (uint16_t )((unsigned int )options | 64U); } else { } rsp->options = options; rsp->id = que_id; reg = (unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) ? 
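/* Per-queue register window: on multi-queue capable adapters each queue's registers sit at a 4 KB offset (que_id * 4096) inside the mqiobase mapping; otherwise the legacy iobase mapping is used. */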
ha->mqiobase + (unsigned long )((int )que_id * 4096) : ha->iobase; rsp->rsp_q_in = & reg->isp25mq.rsp_q_in; rsp->rsp_q_out = & reg->isp25mq.rsp_q_out; mutex_unlock(& ha->vport_lock); ql_dbg(1048576U, base_vha, 49163, "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", (int )rsp->options, (int )rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); ql_dbg(1073741824U, base_vha, 229, "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", (int )rsp->options, (int )rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); ret = qla25xx_request_irq(rsp); if (ret != 0) { goto que_failed; } else { } ret = qla25xx_init_rsp_que(base_vha, rsp); if (ret != 0) { ql_log(0U, base_vha, 231, "%s failed.\n", "qla25xx_create_rsp_que"); mutex_lock_nested(& ha->vport_lock, 0U); clear_bit((long )que_id, (unsigned long volatile *)(& ha->rsp_qid_map)); mutex_unlock(& ha->vport_lock); goto que_failed; } else { } if (req >= 0) { rsp->req = *(ha->req_q_map + (unsigned long )req); } else { rsp->req = (struct req_que *)0; } qla2x00_init_response_q_entries(rsp); if ((unsigned long )(rsp->hw)->wq != (unsigned long )((struct workqueue_struct *)0)) { __init_work(& rsp->q_work, 0); __constr_expr_0.counter = 137438953408L; rsp->q_work.data = __constr_expr_0; lockdep_init_map(& rsp->q_work.lockdep_map, "(&rsp->q_work)", & __key, 0); INIT_LIST_HEAD(& rsp->q_work.entry); rsp->q_work.func = & qla_do_work; } else { } return ((int )rsp->id); que_failed: qla25xx_free_rsp_que(base_vha, rsp); failed: ; return (0); } } void disable_suitable_timer_11(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_11) { ldv_timer_state_11 = 0; return; } else { } return; } } void choose_timer_11(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_11 = 2; return; } } int reg_timer_11(struct timer_list *timer ) { { ldv_timer_list_11 = timer; ldv_timer_state_11 = 1; return (0); } } void activate_pending_timer_11(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_11 == (unsigned long )timer) { if (ldv_timer_state_11 == 2 || pending_flag != 0) { ldv_timer_list_11 = timer; ldv_timer_list_11->data = data; ldv_timer_state_11 = 1; } else { } return; } else { } reg_timer_11(timer); ldv_timer_list_11->data = data; return; } } int ldv_del_timer_55(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_56(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } int ldv_del_timer_sync_57(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type___1 ldv_func_res ; int tmp ; { tmp = del_timer_sync(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_del_timer_61(struct timer_list *ldv_func_arg1 ) ; int reg_timer_12(struct timer_list *timer ) ; void choose_timer_12(struct timer_list *timer ) ; void disable_suitable_timer_12(struct timer_list *timer ) ; void activate_pending_timer_12(struct timer_list *timer , unsigned long data , int pending_flag ) ; extern ssize_t seq_read(struct file * , char * , size_t , loff_t * ) ; extern loff_t seq_lseek(struct file 
* , loff_t , int ) ; extern int seq_printf(struct seq_file * , char const * , ...) ; extern int single_open(struct file * , int (*)(struct seq_file * , void * ) , void * ) ; extern int single_release(struct inode * , struct file * ) ; int ldv_scsi_add_host_with_dma_62(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; extern struct dentry *debugfs_create_file(char const * , umode_t , struct dentry * , void * , struct file_operations const * ) ; extern struct dentry *debugfs_create_dir(char const * , struct dentry * ) ; extern void debugfs_remove(struct dentry * ) ; static struct dentry *qla2x00_dfs_root ; static atomic_t qla2x00_dfs_root_count ; static int qla2x00_dfs_fce_show(struct seq_file *s , void *unused ) { scsi_qla_host_t *vha ; uint32_t cnt ; uint32_t *fce ; uint64_t fce_start ; struct qla_hw_data *ha ; uint32_t *tmp ; { vha = (scsi_qla_host_t *)s->private; ha = vha->hw; mutex_lock_nested(& ha->fce_mutex, 0U); seq_printf(s, "FCE Trace Buffer\n"); seq_printf(s, "In Pointer = %llx\n\n", ha->fce_wr); seq_printf(s, "Base = %llx\n\n", ha->fce_dma); seq_printf(s, "FCE Enable Registers\n"); seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", (int )ha->fce_mb[0], (int )ha->fce_mb[2], (int )ha->fce_mb[3], (int )ha->fce_mb[4], (int )ha->fce_mb[5], (int )ha->fce_mb[6]); fce = (uint32_t *)ha->fce; fce_start = ha->fce_dma; cnt = 0U; goto ldv_43551; ldv_43550: ; if ((cnt & 7U) == 0U) { seq_printf(s, "\n%llx: ", (uint64_t )(cnt * 4U) + fce_start); } else { seq_printf(s, " "); } tmp = fce; fce = fce + 1; seq_printf(s, "%08x", *tmp); cnt = cnt + 1U; ldv_43551: ; if ((ha->fce_bufs * 1024U) / 4U > cnt) { goto ldv_43550; } else { } seq_printf(s, "\nEnd\n"); mutex_unlock(& ha->fce_mutex); return (0); } } static int qla2x00_dfs_fce_open(struct inode *inode , struct file *file ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; int rval ; int tmp ; { vha = (scsi_qla_host_t *)inode->i_private; ha = vha->hw; if (*((unsigned long *)ha + 2UL) == 0UL) { goto out; } else { } mutex_lock_nested(& ha->fce_mutex, 0U); rval = qla2x00_disable_fce_trace(vha, & ha->fce_wr, & ha->fce_rd); if (rval != 0) { ql_dbg(8388608U, vha, 28764, "DebugFS: Unable to disable FCE (%d).\n", rval); } else { } ha->flags.fce_enabled = 0U; mutex_unlock(& ha->fce_mutex); out: tmp = single_open(file, & qla2x00_dfs_fce_show, (void *)vha); return (tmp); } } static int qla2x00_dfs_fce_release(struct inode *inode , struct file *file ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; int rval ; int tmp ; { vha = (scsi_qla_host_t *)inode->i_private; ha = vha->hw; if (*((unsigned long *)ha + 2UL) != 0UL) { goto out; } else { } mutex_lock_nested(& ha->fce_mutex, 0U); ha->flags.fce_enabled = 1U; memset(ha->fce, 0, (size_t )(ha->fce_bufs * 1024U)); rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, (int )((uint16_t )ha->fce_bufs), (uint16_t *)(& ha->fce_mb), & ha->fce_bufs); if (rval != 0) { ql_dbg(8388608U, vha, 28685, "DebugFS: Unable to reinitialize FCE (%d).\n", rval); ha->flags.fce_enabled = 0U; } else { } mutex_unlock(& ha->fce_mutex); out: tmp = single_release(inode, file); return (tmp); } } static struct file_operations const dfs_fce_ops = {0, & seq_lseek, & seq_read, 0, 0, 0, 0, 0, 0, 0, 0, & qla2x00_dfs_fce_open, 0, & qla2x00_dfs_fce_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; int qla2x00_dfs_setup(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct lock_class_key __key ; { ha = vha->hw; if (((ha->device_type & 2048U) == 0U && (ha->device_type & 8192U) == 0U) && ((ha->device_type & 32768U) == 0U && 
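/* qla2x00_dfs_setup() creates debugfs nodes only for adapters whose device_type carries at least one of the capability bits tested in this condition and that actually allocated an FCE trace buffer (checked just below); all other hardware bails out to the out label. */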
(ha->device_type & 65536U) == 0U)) { goto out; } else { } if ((unsigned long )ha->fce == (unsigned long )((void *)0)) { goto out; } else { } if ((unsigned long )qla2x00_dfs_root != (unsigned long )((struct dentry *)0)) { goto create_dir; } else { } atomic_set(& qla2x00_dfs_root_count, 0); qla2x00_dfs_root = debugfs_create_dir("qla2xxx", (struct dentry *)0); if ((unsigned long )qla2x00_dfs_root == (unsigned long )((struct dentry *)0)) { ql_log(1U, vha, 247, "Unable to create debugfs root directory.\n"); goto out; } else { } create_dir: ; if ((unsigned long )ha->dfs_dir != (unsigned long )((struct dentry *)0)) { goto create_nodes; } else { } __mutex_init(& ha->fce_mutex, "&ha->fce_mutex", & __key); ha->dfs_dir = debugfs_create_dir((char const *)(& vha->host_str), qla2x00_dfs_root); if ((unsigned long )ha->dfs_dir == (unsigned long )((struct dentry *)0)) { ql_log(1U, vha, 248, "Unable to create debugfs ha directory.\n"); goto out; } else { } atomic_inc(& qla2x00_dfs_root_count); create_nodes: ha->dfs_fce = debugfs_create_file("fce", 256, ha->dfs_dir, (void *)vha, & dfs_fce_ops); if ((unsigned long )ha->dfs_fce == (unsigned long )((struct dentry *)0)) { ql_log(1U, vha, 249, "Unable to create debugfs fce node.\n"); goto out; } else { } out: ; return (0); } } int qla2x00_dfs_remove(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; if ((unsigned long )ha->dfs_fce != (unsigned long )((struct dentry *)0)) { debugfs_remove(ha->dfs_fce); ha->dfs_fce = (struct dentry *)0; } else { } if ((unsigned long )ha->dfs_dir != (unsigned long )((struct dentry *)0)) { debugfs_remove(ha->dfs_dir); ha->dfs_dir = (struct dentry *)0; atomic_dec(& qla2x00_dfs_root_count); } else { } tmp = atomic_read((atomic_t const *)(& qla2x00_dfs_root_count)); if (tmp == 0 && (unsigned long )qla2x00_dfs_root != (unsigned long )((struct dentry *)0)) { debugfs_remove(qla2x00_dfs_root); qla2x00_dfs_root = (struct dentry *)0; } else { } return (0); } } int ldv_retval_0 ; int reg_timer_12(struct timer_list *timer ) { { ldv_timer_list_12 = timer; ldv_timer_state_12 = 1; return (0); } } void choose_timer_12(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_12 = 2; return; } } void disable_suitable_timer_12(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_12) { ldv_timer_state_12 = 0; return; } else { } return; } } void activate_pending_timer_12(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_12 == (unsigned long )timer) { if (ldv_timer_state_12 == 2 || pending_flag != 0) { ldv_timer_list_12 = timer; ldv_timer_list_12->data = data; ldv_timer_state_12 = 1; } else { } return; } else { } reg_timer_12(timer); ldv_timer_list_12->data = data; return; } } void ldv_file_operations_18(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_zalloc(1032UL); dfs_fce_ops_group1 = (struct inode *)tmp; tmp___0 = ldv_zalloc(360UL); dfs_fce_ops_group2 = (struct file *)tmp___0; return; } } void ldv_main_exported_18(void) { loff_t ldvarg83 ; loff_t tmp ; char *ldvarg86 ; void *tmp___0 ; int ldvarg82 ; int tmp___1 ; loff_t *ldvarg84 ; void *tmp___2 ; size_t ldvarg85 ; size_t tmp___3 ; int tmp___4 ; { tmp = __VERIFIER_nondet_loff_t(); ldvarg83 = tmp; tmp___0 = ldv_zalloc(1UL); ldvarg86 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); ldvarg82 = tmp___1; tmp___2 = ldv_zalloc(8UL); ldvarg84 = (loff_t *)tmp___2; tmp___3 = __VERIFIER_nondet_size_t(); ldvarg85 = 
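/* LDV environment model for dfs_fce_ops: ldv_file_operations_18() above allocates dummy inode/file objects, and the nondeterministic switch below exercises release/open/read/llseek, tracking the open state in ldv_state_variable_18 and the open-file balance in ref_cnt so the verifier explores every legal callback ordering. */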
tmp___3; tmp___4 = __VERIFIER_nondet_int(); switch (tmp___4) { case 0: ; if (ldv_state_variable_18 == 2) { qla2x00_dfs_fce_release(dfs_fce_ops_group1, dfs_fce_ops_group2); ldv_state_variable_18 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_43609; case 1: ; if (ldv_state_variable_18 == 1) { ldv_retval_0 = qla2x00_dfs_fce_open(dfs_fce_ops_group1, dfs_fce_ops_group2); if (ldv_retval_0 == 0) { ldv_state_variable_18 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_43609; case 2: ; if (ldv_state_variable_18 == 2) { seq_read(dfs_fce_ops_group2, ldvarg86, ldvarg85, ldvarg84); ldv_state_variable_18 = 2; } else { } goto ldv_43609; case 3: ; if (ldv_state_variable_18 == 2) { seq_lseek(dfs_fce_ops_group2, ldvarg83, ldvarg82); ldv_state_variable_18 = 2; } else { } goto ldv_43609; default: ldv_stop(); } ldv_43609: ; return; } } int ldv_del_timer_61(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_62(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } int ldv_del_timer_65(struct timer_list *ldv_func_arg1 ) ; void choose_timer_13(struct timer_list *timer ) ; void activate_pending_timer_13(struct timer_list *timer , unsigned long data , int pending_flag ) ; void disable_suitable_timer_13(struct timer_list *timer ) ; int reg_timer_13(struct timer_list *timer ) ; extern size_t sg_copy_from_buffer(struct scatterlist * , unsigned int , void * , size_t ) ; extern size_t sg_copy_to_buffer(struct scatterlist * , unsigned int , void * , size_t ) ; __inline static void dma_unmap_sg_attrs___0(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (65), "i" (12UL)); ldv_21302: ; goto ldv_21302; } else { } debug_dma_unmap_sg(dev, sg, nents, (int )dir); if ((unsigned long )ops->unmap_sg != (unsigned long )((void (*)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_sg))(dev, sg, nents, dir, attrs); } else { } return; } } int ldv_scsi_add_host_with_dma_66(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; void qla2x00_bsg_job_done(void *data , void *ptr , int res ) ; void qla2x00_bsg_sp_free(void *data , void *ptr ) ; void qla2x00_bsg_job_done(void *data , void *ptr , int res ) { srb_t *sp ; struct scsi_qla_host *vha ; struct fc_bsg_job *bsg_job ; { sp = (srb_t *)ptr; vha = (struct scsi_qla_host *)data; bsg_job = sp->u.bsg_job; (bsg_job->reply)->result = (uint32_t )res; (*(bsg_job->job_done))(bsg_job); (*(sp->free))((void *)vha, (void *)sp); return; } } void qla2x00_bsg_sp_free(void *data , void *ptr ) { srb_t *sp ; struct scsi_qla_host *vha ; struct fc_bsg_job *bsg_job ; struct qla_hw_data *ha ; 
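/* qla2x00_bsg_sp_free(): completion-side cleanup for BSG SRBs. For sp->type 11 (the ISPFX00 pass-through) only the payload directions flagged in the embedded qla_mt_iocb_rqst_fx00 are unmapped; otherwise both request and reply scatter-gather lists are unmapped. The throw-away fcport used by host-initiated jobs (types 5, 11 and 4) is freed before the SRB is released with qla2x00_rel_sp(). qla24xx_fcp_prio_cfg_valid() further below checks an FCP priority table for the "HQOS" signature and at least one usable entry. */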
struct qla_mt_iocb_rqst_fx00 *piocb_rqst ; { sp = (srb_t *)ptr; vha = (sp->fcport)->vha; bsg_job = sp->u.bsg_job; ha = vha->hw; if ((unsigned int )sp->type == 11U) { piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)(& (bsg_job->request)->rqst_data.h_vendor.vendor_cmd) + 1U; if ((int )piocb_rqst->flags & 1) { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); } else { } if (((int )piocb_rqst->flags & 2) != 0) { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); } else { } } else { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); } if (((unsigned int )sp->type == 5U || (unsigned int )sp->type == 11U) || (unsigned int )sp->type == 4U) { kfree((void const *)sp->fcport); } else { } qla2x00_rel_sp(vha, sp); return; } } int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha , struct qla_fcp_prio_cfg *pri_cfg , uint8_t flag ) { int i ; int ret ; int num_valid ; uint8_t *bcode ; struct qla_fcp_prio_entry *pri_entry ; uint32_t *bcode_val_ptr ; uint32_t bcode_val ; { ret = 1; num_valid = 0; bcode = (uint8_t *)pri_cfg; bcode_val_ptr = (uint32_t *)pri_cfg; bcode_val = *bcode_val_ptr; if (bcode_val == 4294967295U) { ql_dbg(8388608U, vha, 28753, "No FCP Priority config data.\n"); return (0); } else { } if ((((unsigned int )*bcode != 72U || (unsigned int )*(bcode + 1UL) != 81U) || (unsigned int )*(bcode + 2UL) != 79U) || (unsigned int )*(bcode + 3UL) != 83U) { ql_dbg(8388608U, vha, 28754, "Invalid FCP Priority data header. bcode=0x%x.\n", bcode_val); return (0); } else { } if ((unsigned int )flag != 1U) { return (ret); } else { } pri_entry = (struct qla_fcp_prio_entry *)(& pri_cfg->entry); i = 0; goto ldv_43546; ldv_43545: ; if (((int )pri_entry->flags & 2) != 0) { num_valid = num_valid + 1; } else { } pri_entry = pri_entry + 1; i = i + 1; ldv_43546: ; if ((int )pri_cfg->num_entries > i) { goto ldv_43545; } else { } if (num_valid == 0) { ql_dbg(8388608U, vha, 28755, "No valid FCP Priority data entries.\n"); ret = 0; } else { ql_dbg(8388608U, vha, 28756, "Valid FCP priority data. 
num entries = %d.\n", num_valid); } return (ret); } } static int qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int ret ; uint32_t len ; uint32_t oper ; size_t tmp___0 ; void *tmp___1 ; int tmp___2 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; ret = 0; if ((((((ha->device_type & 128U) == 0U && (ha->device_type & 256U) == 0U) && ((ha->device_type & 512U) == 0U && (ha->device_type & 1024U) == 0U)) && (ha->device_type & 4096U) == 0U) && (ha->device_type & 2048U) == 0U) && ((ha->device_type & 16384U) == 0U && (ha->device_type & 262144U) == 0U)) { ret = -22; goto exit_fcp_prio_cfg; } else { } oper = (bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; if ((unsigned long )ha->fcp_prio_cfg == (unsigned long )((struct qla_fcp_prio_cfg *)0) && oper != 3U) { ret = -22; goto exit_fcp_prio_cfg; } else { } switch (oper) { case 0U: ; if (*((unsigned long *)ha + 2UL) != 0UL) { ha->flags.fcp_prio_enabled = 0U; (ha->fcp_prio_cfg)->attributes = (unsigned int )(ha->fcp_prio_cfg)->attributes & 254U; qla24xx_update_all_fcp_prio(vha); (bsg_job->reply)->result = 0U; } else { ret = -22; (bsg_job->reply)->result = 458752U; goto exit_fcp_prio_cfg; } goto ldv_43559; case 1U: ; if (*((unsigned long *)ha + 2UL) == 0UL) { if ((unsigned long )ha->fcp_prio_cfg != (unsigned long )((struct qla_fcp_prio_cfg *)0)) { ha->flags.fcp_prio_enabled = 1U; (ha->fcp_prio_cfg)->attributes = (uint8_t )((unsigned int )(ha->fcp_prio_cfg)->attributes | 1U); qla24xx_update_all_fcp_prio(vha); (bsg_job->reply)->result = 0U; } else { ret = -22; (bsg_job->reply)->result = 458752U; goto exit_fcp_prio_cfg; } } else { } goto ldv_43559; case 2U: len = bsg_job->reply_payload.payload_len; if (len == 0U || len > 32768U) { ret = -22; (bsg_job->reply)->result = 458752U; goto exit_fcp_prio_cfg; } else { } (bsg_job->reply)->result = 0U; tmp___0 = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void *)ha->fcp_prio_cfg, (size_t )len); (bsg_job->reply)->reply_payload_rcv_len = (uint32_t )tmp___0; goto ldv_43559; case 3U: len = bsg_job->request_payload.payload_len; if (len == 0U || len > 32768U) { (bsg_job->reply)->result = 458752U; ret = -22; goto exit_fcp_prio_cfg; } else { } if ((unsigned long )ha->fcp_prio_cfg == (unsigned long )((struct qla_fcp_prio_cfg *)0)) { tmp___1 = vmalloc(32768UL); ha->fcp_prio_cfg = (struct qla_fcp_prio_cfg *)tmp___1; if ((unsigned long )ha->fcp_prio_cfg == (unsigned long )((struct qla_fcp_prio_cfg *)0)) { ql_log(1U, vha, 28752, "Unable to allocate memory for fcp prio config data (%x).\n", 32768); (bsg_job->reply)->result = 458752U; ret = -12; goto exit_fcp_prio_cfg; } else { } } else { } memset((void *)ha->fcp_prio_cfg, 0, 32768UL); sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)ha->fcp_prio_cfg, 32768UL); tmp___2 = qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1); if (tmp___2 == 0) { (bsg_job->reply)->result = 458752U; ret = -22; vfree((void const *)ha->fcp_prio_cfg); ha->fcp_prio_cfg = (struct qla_fcp_prio_cfg *)0; goto exit_fcp_prio_cfg; } else { } ha->flags.fcp_prio_enabled = 0U; if ((int )(ha->fcp_prio_cfg)->attributes & 1) { ha->flags.fcp_prio_enabled = 1U; } else { } qla24xx_update_all_fcp_prio(vha); (bsg_job->reply)->result = 0U; goto ldv_43559; default: ret = -22; goto ldv_43559; } ldv_43559: ; exit_fcp_prio_cfg: ; if (ret == 0) { 
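/* The priority-config command completes the BSG job only on success; failures propagate the negative errno. qla2x00_process_els() below handles ELS pass-through: for FC_BSG_RPT_ELS (msgcode 0x40000001) it logs the rport's existing fcport into the fabric, for FC_BSG_HST_ELS_NOLOGIN it builds a temporary fcport from the supplied port_id; both payloads are then DMA-mapped (a single SG element each is required) and an SRB of type 3 or 4 is queued with qla2x00_start_sp(). */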
(*(bsg_job->job_done))(bsg_job); } else { } return (ret); } } static int qla2x00_process_els(struct fc_bsg_job *bsg_job ) { struct fc_rport *rport ; fc_port_t *fcport ; struct Scsi_Host *host ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; srb_t *sp ; char const *type ; int req_sg_cnt ; int rsp_sg_cnt ; int rval ; uint16_t nextlid ; void *tmp ; void *tmp___0 ; int tmp___1 ; { fcport = (fc_port_t *)0; rval = 262144; nextlid = 0U; if ((bsg_job->request)->msgcode == 1073741825U) { rport = bsg_job->rport; fcport = *((fc_port_t **)rport->dd_data); host = dev_to_shost(rport->dev.parent); tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; type = "FC_BSG_RPT_ELS"; } else { host = bsg_job->shost; tmp___0 = shost_priv(host); vha = (scsi_qla_host_t *)tmp___0; ha = vha->hw; type = "FC_BSG_HST_ELS_NOLOGIN"; } if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(1U, vha, 28677, "Host not online.\n"); rval = -5; goto done; } else { } if ((ha->device_type & 134217728U) == 0U) { ql_dbg(8388608U, vha, 28673, "ELS passthru not supported for ISP23xx based adapters.\n"); rval = -1; goto done; } else { } if (bsg_job->request_payload.sg_cnt > 1 || bsg_job->reply_payload.sg_cnt > 1) { ql_dbg(8388608U, vha, 28674, "Multiple SG\'s are not supported for ELS requests, request_sg_cnt=%x reply_sg_cnt=%x.\n", bsg_job->request_payload.sg_cnt, bsg_job->reply_payload.sg_cnt); rval = -1; goto done; } else { } if ((bsg_job->request)->msgcode == 1073741825U) { tmp___1 = qla2x00_fabric_login(vha, fcport, & nextlid); if (tmp___1 != 0) { ql_dbg(8388608U, vha, 28675, "Failed to login port %06X for ELS passthru.\n", (int )fcport->d_id.b24); rval = -5; goto done; } else { } } else { fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { rval = -12; goto done; } else { } fcport->vha = vha; fcport->d_id.b.al_pa = (bsg_job->request)->rqst_data.h_els.port_id[0]; fcport->d_id.b.area = (bsg_job->request)->rqst_data.h_els.port_id[1]; fcport->d_id.b.domain = (bsg_job->request)->rqst_data.h_els.port_id[2]; fcport->loop_id = (unsigned int )fcport->d_id.b.al_pa == 253U ? 2045U : 2046U; } req_sg_cnt = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); if (req_sg_cnt == 0) { rval = -12; goto done_free_fcport; } else { } rsp_sg_cnt = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); if (rsp_sg_cnt == 0) { rval = -12; goto done_free_fcport; } else { } if (bsg_job->request_payload.sg_cnt != req_sg_cnt || bsg_job->reply_payload.sg_cnt != rsp_sg_cnt) { ql_log(1U, vha, 28680, "dma mapping resulted in different sg counts, request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt, req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); rval = -11; goto done_unmap_sg; } else { } sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { rval = -12; goto done_unmap_sg; } else { } sp->type = (bsg_job->request)->msgcode == 1073741825U ? 3U : 4U; sp->name = (bsg_job->request)->msgcode == 1073741825U ? 
(char *)"bsg_els_rpt" : (char *)"bsg_els_hst"; sp->u.bsg_job = bsg_job; sp->free = & qla2x00_bsg_sp_free; sp->done = & qla2x00_bsg_job_done; ql_dbg(8388608U, vha, 28682, "bsg rqst type: %s els type: %x - loop-id=%x portid=%-2x%02x%02x.\n", type, (int )(bsg_job->request)->rqst_data.h_els.command_code, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); rval = qla2x00_start_sp(sp); if (rval != 0) { ql_log(1U, vha, 28686, "qla2x00_start_sp failed = %d\n", rval); qla2x00_rel_sp(vha, sp); rval = -5; goto done_unmap_sg; } else { } return (rval); done_unmap_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); goto done_free_fcport; done_free_fcport: ; if ((bsg_job->request)->msgcode == 1073741825U) { kfree((void const *)fcport); } else { } done: ; return (rval); } } __inline uint16_t qla24xx_calc_ct_iocbs(uint16_t dsds ) { uint16_t iocbs ; { iocbs = 1U; if ((unsigned int )dsds > 2U) { iocbs = (int )((uint16_t )(((int )dsds + -2) / 5)) + (int )iocbs; if (((int )dsds + -2) % 5 != 0) { iocbs = (uint16_t )((int )iocbs + 1); } else { } } else { } return (iocbs); } } static int qla2x00_process_ct(struct fc_bsg_job *bsg_job ) { srb_t *sp ; struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; int req_sg_cnt ; int rsp_sg_cnt ; uint16_t loop_id ; struct fc_port *fcport ; char *type ; fc_port_t *tmp___0 ; uint16_t tmp___1 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 262144; type = (char *)"FC_BSG_HST_CT"; req_sg_cnt = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); if (req_sg_cnt == 0) { ql_log(1U, vha, 28687, "dma_map_sg return %d for request\n", req_sg_cnt); rval = -12; goto done; } else { } rsp_sg_cnt = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); if (rsp_sg_cnt == 0) { ql_log(1U, vha, 28688, "dma_map_sg return %d for reply\n", rsp_sg_cnt); rval = -12; goto done; } else { } if (bsg_job->request_payload.sg_cnt != req_sg_cnt || bsg_job->reply_payload.sg_cnt != rsp_sg_cnt) { ql_log(1U, vha, 28689, "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); rval = -11; goto done_unmap_sg; } else { } if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(1U, vha, 28690, "Host is not online.\n"); rval = -5; goto done_unmap_sg; } else { } loop_id = (uint16_t )((bsg_job->request)->rqst_data.h_ct.preamble_word1 >> 24); switch ((int )loop_id) { case 252: loop_id = 2044U; goto ldv_43601; case 250: loop_id = vha->mgmt_svr_loop_id; goto ldv_43601; default: ql_dbg(8388608U, vha, 28691, "Unknown loop id: %x.\n", (int )loop_id); rval = -22; goto done_unmap_sg; } ldv_43601: tmp___0 = qla2x00_alloc_fcport(vha, 208U); fcport = tmp___0; if ((unsigned long )fcport == (unsigned long )((struct fc_port *)0)) { ql_log(1U, vha, 28692, "Failed to allocate fcport.\n"); rval = -12; goto done_unmap_sg; } else { } fcport->vha = vha; fcport->d_id.b.al_pa = (bsg_job->request)->rqst_data.h_ct.port_id[0]; fcport->d_id.b.area = (bsg_job->request)->rqst_data.h_ct.port_id[1]; fcport->d_id.b.domain = 
(bsg_job->request)->rqst_data.h_ct.port_id[2]; fcport->loop_id = loop_id; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_log(1U, vha, 28693, "qla2x00_get_sp failed.\n"); rval = -12; goto done_free_fcport; } else { } sp->type = 5U; sp->name = (char *)"bsg_ct"; tmp___1 = qla24xx_calc_ct_iocbs((int )((uint16_t )req_sg_cnt) + (int )((uint16_t )rsp_sg_cnt)); sp->iocbs = (int )tmp___1; sp->u.bsg_job = bsg_job; sp->free = & qla2x00_bsg_sp_free; sp->done = & qla2x00_bsg_job_done; ql_dbg(8388608U, vha, 28694, "bsg rqst type: %s else type: %x - loop-id=%x portid=%02x%02x%02x.\n", type, (bsg_job->request)->rqst_data.h_ct.preamble_word2 >> 16, (int )fcport->loop_id, (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); rval = qla2x00_start_sp(sp); if (rval != 0) { ql_log(1U, vha, 28695, "qla2x00_start_sp failed=%d.\n", rval); qla2x00_rel_sp(vha, sp); rval = -5; goto done_free_fcport; } else { } return (rval); done_free_fcport: kfree((void const *)fcport); done_unmap_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); done: ; return (rval); } } __inline static int qla81xx_reset_loopback_mode(scsi_qla_host_t *vha , uint16_t *config , int wait , int wait2 ) { int ret ; int rval ; uint16_t new_config[4U] ; struct qla_hw_data *ha ; size_t __len ; void *__ret ; unsigned long tmp ; unsigned long tmp___0 ; { ret = 0; rval = 0; ha = vha->hw; if (((ha->device_type & 8192U) == 0U && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U) { goto done_reset_internal; } else { } memset((void *)(& new_config), 0, 8UL); if (((int )*config & 14) >> 1 == 2 || ((int )*config & 14) >> 1 == 4) { new_config[0] = (unsigned int )*config & 65521U; ql_dbg(8388608U, vha, 28863, "new_config[0]=%02x\n", (int )new_config[0] & 14); __len = 6UL; if (__len > 63UL) { __ret = __memcpy((void *)(& new_config) + 1U, (void const *)config + 1U, __len); } else { __ret = __builtin_memcpy((void *)(& new_config) + 1U, (void const *)config + 1U, __len); } ha->notify_dcbx_comp = wait; ha->notify_lb_portup_comp = wait2; ret = qla81xx_set_port_config(vha, (uint16_t *)(& new_config)); if (ret != 0) { ql_log(1U, vha, 28709, "Set port config failed.\n"); ha->notify_dcbx_comp = 0; ha->notify_lb_portup_comp = 0; rval = -22; goto done_reset_internal; } else { } if (wait != 0) { tmp = wait_for_completion_timeout(& ha->dcbx_comp, 5000UL); if (tmp == 0UL) { ql_dbg(8388608U, vha, 28710, "DCBX completion not received.\n"); ha->notify_dcbx_comp = 0; ha->notify_lb_portup_comp = 0; rval = -22; goto done_reset_internal; } else { ql_dbg(8388608U, vha, 28711, "DCBX completion received.\n"); } } else { ql_dbg(8388608U, vha, 28711, "DCBX completion received.\n"); } if (wait2 != 0) { tmp___0 = wait_for_completion_timeout(& ha->lb_portup_comp, 2500UL); if (tmp___0 == 0UL) { ql_dbg(8388608U, vha, 28869, "Port up completion not received.\n"); ha->notify_lb_portup_comp = 0; rval = -22; goto done_reset_internal; } else { ql_dbg(8388608U, vha, 28870, "Port up completion received.\n"); } } else { ql_dbg(8388608U, vha, 28870, "Port up completion received.\n"); } ha->notify_dcbx_comp = 0; ha->notify_lb_portup_comp = 0; } else { } done_reset_internal: ; return (rval); } } __inline static int qla81xx_set_loopback_mode(scsi_qla_host_t *vha , uint16_t *config , 
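/* qla81xx_set_loopback_mode() (with qla81xx_reset_loopback_mode() above as its counterpart) reconfigures the converged adapters for diagnostics: mode 0xF1 ORs the internal-loopback bit (0x4) and 0xF2 the external-loopback bit (0x8) into the port configuration, qla81xx_set_port_config() applies it, and the code waits for the DCBX completion, honouring idc_extend_tmo extensions; on timeout the configuration is rolled back, with a firmware dump and ISP-abort request if even the rollback fails. */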
uint16_t *new_config , uint16_t mode ) { int ret ; int rval ; unsigned long rem_tmo ; unsigned long current_tmo ; struct qla_hw_data *ha ; size_t __len ; void *__ret ; { ret = 0; rval = 0; rem_tmo = 0UL; current_tmo = 0UL; ha = vha->hw; if (((ha->device_type & 8192U) == 0U && (ha->device_type & 65536U) == 0U) && (ha->device_type & 262144U) == 0U) { goto done_set_internal; } else { } if ((unsigned int )mode == 241U) { *new_config = (uint16_t )((unsigned int )*config | 4U); } else if ((unsigned int )mode == 242U) { *new_config = (uint16_t )((unsigned int )*config | 8U); } else { } ql_dbg(8388608U, vha, 28862, "new_config[0]=%02x\n", (int )*new_config & 14); __len = 6UL; if (__len > 63UL) { __ret = __memcpy((void *)new_config + 1U, (void const *)config + 1U, __len); } else { __ret = __builtin_memcpy((void *)new_config + 1U, (void const *)config + 1U, __len); } ha->notify_dcbx_comp = 1; ret = qla81xx_set_port_config(vha, new_config); if (ret != 0) { ql_log(1U, vha, 28705, "set port config failed.\n"); ha->notify_dcbx_comp = 0; rval = -22; goto done_set_internal; } else { } current_tmo = 5000UL; ldv_43635: rem_tmo = wait_for_completion_timeout(& ha->dcbx_comp, current_tmo); if (ha->idc_extend_tmo == 0U || rem_tmo != 0UL) { ha->idc_extend_tmo = 0U; goto ldv_43634; } else { } current_tmo = (unsigned long )(ha->idc_extend_tmo * 250U); ha->idc_extend_tmo = 0U; goto ldv_43635; ldv_43634: ; if (rem_tmo == 0UL) { ql_dbg(8388608U, vha, 28706, "DCBX completion not received.\n"); ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0); if (ret != 0) { (*((ha->isp_ops)->fw_dump))(vha, 0); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } rval = -22; } else if (*((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(8388608U, vha, 28867, "Bad status in IDC Completion AEN\n"); rval = -22; ha->flags.idc_compl_status = 0U; } else { ql_dbg(8388608U, vha, 28707, "DCBX completion received.\n"); } ha->notify_dcbx_comp = 0; ha->idc_extend_tmo = 0U; done_set_internal: ; return (rval); } } static int qla2x00_process_loopback(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint8_t command_sent ; char *type ; struct msg_echo_lb elreq ; uint16_t response[32U] ; uint16_t config[4U] ; uint16_t new_config[4U] ; uint8_t *fw_sts_ptr ; uint8_t *req_data ; dma_addr_t req_data_dma ; uint32_t req_data_len ; uint8_t *rsp_data ; dma_addr_t rsp_data_dma ; uint32_t rsp_data_len ; int tmp___0 ; int tmp___1 ; void *tmp___2 ; void *tmp___3 ; int tmp___4 ; int tmp___5 ; int ret ; int tmp___6 ; size_t __len ; void *__ret ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; req_data = (uint8_t *)0U; rsp_data = (uint8_t *)0U; if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(1U, vha, 28697, "Host is not online.\n"); return (-5); } else { } tmp___0 = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); elreq.req_sg_cnt = (uint16_t )tmp___0; if ((unsigned int )elreq.req_sg_cnt == 0U) { ql_log(1U, vha, 28698, "dma_map_sg returned %d for request.\n", (int )elreq.req_sg_cnt); return (-12); } else { } tmp___1 = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); elreq.rsp_sg_cnt = (uint16_t )tmp___1; if ((unsigned int )elreq.rsp_sg_cnt == 0U) { ql_log(1U, vha, 28699, "dma_map_sg returned %d for reply.\n", (int )elreq.rsp_sg_cnt); rval = -12; goto done_unmap_req_sg; 
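/* qla2x00_process_loopback(): both payloads are DMA-mapped and the request data staged in a coherent buffer; depending on topology and adapter family the code then either runs the ECHO diagnostic (option 0xF2 with the loop ready on a switched fabric) or configures internal/external loopback before calling qla2x00_loopback_test(). The 64-byte mailbox response plus the command-sent code are stored 16 bytes into the sense buffer of the underlying request, and a 0x4005/0x17 mailbox status escalates to an ISP abort. */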
} else { } if ((int )elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt || (int )elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt) { ql_log(1U, vha, 28700, "dma mapping resulted in different sg counts, request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", bsg_job->request_payload.sg_cnt, (int )elreq.req_sg_cnt, bsg_job->reply_payload.sg_cnt, (int )elreq.rsp_sg_cnt); rval = -11; goto done_unmap_sg; } else { } rsp_data_len = bsg_job->request_payload.payload_len; req_data_len = rsp_data_len; tmp___2 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )req_data_len, & req_data_dma, 208U, (struct dma_attrs *)0); req_data = (uint8_t *)tmp___2; if ((unsigned long )req_data == (unsigned long )((uint8_t *)0U)) { ql_log(1U, vha, 28701, "dma alloc failed for req_data.\n"); rval = -12; goto done_unmap_sg; } else { } tmp___3 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )rsp_data_len, & rsp_data_dma, 208U, (struct dma_attrs *)0); rsp_data = (uint8_t *)tmp___3; if ((unsigned long )rsp_data == (unsigned long )((uint8_t *)0U)) { ql_log(1U, vha, 28676, "dma alloc failed for rsp_data.\n"); rval = -12; goto done_free_dma_req; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)req_data, (size_t )req_data_len); elreq.send_dma = req_data_dma; elreq.rcv_dma = rsp_data_dma; elreq.transfer_size = req_data_len; elreq.options = (uint16_t )(bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; elreq.iteration_count = (bsg_job->request)->rqst_data.h_vendor.vendor_cmd[2]; tmp___6 = atomic_read((atomic_t const *)(& vha->loop_state)); if ((tmp___6 == 5 && ((unsigned int )ha->current_topology == 8U || (((((ha->device_type & 8192U) != 0U || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) && *((uint32_t *)req_data) == 16U) && req_data_len == 252U))) && (unsigned int )elreq.options == 242U) { type = (char *)"FC_BSG_HST_VENDOR_ECHO_DIAG"; ql_dbg(8388608U, vha, 28702, "BSG request type: %s.\n", type); command_sent = 1U; rval = qla2x00_echo_test(vha, & elreq, (uint16_t *)(& response)); } else if (((ha->device_type & 8192U) != 0U || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U) { memset((void *)(& config), 0, 8UL); memset((void *)(& new_config), 0, 8UL); tmp___4 = qla81xx_get_port_config(vha, (uint16_t *)(& config)); if (tmp___4 != 0) { ql_log(1U, vha, 28703, "Get port config failed.\n"); rval = -1; goto done_free_dma_rsp; } else { } if (((int )config[0] & 14) != 0) { ql_dbg(8388608U, vha, 28868, "Loopback operation already in progress.\n"); rval = -11; goto done_free_dma_rsp; } else { } ql_dbg(8388608U, vha, 28864, "elreq.options=%04x\n", (int )elreq.options); if ((unsigned int )elreq.options == 242U) { if ((ha->device_type & 65536U) != 0U || (ha->device_type & 262144U) != 0U) { rval = qla81xx_set_loopback_mode(vha, (uint16_t *)(& config), (uint16_t *)(& new_config), (int )elreq.options); } else { rval = qla81xx_reset_loopback_mode(vha, (uint16_t *)(& config), 1, 0); } } else { rval = qla81xx_set_loopback_mode(vha, (uint16_t *)(& config), (uint16_t *)(& new_config), (int )elreq.options); } if (rval != 0) { rval = -1; goto done_free_dma_rsp; } else { } type = (char *)"FC_BSG_HST_VENDOR_LOOPBACK"; ql_dbg(8388608U, vha, 28712, "BSG request type: %s.\n", type); command_sent = 0U; rval = qla2x00_loopback_test(vha, & elreq, (uint16_t *)(& response)); if ((unsigned int )response[0] == 16389U && (unsigned int )response[1] == 23U) { ql_log(1U, vha, 28713, "MBX command error, Aborting ISP.\n"); 
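/* Fatal loopback mailbox status (0x4005 with subcode 0x17): request an ISP abort via dpc_flags bit 2, wake the DPC thread, wait for the chip reset, and on the ISP81xx family (device_type bit 0x2000) also restart the MPI firmware. */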
set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); if ((ha->device_type & 8192U) != 0U) { tmp___5 = qla81xx_restart_mpi_firmware(vha); if (tmp___5 != 0) { ql_log(1U, vha, 28714, "MPI reset failed.\n"); } else { } } else { } rval = -5; goto done_free_dma_rsp; } else { } if ((unsigned int )new_config[0] != 0U) { ret = qla81xx_reset_loopback_mode(vha, (uint16_t *)(& new_config), 0, 1); if (ret != 0) { (*((ha->isp_ops)->fw_dump))(vha, 0); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } } else { } } else { type = (char *)"FC_BSG_HST_VENDOR_LOOPBACK"; ql_dbg(8388608U, vha, 28715, "BSG request type: %s.\n", type); command_sent = 0U; rval = qla2x00_loopback_test(vha, & elreq, (uint16_t *)(& response)); } if (rval != 0) { ql_log(1U, vha, 28716, "Vendor request %s failed.\n", type); rval = 0; (bsg_job->reply)->result = 458752U; (bsg_job->reply)->reply_payload_rcv_len = 0U; } else { ql_dbg(8388608U, vha, 28717, "Vendor request %s completed.\n", type); (bsg_job->reply)->result = 0U; sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void *)rsp_data, (size_t )rsp_data_len); } bsg_job->reply_len = 81U; fw_sts_ptr = (uint8_t *)(bsg_job->req)->sense + 16UL; __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)fw_sts_ptr, (void const *)(& response), __len); } else { __ret = __builtin_memcpy((void *)fw_sts_ptr, (void const *)(& response), __len); } fw_sts_ptr = fw_sts_ptr + 64UL; *fw_sts_ptr = command_sent; done_free_dma_rsp: dma_free_attrs(& (ha->pdev)->dev, (size_t )rsp_data_len, (void *)rsp_data, rsp_data_dma, (struct dma_attrs *)0); done_free_dma_req: dma_free_attrs(& (ha->pdev)->dev, (size_t )req_data_len, (void *)req_data, req_data_dma, (struct dma_attrs *)0); done_unmap_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); done_unmap_req_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); if (rval == 0) { (*(bsg_job->job_done))(bsg_job); } else { } return (rval); } } static int qla84xx_reset(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint32_t flag ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; if ((ha->device_type & 4096U) == 0U) { ql_dbg(8388608U, vha, 28719, "Not 84xx, exiting.\n"); return (-22); } else { } flag = (bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; rval = qla84xx_reset_chip(vha, flag == 4U); if (rval != 0) { ql_log(1U, vha, 28720, "Vendor request 84xx reset failed.\n"); rval = 458752; } else { ql_dbg(8388608U, vha, 28721, "Vendor request 84xx reset completed.\n"); (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); } return (rval); } } static int qla84xx_updatefw(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; struct verify_chip_entry_84xx *mn ; dma_addr_t mn_dma ; dma_addr_t fw_dma ; void *fw_buf ; int rval ; uint32_t sg_cnt ; uint32_t data_len ; uint16_t options ; uint32_t flag ; uint32_t fw_ver ; int tmp___0 ; void *tmp___1 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; mn = (struct verify_chip_entry_84xx *)0; fw_buf = (void *)0; rval = 0; if ((ha->device_type & 4096U) == 0U) { ql_dbg(8388608U, vha, 28722, 
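/* qla84xx_updatefw(): ISP84xx firmware update. The image is copied from the request scatter-gather list into a coherent buffer, a VERIFY CHIP IOCB (entry_type 27 = 0x1B) describing that buffer is built from s_dma_pool, and qla2x00_issue_iocb_timeout() executes it with a 120 second timeout; flag value 6 sets an extra option bit (0x8) in the IOCB. */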
"Not 84xx, exiting.\n"); return (-22); } else { } tmp___0 = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); sg_cnt = (uint32_t )tmp___0; if (sg_cnt == 0U) { ql_log(1U, vha, 28723, "dma_map_sg returned %d for request.\n", sg_cnt); return (-12); } else { } if ((uint32_t )bsg_job->request_payload.sg_cnt != sg_cnt) { ql_log(1U, vha, 28724, "DMA mapping resulted in different sg counts, request_sg_cnt: %x dma_request_sg_cnt: %x.\n", bsg_job->request_payload.sg_cnt, sg_cnt); rval = -11; goto done_unmap_sg; } else { } data_len = bsg_job->request_payload.payload_len; fw_buf = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )data_len, & fw_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )fw_buf == (unsigned long )((void *)0)) { ql_log(1U, vha, 28725, "DMA alloc failed for fw_buf.\n"); rval = -12; goto done_unmap_sg; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, fw_buf, (size_t )data_len); tmp___1 = dma_pool_alloc(ha->s_dma_pool, 208U, & mn_dma); mn = (struct verify_chip_entry_84xx *)tmp___1; if ((unsigned long )mn == (unsigned long )((struct verify_chip_entry_84xx *)0)) { ql_log(1U, vha, 28726, "DMA alloc failed for fw buffer.\n"); rval = -12; goto done_free_fw_buf; } else { } flag = (bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; fw_ver = *((uint32_t *)fw_buf + 2UL); memset((void *)mn, 0, 64UL); mn->entry_type = 27U; mn->entry_count = 1U; options = 16386U; if (flag == 6U) { options = (uint16_t )((unsigned int )options | 8U); } else { } mn->options = options; mn->fw_ver = fw_ver; mn->fw_size = data_len; mn->fw_seq_size = data_len; mn->dseg_address[0] = (unsigned int )fw_dma; mn->dseg_address[1] = (unsigned int )(fw_dma >> 32ULL); mn->dseg_length = data_len; mn->data_seg_cnt = 1U; rval = qla2x00_issue_iocb_timeout(vha, (void *)mn, mn_dma, 0UL, 120U); if (rval != 0) { ql_log(1U, vha, 28727, "Vendor request 84xx updatefw failed.\n"); rval = 458752; } else { ql_dbg(8388608U, vha, 28728, "Vendor request 84xx updatefw completed.\n"); bsg_job->reply_len = 16U; (bsg_job->reply)->result = 0U; } dma_pool_free(ha->s_dma_pool, (void *)mn, mn_dma); done_free_fw_buf: dma_free_attrs(& (ha->pdev)->dev, (size_t )data_len, fw_buf, fw_dma, (struct dma_attrs *)0); done_unmap_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); if (rval == 0) { (*(bsg_job->job_done))(bsg_job); } else { } return (rval); } } static int qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; struct access_chip_84xx *mn ; dma_addr_t mn_dma ; dma_addr_t mgmt_dma ; void *mgmt_b ; int rval ; struct qla_bsg_a84_mgmt *ql84_mgmt ; uint32_t sg_cnt ; uint32_t data_len ; uint32_t dma_direction ; void *tmp___0 ; int tmp___1 ; int tmp___2 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; mn = (struct access_chip_84xx *)0; mgmt_b = (void *)0; rval = 0; data_len = 0U; dma_direction = 3U; if ((ha->device_type & 4096U) == 0U) { ql_log(1U, vha, 28730, "Not 84xx, exiting.\n"); return (-22); } else { } tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & mn_dma); mn = (struct access_chip_84xx *)tmp___0; if ((unsigned long )mn == (unsigned long )((struct access_chip_84xx *)0)) { ql_log(1U, vha, 28732, "DMA alloc failed for fw buffer.\n"); return (-12); } else { } memset((void *)mn, 0, 64UL); mn->entry_type 
= 43U; mn->entry_count = 1U; ql84_mgmt = (struct qla_bsg_a84_mgmt *)bsg_job->request + 20U; switch ((int )ql84_mgmt->mgmt.cmd) { case 0: ; case 3: tmp___1 = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); sg_cnt = (uint32_t )tmp___1; if (sg_cnt == 0U) { ql_log(1U, vha, 28733, "dma_map_sg returned %d for reply.\n", sg_cnt); rval = -12; goto exit_mgmt; } else { } dma_direction = 2U; if ((uint32_t )bsg_job->reply_payload.sg_cnt != sg_cnt) { ql_log(1U, vha, 28734, "DMA mapping resulted in different sg counts, reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", bsg_job->reply_payload.sg_cnt, sg_cnt); rval = -11; goto done_unmap_sg; } else { } data_len = bsg_job->reply_payload.payload_len; mgmt_b = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )data_len, & mgmt_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )mgmt_b == (unsigned long )((void *)0)) { ql_log(1U, vha, 28735, "DMA alloc failed for mgmt_b.\n"); rval = -12; goto done_unmap_sg; } else { } if ((unsigned int )ql84_mgmt->mgmt.cmd == 0U) { mn->options = 0U; mn->parameter1 = ql84_mgmt->mgmt.mgmtp.u.mem.start_addr; } else if ((unsigned int )ql84_mgmt->mgmt.cmd == 3U) { mn->options = 3U; mn->parameter1 = ql84_mgmt->mgmt.mgmtp.u.info.type; mn->parameter2 = ql84_mgmt->mgmt.mgmtp.u.info.context; } else { } goto ldv_43709; case 1: tmp___2 = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); sg_cnt = (uint32_t )tmp___2; if (sg_cnt == 0U) { ql_log(1U, vha, 28736, "dma_map_sg returned %d.\n", sg_cnt); rval = -12; goto exit_mgmt; } else { } dma_direction = 1U; if ((uint32_t )bsg_job->request_payload.sg_cnt != sg_cnt) { ql_log(1U, vha, 28737, "DMA mapping resulted in different sg counts, request_sg_cnt: %x dma_request_sg_cnt: %x.\n", bsg_job->request_payload.sg_cnt, sg_cnt); rval = -11; goto done_unmap_sg; } else { } data_len = bsg_job->request_payload.payload_len; mgmt_b = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )data_len, & mgmt_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )mgmt_b == (unsigned long )((void *)0)) { ql_log(1U, vha, 28738, "DMA alloc failed for mgmt_b.\n"); rval = -12; goto done_unmap_sg; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, mgmt_b, (size_t )data_len); mn->options = 1U; mn->parameter1 = ql84_mgmt->mgmt.mgmtp.u.mem.start_addr; goto ldv_43709; case 2: mn->options = 2U; mn->parameter1 = ql84_mgmt->mgmt.mgmtp.u.config.id; mn->parameter2 = ql84_mgmt->mgmt.mgmtp.u.config.param0; mn->parameter3 = ql84_mgmt->mgmt.mgmtp.u.config.param1; goto ldv_43709; default: rval = -5; goto exit_mgmt; } ldv_43709: ; if ((unsigned int )ql84_mgmt->mgmt.cmd != 2U) { mn->total_byte_cnt = ql84_mgmt->mgmt.len; mn->dseg_count = 1U; mn->dseg_address[0] = (unsigned int )mgmt_dma; mn->dseg_address[1] = (unsigned int )(mgmt_dma >> 32ULL); mn->dseg_length = ql84_mgmt->mgmt.len; } else { } rval = qla2x00_issue_iocb(vha, (void *)mn, mn_dma, 0UL); if (rval != 0) { ql_log(1U, vha, 28739, "Vendor request 84xx mgmt failed.\n"); rval = 458752; } else { ql_dbg(8388608U, vha, 28740, "Vendor request 84xx mgmt completed.\n"); bsg_job->reply_len = 16U; (bsg_job->reply)->result = 0U; if ((unsigned int )ql84_mgmt->mgmt.cmd == 0U || (unsigned int )ql84_mgmt->mgmt.cmd == 3U) { (bsg_job->reply)->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, 
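/* After a successful read/info command the fetched data is copied from mgmt_b back into the reply scatter-gather list. qla24xx_iidma() below implements the iiDMA vendor command: the target fcport is located by WWPN on vha->vp_fcports, must be online and logged in, and its port speed is then read or programmed with qla2x00_get_idma_speed()/qla2x00_set_idma_speed(); for a query the qla_port_param block is copied back into the reply. */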
mgmt_b, (size_t )data_len); } else { } } done_unmap_sg: ; if ((unsigned long )mgmt_b != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )data_len, mgmt_b, mgmt_dma, (struct dma_attrs *)0); } else { } if (dma_direction == 1U) { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); } else if (dma_direction == 2U) { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); } else { } exit_mgmt: dma_pool_free(ha->s_dma_pool, (void *)mn, mn_dma); if (rval == 0) { (*(bsg_job->job_done))(bsg_job); } else { } return (rval); } } static int qla24xx_iidma(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; int rval ; struct qla_port_param *port_param ; fc_port_t *fcport ; int found ; uint16_t mb[32U] ; uint8_t *rsp_ptr ; struct list_head const *__mptr ; int tmp___0 ; struct list_head const *__mptr___0 ; int tmp___1 ; size_t __len ; void *__ret ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; rval = 0; port_param = (struct qla_port_param *)0; fcport = (fc_port_t *)0; found = 0; rsp_ptr = (uint8_t *)0U; if (((vha->hw)->device_type & 67108864U) == 0U) { ql_log(2U, vha, 28742, "iiDMA not supported.\n"); return (-22); } else { } port_param = (struct qla_port_param *)bsg_job->request + 20U; if ((unsigned int )port_param->fc_scsi_addr.dest_type != 2U) { ql_log(1U, vha, 28744, "Invalid destination type.\n"); return (-22); } else { } __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_43731; ldv_43730: ; if ((unsigned int )fcport->port_type != 5U) { goto ldv_43728; } else { } tmp___0 = memcmp((void const *)(& port_param->fc_scsi_addr.dest_addr.wwpn), (void const *)(& fcport->port_name), 8UL); if (tmp___0 != 0) { goto ldv_43728; } else { } found = 1; goto ldv_43729; ldv_43728: __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_43731: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_43730; } else { } ldv_43729: ; if (found == 0) { ql_log(1U, vha, 28745, "Failed to find port.\n"); return (-22); } else { } tmp___1 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___1 != 4) { ql_log(1U, vha, 28746, "Port is not online.\n"); return (-22); } else { } if ((fcport->flags & 2U) != 0U) { ql_log(1U, vha, 28747, "Remote port not logged in flags = 0x%x.\n", fcport->flags); return (-22); } else { } if ((unsigned int )port_param->mode != 0U) { rval = qla2x00_set_idma_speed(vha, (int )fcport->loop_id, (int )port_param->speed, (uint16_t *)(& mb)); } else { rval = qla2x00_get_idma_speed(vha, (int )fcport->loop_id, & port_param->speed, (uint16_t *)(& mb)); } if (rval != 0) { ql_log(1U, vha, 28748, "iIDMA cmd failed for %8phN -- %04x %x %04x %04x.\n", (uint8_t *)(& fcport->port_name), rval, (int )fcport->fp_speed, (int )mb[0], (int )mb[1]); rval = 458752; } else { if ((unsigned int )port_param->mode == 0U) { bsg_job->reply_len = 36U; rsp_ptr = (uint8_t *)bsg_job->reply + 16UL; __len = 20UL; if (__len > 63UL) { __ret = __memcpy((void *)rsp_ptr, (void const *)port_param, __len); } else { __ret = __builtin_memcpy((void *)rsp_ptr, (void const *)port_param, __len); } } else { } (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); } return (rval); } } static int qla2x00_optrom_setup(struct fc_bsg_job *bsg_job , scsi_qla_host_t *vha , 
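/* qla2x00_optrom_setup(): shared preparation for the option-ROM read and update BSG paths. It rejects offline PCI channels, validates the start offset against ha->optrom_size, refuses concurrent access via optrom_state, limits updates to recognised flash regions (the whole 128 KB flash, the boot or firmware region, or any region on the newer ISP families), clamps the region size to the payload length and vmallocs a zeroed staging buffer. */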
uint8_t is_update ) { uint32_t start ; int valid ; struct qla_hw_data *ha ; int tmp ; long tmp___0 ; void *tmp___1 ; { start = 0U; valid = 0; ha = vha->hw; tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (-22); } else { } start = (bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; if (ha->optrom_size < start) { ql_log(1U, vha, 28757, "start %d > optrom_size %d.\n", start, ha->optrom_size); return (-22); } else { } if (ha->optrom_state != 0) { ql_log(2U, vha, 28758, "optrom_state %d.\n", ha->optrom_state); return (-16); } else { } ha->optrom_region_start = start; ql_dbg(8388608U, vha, 28759, "is_update=%d.\n", (int )is_update); if ((unsigned int )is_update != 0U) { if (ha->optrom_size == 131072U && start == 0U) { valid = 1; } else if (ha->flt_region_boot * 4U == start || ha->flt_region_fw * 4U == start) { valid = 1; } else if (((((((ha->device_type & 128U) != 0U || (ha->device_type & 256U) != 0U) || ((ha->device_type & 512U) != 0U || (ha->device_type & 1024U) != 0U)) || (ha->device_type & 4096U) != 0U) || (ha->device_type & 2048U) != 0U) || ((((ha->device_type & 8192U) != 0U || (ha->device_type & 16384U) != 0U) || (ha->device_type & 65536U) != 0U) || (ha->device_type & 262144U) != 0U)) || (ha->device_type & 32768U) != 0U) { valid = 1; } else { } if (valid == 0) { ql_log(1U, vha, 28760, "Invalid start region 0x%x/0x%x.\n", start, bsg_job->request_payload.payload_len); return (-22); } else { } ha->optrom_region_size = bsg_job->request_payload.payload_len + start > ha->optrom_size ? ha->optrom_size - start : bsg_job->request_payload.payload_len; ha->optrom_state = 2; } else { ha->optrom_region_size = bsg_job->reply_payload.payload_len + start > ha->optrom_size ? ha->optrom_size - start : bsg_job->reply_payload.payload_len; ha->optrom_state = 1; } tmp___1 = vmalloc((unsigned long )ha->optrom_region_size); ha->optrom_buffer = (char *)tmp___1; if ((unsigned long )ha->optrom_buffer == (unsigned long )((char *)0)) { ql_log(1U, vha, 28761, "Read: Unable to allocate memory for optrom retrieval (%x)\n", ha->optrom_region_size); ha->optrom_state = 0; return (-12); } else { } memset((void *)ha->optrom_buffer, 0, (size_t )ha->optrom_region_size); return (0); } } static int qla2x00_read_optrom(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; if (*((unsigned long *)ha + 2UL) != 0UL) { return (-16); } else { } rval = qla2x00_optrom_setup(bsg_job, vha, 0); if (rval != 0) { return (rval); } else { } (*((ha->isp_ops)->read_optrom))(vha, (uint8_t *)ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void *)ha->optrom_buffer, (size_t )ha->optrom_region_size); (bsg_job->reply)->reply_payload_rcv_len = ha->optrom_region_size; (bsg_job->reply)->result = 0U; vfree((void const *)ha->optrom_buffer); ha->optrom_buffer = (char *)0; ha->optrom_state = 0; (*(bsg_job->job_done))(bsg_job); return (rval); } } static int qla2x00_update_optrom(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; rval = qla2x00_optrom_setup(bsg_job, vha, 1); if (rval != 0) { return (rval); } else { } 
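/* Update path: ha->flags.isp82xx_no_md_cap is set first (which appears to suppress ISP82xx minidump capture during the flash write), then the request payload is copied into the staging buffer and burned with isp_ops->write_optrom(). qla2x00_update_fru_versions() below walks a qla_image_version_list from the request payload and writes each 36-byte field_info blob to the addressed device with qla2x00_write_sfp(), reporting the outcome in vendor_rsp[0]. */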
ha->flags.isp82xx_no_md_cap = 1U; sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)ha->optrom_buffer, (size_t )ha->optrom_region_size); (*((ha->isp_ops)->write_optrom))(vha, (uint8_t *)ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); (bsg_job->reply)->result = 0U; vfree((void const *)ha->optrom_buffer); ha->optrom_buffer = (char *)0; ha->optrom_state = 0; (*(bsg_job->job_done))(bsg_job); return (rval); } } static int qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint8_t bsg[256U] ; struct qla_image_version_list *list ; struct qla_image_version *image ; uint32_t count ; dma_addr_t sfp_dma ; void *sfp ; void *tmp___0 ; size_t __len ; void *__ret ; uint32_t tmp___1 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; list = (struct qla_image_version_list *)(& bsg); tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & sfp_dma); sfp = tmp___0; if ((unsigned long )sfp == (unsigned long )((void *)0)) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 17U; goto done; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)list, 256UL); image = (struct qla_image_version *)(& list->version); count = list->count; goto ldv_43776; ldv_43775: __len = 36UL; if (__len > 63UL) { __ret = __memcpy(sfp, (void const *)(& image->field_info), __len); } else { __ret = __builtin_memcpy(sfp, (void const *)(& image->field_info), __len); } rval = qla2x00_write_sfp(vha, sfp_dma, (uint8_t *)sfp, (int )image->field_address.device, (int )image->field_address.offset, 36, (int )image->field_address.option); if (rval != 0) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 11U; goto dealloc; } else { } image = image + 1; ldv_43776: tmp___1 = count; count = count - 1U; if (tmp___1 != 0U) { goto ldv_43775; } else { } (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 0U; dealloc: dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); done: bsg_job->reply_len = 16U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qla2x00_read_fru_status(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint8_t bsg[256U] ; struct qla_status_reg *sr ; dma_addr_t sfp_dma ; uint8_t *sfp ; void *tmp___0 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; sr = (struct qla_status_reg *)(& bsg); tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & sfp_dma); sfp = (uint8_t *)tmp___0; if ((unsigned long )sfp == (unsigned long )((uint8_t *)0U)) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 17U; goto done; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)sr, 14UL); rval = qla2x00_read_sfp(vha, sfp_dma, sfp, (int )sr->field_address.device, (int )sr->field_address.offset, 1, (int )sr->field_address.option); sr->status_reg = *sfp; if (rval != 0) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 11U; goto dealloc; } else { } sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void *)sr, 14UL); (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 0U; dealloc: dma_pool_free(ha->s_dma_pool, (void *)sfp, sfp_dma); done: bsg_job->reply_len = 16U; 
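/* qla2x00_write_fru_status(), qla2x00_write_i2c() and qla2x00_read_i2c() below follow the same pattern as the read-status helper above: copy the small control block from the request payload, perform a single qla2x00_read_sfp()/qla2x00_write_sfp() transfer through an s_dma_pool buffer, copy any result back via the reply payload and return the status in vendor_rsp[0] (0 success, 0x0B transfer error, 0x11 allocation failure). */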
(bsg_job->reply)->reply_payload_rcv_len = 14U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qla2x00_write_fru_status(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint8_t bsg[256U] ; struct qla_status_reg *sr ; dma_addr_t sfp_dma ; uint8_t *sfp ; void *tmp___0 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; sr = (struct qla_status_reg *)(& bsg); tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & sfp_dma); sfp = (uint8_t *)tmp___0; if ((unsigned long )sfp == (unsigned long )((uint8_t *)0U)) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 17U; goto done; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)sr, 14UL); *sfp = sr->status_reg; rval = qla2x00_write_sfp(vha, sfp_dma, sfp, (int )sr->field_address.device, (int )sr->field_address.offset, 1, (int )sr->field_address.option); if (rval != 0) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 11U; goto dealloc; } else { } (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 0U; dealloc: dma_pool_free(ha->s_dma_pool, (void *)sfp, sfp_dma); done: bsg_job->reply_len = 16U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qla2x00_write_i2c(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint8_t bsg[256U] ; struct qla_i2c_access *i2c ; dma_addr_t sfp_dma ; uint8_t *sfp ; void *tmp___0 ; size_t __len ; void *__ret ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; i2c = (struct qla_i2c_access *)(& bsg); tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & sfp_dma); sfp = (uint8_t *)tmp___0; if ((unsigned long )sfp == (unsigned long )((uint8_t *)0U)) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 17U; goto done; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)i2c, 72UL); __len = (size_t )i2c->length; __ret = __builtin_memcpy((void *)sfp, (void const *)(& i2c->buffer), __len); rval = qla2x00_write_sfp(vha, sfp_dma, sfp, (int )i2c->device, (int )i2c->offset, (int )i2c->length, (int )i2c->option); if (rval != 0) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 11U; goto dealloc; } else { } (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 0U; dealloc: dma_pool_free(ha->s_dma_pool, (void *)sfp, sfp_dma); done: bsg_job->reply_len = 16U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qla2x00_read_i2c(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; uint8_t bsg[256U] ; struct qla_i2c_access *i2c ; dma_addr_t sfp_dma ; uint8_t *sfp ; void *tmp___0 ; size_t __len ; void *__ret ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0; i2c = (struct qla_i2c_access *)(& bsg); tmp___0 = dma_pool_alloc(ha->s_dma_pool, 208U, & sfp_dma); sfp = (uint8_t *)tmp___0; if ((unsigned long )sfp == (unsigned long )((uint8_t *)0U)) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 17U; goto done; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)i2c, 72UL); rval = qla2x00_read_sfp(vha, 
sfp_dma, sfp, (int )i2c->device, (int )i2c->offset, (int )i2c->length, (int )i2c->option); if (rval != 0) { (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 11U; goto dealloc; } else { } __len = (size_t )i2c->length; __ret = __builtin_memcpy((void *)(& i2c->buffer), (void const *)sfp, __len); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void *)i2c, 72UL); (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = 0U; dealloc: dma_pool_free(ha->s_dma_pool, (void *)sfp, sfp_dma); done: bsg_job->reply_len = 16U; (bsg_job->reply)->reply_payload_rcv_len = 72U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; uint16_t thread_id ; uint32_t rval ; uint16_t req_sg_cnt ; uint16_t rsp_sg_cnt ; uint16_t nextlid ; uint32_t tot_dsds ; srb_t *sp ; uint32_t req_data_len ; uint32_t rsp_data_len ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 0U; req_sg_cnt = 0U; rsp_sg_cnt = 0U; nextlid = 0U; sp = (srb_t *)0; req_data_len = 0U; rsp_data_len = 0U; if ((ha->device_type & 2048U) == 0U && (ha->device_type & 32768U) == 0U) { ql_log(1U, vha, 28832, "This adapter is not supported\n"); rval = 27U; goto done; } else { } tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 != 0) { rval = 2U; goto done; } else { tmp___1 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { rval = 2U; goto done; } else { tmp___2 = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___2 != 0) { rval = 2U; goto done; } else { } } } if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(1U, vha, 28833, "Host is not online\n"); rval = 22U; goto done; } else { } if ((vha->device_flags & 2U) != 0U) { ql_log(1U, vha, 28834, "Cable is unplugged...\n"); rval = 28U; goto done; } else { } if ((unsigned int )ha->current_topology != 8U) { ql_log(1U, vha, 28835, "Host is not connected to the switch\n"); rval = 28U; goto done; } else { } if ((unsigned int )ha->operating_mode != 1U) { ql_log(1U, vha, 28836, "Host is operating mode is not P2p\n"); rval = 28U; goto done; } else { } thread_id = (uint16_t )(bsg_job->request)->rqst_data.h_vendor.vendor_cmd[1]; mutex_lock_nested(& ha->selflogin_lock, 0U); if ((unsigned int )vha->self_login_loop_id == 0U) { vha->bidir_fcport.vha = vha; vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa; vha->bidir_fcport.d_id.b.area = vha->d_id.b.area; vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain; vha->bidir_fcport.loop_id = vha->loop_id; tmp___3 = qla2x00_fabric_login(vha, & vha->bidir_fcport, & nextlid); if (tmp___3 != 0) { ql_log(1U, vha, 28839, "Failed to login port %06X for bidirectional IOCB\n", (int )vha->bidir_fcport.d_id.b24); mutex_unlock(& ha->selflogin_lock); rval = 11U; goto done; } else { } vha->self_login_loop_id = (unsigned int )nextlid + 65535U; } else { } mutex_unlock(& ha->selflogin_lock); vha->bidir_fcport.loop_id = vha->self_login_loop_id; tmp___4 = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); req_sg_cnt = (uint16_t )tmp___4; if ((unsigned int )req_sg_cnt == 0U) { rval = 17U; goto done; } else { } tmp___5 = 
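/* Remainder of qla24xx_process_bidir_cmd(): the reply payload is mapped, the SG counts are cross-checked, the request and reply payload lengths must match, and an SRB of type 9 is started with qla2x00_start_bidir(); the status code accumulated in rval is returned to user space through vendor_rsp[0]. qlafx00_mgmt_cmd() below passes an ISPFX00 management IOCB through, mapping only the payload directions flagged in the embedded qla_mt_iocb_rqst_fx00. */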
dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); rsp_sg_cnt = (uint16_t )tmp___5; if ((unsigned int )rsp_sg_cnt == 0U) { rval = 17U; goto done_unmap_req_sg; } else { } if ((int )req_sg_cnt != bsg_job->request_payload.sg_cnt || (int )rsp_sg_cnt != bsg_job->reply_payload.sg_cnt) { ql_dbg(8388608U, vha, 28841, "Dma mapping resulted in different sg counts [request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", bsg_job->request_payload.sg_cnt, (int )req_sg_cnt, bsg_job->reply_payload.sg_cnt, (int )rsp_sg_cnt); rval = 17U; goto done_unmap_sg; } else { } if (req_data_len != rsp_data_len) { rval = 2U; ql_log(1U, vha, 28842, "req_data_len != rsp_data_len\n"); goto done_unmap_sg; } else { } req_data_len = bsg_job->request_payload.payload_len; rsp_data_len = bsg_job->reply_payload.payload_len; sp = qla2x00_get_sp(vha, & vha->bidir_fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_dbg(8388608U, vha, 28844, "Alloc SRB structure failed\n"); rval = 17U; goto done_unmap_sg; } else { } sp->u.bsg_job = bsg_job; sp->free = & qla2x00_bsg_sp_free; sp->type = 9U; sp->done = & qla2x00_bsg_job_done; tot_dsds = (uint32_t )((int )rsp_sg_cnt + (int )req_sg_cnt); tmp___6 = qla2x00_start_bidir(sp, vha, tot_dsds); rval = (uint32_t )tmp___6; if (rval != 0U) { goto done_free_srb; } else { } return ((int )rval); done_free_srb: mempool_free((void *)sp, ha->srb_mempool); done_unmap_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); done_unmap_req_sg: dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); done: (bsg_job->reply)->reply_data.vendor_reply.vendor_rsp[0] = rval; bsg_job->reply_len = 16U; (bsg_job->reply)->reply_payload_rcv_len = 0U; (bsg_job->reply)->result = 0U; (*(bsg_job->job_done))(bsg_job); return (0); } } static int qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job ) { struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; int rval ; struct qla_mt_iocb_rqst_fx00 *piocb_rqst ; srb_t *sp ; int req_sg_cnt ; int rsp_sg_cnt ; struct fc_port *fcport ; char *type ; fc_port_t *tmp___0 ; uint16_t tmp___1 ; { host = bsg_job->shost; tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; rval = 262144; req_sg_cnt = 0; rsp_sg_cnt = 0; type = (char *)"FC_BSG_HST_FX_MGMT"; piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)(& (bsg_job->request)->rqst_data.h_vendor.vendor_cmd) + 1U; ql_dump_buffer(8421376U, vha, 28879, (uint8_t *)piocb_rqst, 32U); if (*((unsigned long *)vha + 19UL) == 0UL) { ql_log(1U, vha, 28880, "Host is not online.\n"); rval = -5; goto done; } else { } if ((int )piocb_rqst->flags & 1) { req_sg_cnt = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); if (req_sg_cnt == 0) { ql_log(1U, vha, 28871, "dma_map_sg return %d for request\n", req_sg_cnt); rval = -12; goto done; } else { } } else { } if (((int )piocb_rqst->flags & 2) != 0) { rsp_sg_cnt = dma_map_sg_attrs(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); if (rsp_sg_cnt == 0) { ql_log(1U, vha, 28872, "dma_map_sg return %d for reply\n", rsp_sg_cnt); rval = -12; goto done_unmap_req_sg; } else { } } else { } ql_dbg(8388608U, vha, 28873, "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x 
dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); tmp___0 = qla2x00_alloc_fcport(vha, 208U); fcport = tmp___0; if ((unsigned long )fcport == (unsigned long )((struct fc_port *)0)) { ql_log(1U, vha, 28874, "Failed to allocate fcport.\n"); rval = -12; goto done_unmap_rsp_sg; } else { } sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_log(1U, vha, 28875, "qla2x00_get_sp failed.\n"); rval = -12; goto done_free_fcport; } else { } fcport->vha = vha; fcport->loop_id = (uint16_t )piocb_rqst->dataword; sp->type = 11U; sp->name = (char *)"bsg_fx_mgmt"; tmp___1 = qla24xx_calc_ct_iocbs((int )((uint16_t )req_sg_cnt) + (int )((uint16_t )rsp_sg_cnt)); sp->iocbs = (int )tmp___1; sp->u.bsg_job = bsg_job; sp->free = & qla2x00_bsg_sp_free; sp->done = & qla2x00_bsg_job_done; ql_dbg(8388608U, vha, 28876, "bsg rqst type: %s fx_mgmt_type: %x id=%x\n", type, (int )piocb_rqst->func_type, (int )fcport->loop_id); rval = qla2x00_start_sp(sp); if (rval != 0) { ql_log(1U, vha, 28877, "qla2x00_start_sp failed=%d.\n", rval); mempool_free((void *)sp, ha->srb_mempool); rval = -5; goto done_free_fcport; } else { } return (rval); done_free_fcport: kfree((void const *)fcport); done_unmap_rsp_sg: ; if (((int )piocb_rqst->flags & 2) != 0) { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 2, (struct dma_attrs *)0); } else { } done_unmap_req_sg: ; if ((int )piocb_rqst->flags & 1) { dma_unmap_sg_attrs___0(& (ha->pdev)->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 1, (struct dma_attrs *)0); } else { } done: ; return (rval); } } static int qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; { switch ((bsg_job->request)->rqst_data.h_vendor.vendor_cmd[0]) { case 1U: tmp = qla2x00_process_loopback(bsg_job); return (tmp); case 2U: tmp___0 = qla84xx_reset(bsg_job); return (tmp___0); case 3U: tmp___1 = qla84xx_updatefw(bsg_job); return (tmp___1); case 4U: tmp___2 = qla84xx_mgmt_cmd(bsg_job); return (tmp___2); case 5U: tmp___3 = qla24xx_iidma(bsg_job); return (tmp___3); case 6U: tmp___4 = qla24xx_proc_fcp_prio_cfg_cmd(bsg_job); return (tmp___4); case 7U: tmp___5 = qla2x00_read_optrom(bsg_job); return (tmp___5); case 8U: tmp___6 = qla2x00_update_optrom(bsg_job); return (tmp___6); case 11U: tmp___7 = qla2x00_update_fru_versions(bsg_job); return (tmp___7); case 12U: tmp___8 = qla2x00_read_fru_status(bsg_job); return (tmp___8); case 13U: tmp___9 = qla2x00_write_fru_status(bsg_job); return (tmp___9); case 16U: tmp___10 = qla2x00_write_i2c(bsg_job); return (tmp___10); case 17U: tmp___11 = qla2x00_read_i2c(bsg_job); return (tmp___11); case 10U: tmp___12 = qla24xx_process_bidir_cmd(bsg_job); return (tmp___12); case 18U: tmp___13 = qlafx00_mgmt_cmd(bsg_job); return (tmp___13); default: ; return (-38); } } } int qla24xx_bsg_request(struct fc_bsg_job *bsg_job ) { int ret ; struct fc_rport *rport ; fc_port_t *fcport ; struct Scsi_Host *host ; scsi_qla_host_t *vha ; void *tmp ; void *tmp___0 ; int tmp___1 ; { ret = -22; fcport = (fc_port_t *)0; (bsg_job->reply)->reply_payload_rcv_len = 0U; if ((bsg_job->request)->msgcode == 1073741825U) { rport = bsg_job->rport; fcport = *((fc_port_t **)rport->dd_data); host = 
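/* This branch handles rport-directed BSG requests (msgcode 0x40000001, apparently FC_BSG_RPT_ELS
 * in the FC transport): recover the fc_port from the rport private data and the owning Scsi_Host
 * from the rport's parent device. Host-directed message codes use bsg_job->shost directly. */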
dev_to_shost(rport->dev.parent); tmp = shost_priv(host); vha = (scsi_qla_host_t *)tmp; } else { host = bsg_job->shost; tmp___0 = shost_priv(host); vha = (scsi_qla_host_t *)tmp___0; } tmp___1 = qla2x00_reset_active(vha); if (tmp___1 != 0) { ql_dbg(8388608U, vha, 28831, "BSG: ISP abort active/needed -- cmd=%d.\n", (bsg_job->request)->msgcode); return (-16); } else { } ql_dbg(8388608U, vha, 28672, "Entered %s msgcode=0x%x.\n", "qla24xx_bsg_request", (bsg_job->request)->msgcode); switch ((bsg_job->request)->msgcode) { case 1073741825U: ; case 2147483651U: ret = qla2x00_process_els(bsg_job); goto ldv_43902; case 2147483652U: ret = qla2x00_process_ct(bsg_job); goto ldv_43902; case 2147483903U: ret = qla2x00_process_vendor_specific(bsg_job); goto ldv_43902; case 2147483649U: ; case 2147483650U: ; case 1073741826U: ; default: ql_log(1U, vha, 28762, "Unsupported BSG request.\n"); goto ldv_43902; } ldv_43902: ; return (ret); } } int qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job ) { scsi_qla_host_t *vha ; void *tmp ; struct qla_hw_data *ha ; srb_t *sp ; int cnt ; int que ; unsigned long flags ; struct req_que *req ; raw_spinlock_t *tmp___0 ; int tmp___1 ; raw_spinlock_t *tmp___2 ; { tmp = shost_priv(bsg_job->shost); vha = (scsi_qla_host_t *)tmp; ha = vha->hw; tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); que = 0; goto ldv_43931; ldv_43930: req = *(ha->req_q_map + (unsigned long )que); if ((unsigned long )req == (unsigned long )((struct req_que *)0)) { goto ldv_43922; } else { } cnt = 1; goto ldv_43928; ldv_43927: sp = *(req->outstanding_cmds + (unsigned long )cnt); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { if ((((unsigned int )sp->type == 5U || (unsigned int )sp->type == 4U) || (unsigned int )sp->type == 11U) && (unsigned long )sp->u.bsg_job == (unsigned long )bsg_job) { *(req->outstanding_cmds + (unsigned long )cnt) = (srb_t *)0; spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___1 = (*((ha->isp_ops)->abort_command))(sp); if (tmp___1 != 0) { ql_log(1U, vha, 28809, "mbx abort_command failed.\n"); (bsg_job->reply)->result = 4294967291U; (bsg_job->req)->errors = -5; } else { ql_dbg(8388608U, vha, 28810, "mbx abort_command success.\n"); (bsg_job->reply)->result = 0U; (bsg_job->req)->errors = 0; } tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); goto done; } else { } } else { } cnt = cnt + 1; ldv_43928: ; if ((int )req->num_outstanding_cmds > cnt) { goto ldv_43927; } else { } ldv_43922: que = que + 1; ldv_43931: ; if ((int )ha->max_req_queues > que) { goto ldv_43930; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); ql_log(2U, vha, 28811, "SRB not found to abort.\n"); (bsg_job->reply)->result = 4294967290U; (bsg_job->req)->errors = -6; return (0); done: spin_unlock_irqrestore(& ha->hardware_lock, flags); (*(sp->free))((void *)vha, (void *)sp); return (0); } } void choose_timer_13(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_13 = 2; return; } } void activate_pending_timer_13(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_13 == (unsigned long )timer) { if (ldv_timer_state_13 == 2 || pending_flag != 0) { ldv_timer_list_13 = timer; ldv_timer_list_13->data = data; ldv_timer_state_13 = 1; } else { } return; } else { } reg_timer_13(timer); ldv_timer_list_13->data = data; return; } } void disable_suitable_timer_13(struct timer_list *timer ) { { if ((unsigned 
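/* LDV environment-model helper: if the timer being disabled is the one currently tracked for
 * this timer class, clear its pending state so the verification harness stops firing it. */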
long )timer == (unsigned long )ldv_timer_list_13) { ldv_timer_state_13 = 0; return; } else { } return; } } int reg_timer_13(struct timer_list *timer ) { { ldv_timer_list_13 = timer; ldv_timer_state_13 = 1; return (0); } } int ldv_del_timer_65(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_66(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } extern int __printk_ratelimit(char const * ) ; extern void __rwlock_init(rwlock_t * , char const * , struct lock_class_key * ) ; void ldv_write_lock_irqsave(rwlock_t *lock ) ; void ldv_write_unlock_irqrestore(rwlock_t *lock ) ; void ldv_read_lock(rwlock_t *lock ) ; void ldv_read_unlock(rwlock_t *lock ) ; int ldv_del_timer_69(struct timer_list *ldv_func_arg1 ) ; __inline static unsigned char readb(void const volatile *addr ) { unsigned char ret ; { __asm__ volatile ("movb %1,%0": "=q" (ret): "m" (*((unsigned char volatile *)addr)): "memory"); return (ret); } } __inline static unsigned long readq(void const volatile *addr ) { unsigned long ret ; { __asm__ volatile ("movq %1,%0": "=r" (ret): "m" (*((unsigned long volatile *)addr)): "memory"); return (ret); } } __inline static void writeq(unsigned long val , void volatile *addr ) { { __asm__ volatile ("movq %0,%1": : "r" (val), "m" (*((unsigned long volatile *)addr)): "memory"); return; } } void activate_pending_timer_14(struct timer_list *timer , unsigned long data , int pending_flag ) ; int reg_timer_14(struct timer_list *timer ) ; void disable_suitable_timer_14(struct timer_list *timer ) ; void choose_timer_14(struct timer_list *timer ) ; extern int ___ratelimit(struct ratelimit_state * , char const * ) ; extern int dev_err(struct device const * , char const * , ...) 
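/* The definitions that follow correspond to qla_nx.c (ISP82xx support): CRB address translation
 * tables, PCI memory windowing, flash ROM access and the firmware load paths, preceded by the
 * extern declarations they rely on. */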
; extern int pci_bus_read_config_dword(struct pci_bus * , unsigned int , int , u32 * ) ; __inline static int pci_read_config_dword(struct pci_dev const *dev , int where , u32 *val ) { int tmp ; { tmp = pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); return (tmp); } } extern int pcie_capability_read_word(struct pci_dev * , int , u16 * ) ; extern int pci_set_mwi(struct pci_dev * ) ; extern int pci_request_regions(struct pci_dev * , char const * ) ; extern long schedule_timeout(long ) ; int ldv_scsi_add_host_with_dma_70(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; static int const MD_MIU_TEST_AGT_RDDATA[4U] = { 1090519208, 1090519212, 1090519224, 1090519228}; int qla82xx_pci_mem_read_2M(struct qla_hw_data *ha , u64 off , void *data , int size ) ; int qla82xx_pci_region_offset(struct pci_dev *pdev , int region ) ; uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *vha , uint32_t curr_state ) ; char *qdev_state(uint32_t dev_state ) ; int qla82xx_md_alloc(scsi_qla_host_t *vha ) ; int qla82xx_md_collect(scsi_qla_host_t *vha ) ; int qla82xx_validate_template_chksum(scsi_qla_host_t *vha ) ; void qla82xx_mbx_completion(scsi_qla_host_t *vha , uint16_t mb0 ) ; int qla8044_check_fw_alive(struct scsi_qla_host *vha ) ; extern unsigned long msleep_interruptible(unsigned int ) ; static unsigned long crb_addr_xform[60U] ; static int qla82xx_crb_table_initialized ; static void qla82xx_crb_addr_transform_setup(void) { { crb_addr_xform[49] = 1078984704UL; crb_addr_xform[40] = 1097859072UL; crb_addr_xform[5] = 218103808UL; crb_addr_xform[11] = 238026752UL; crb_addr_xform[10] = 236978176UL; crb_addr_xform[9] = 235929600UL; crb_addr_xform[8] = 234881024UL; crb_addr_xform[16] = 1883242496UL; crb_addr_xform[15] = 1882193920UL; crb_addr_xform[14] = 1881145344UL; crb_addr_xform[13] = 1880096768UL; crb_addr_xform[48] = 1894776832UL; crb_addr_xform[47] = 148897792UL; crb_addr_xform[46] = 147849216UL; crb_addr_xform[45] = 1891631104UL; crb_addr_xform[44] = 1890582528UL; crb_addr_xform[43] = 1889533952UL; crb_addr_xform[42] = 143654912UL; crb_addr_xform[53] = 142606336UL; crb_addr_xform[51] = 1108344832UL; crb_addr_xform[29] = 1090519040UL; crb_addr_xform[7] = 241172480UL; crb_addr_xform[12] = 1879048192UL; crb_addr_xform[22] = 876609536UL; crb_addr_xform[21] = 877658112UL; crb_addr_xform[20] = 875560960UL; crb_addr_xform[19] = 874512384UL; crb_addr_xform[18] = 873463808UL; crb_addr_xform[17] = 872415232UL; crb_addr_xform[28] = 1010827264UL; crb_addr_xform[27] = 1011875840UL; crb_addr_xform[26] = 1009778688UL; crb_addr_xform[25] = 1008730112UL; crb_addr_xform[24] = 1007681536UL; crb_addr_xform[23] = 1006632960UL; crb_addr_xform[1] = 1999634432UL; crb_addr_xform[0] = 698351616UL; crb_addr_xform[6] = 454033408UL; crb_addr_xform[50] = 1107296256UL; crb_addr_xform[31] = 219152384UL; crb_addr_xform[2] = 693108736UL; crb_addr_xform[3] = 709885952UL; crb_addr_xform[37] = 209715200UL; crb_addr_xform[36] = 208666624UL; crb_addr_xform[35] = 207618048UL; crb_addr_xform[34] = 1096810496UL; crb_addr_xform[39] = 1972371456UL; crb_addr_xform[38] = 1971322880UL; crb_addr_xform[58] = 1904214016UL; crb_addr_xform[56] = 1080033280UL; crb_addr_xform[59] = 428867584UL; qla82xx_crb_table_initialized = 1; return; } } static struct crb_128M_2M_block_map crb_128M_2M_map[64U] = { {{{0U, 0U, 0U, 0U}}}, {{{1U, 1048576U, 1056768U, 1179648U}, {1U, 1114112U, 1179648U, 1245184U}, {1U, 1179648U, 1187840U, 1196032U}, {1U, 1245184U, 1253376U, 1204224U}, {1U, 1310720U, 1318912U, 1212416U}, {1U, 1376256U, 
1384448U, 1220608U}, {1U, 1441792U, 1507328U, 1114112U}, {1U, 1507328U, 1515520U, 1236992U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 1966080U, 1968128U, 1187840U}, {0U, 0U, 0U, 0U}}}, {{{1U, 2097152U, 2162688U, 1572864U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 4194304U, 4198400U, 1478656U}}}, {{{1U, 5242880U, 5308416U, 1310720U}}}, {{{1U, 6291456U, 6356992U, 1835008U}}}, {{{1U, 7340032U, 7356416U, 1802240U}}}, {{{1U, 8388608U, 8396800U, 1507328U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 9371648U, 9379840U, 1515520U}}}, {{{1U, 9437184U, 9445376U, 1523712U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 10420224U, 10428416U, 1531904U}}}, {{{0U, 10485760U, 10493952U, 1540096U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 11468800U, 11476992U, 1548288U}}}, {{{0U, 11534336U, 11542528U, 1556480U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 12517376U, 12525568U, 1564672U}}}, {{{1U, 12582912U, 12599296U, 1916928U}}}, {{{1U, 13631488U, 13647872U, 1720320U}}}, {{{1U, 14680064U, 14696448U, 1703936U}}}, {{{1U, 15728640U, 15732736U, 1458176U}}}, {{{0U, 16777216U, 16793600U, 1736704U}}}, {{{1U, 17825792U, 17829888U, 1441792U}}}, {{{1U, 18874368U, 18878464U, 1445888U}}}, {{{1U, 19922944U, 19927040U, 1449984U}}}, {{{1U, 20971520U, 20975616U, 1454080U}}}, {{{1U, 22020096U, 22024192U, 1462272U}}}, {{{1U, 23068672U, 23072768U, 1466368U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 30408704U, 30474240U, 1638400U}}}, {{{1U, 31457280U, 31461376U, 1482752U}}}, {{{1U, 32505856U, 32571392U, 1376256U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 34603008U, 34611200U, 1179648U}, {1U, 34668544U, 34734080U, 1245184U}, {1U, 34734080U, 34742272U, 1196032U}, {1U, 34799616U, 34807808U, 1204224U}, {1U, 34865152U, 34873344U, 1212416U}, {1U, 34930688U, 34938880U, 1220608U}, {1U, 34996224U, 35061760U, 1114112U}, {1U, 35061760U, 35069952U, 1236992U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}}}, {{{1U, 35651584U, 35667968U, 1769472U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 41943040U, 41959424U, 1720320U}}}, {{{1U, 42991616U, 42995712U, 1486848U}}}, {{{1U, 44040192U, 44041216U, 1754112U}}}, {{{1U, 45088768U, 45089792U, 1755136U}}}, {{{1U, 46137344U, 46138368U, 1756160U}}}, {{{1U, 47185920U, 47186944U, 1757184U}}}, {{{1U, 48234496U, 48235520U, 1758208U}}}, {{{1U, 49283072U, 49284096U, 1759232U}}}, {{{1U, 50331648U, 50332672U, 1760256U}}}, {{{0U, 51380224U, 51396608U, 
1736704U}}}, {{{1U, 52428800U, 52445184U, 1916928U}}}, {{{1U, 53477376U, 53493760U, 1703936U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 55574528U, 55575552U, 1753088U}}}, {{{1U, 56623104U, 56624128U, 1761280U}}}, {{{1U, 57671680U, 57672704U, 1762304U}}}, {{{1U, 58720256U, 58736640U, 1900544U}}}, {{{1U, 59768832U, 59785216U, 1785856U}}}, {{{1U, 60817408U, 60833792U, 1933312U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 63963136U, 63979520U, 1949696U}}}, {{{1U, 65011712U, 65015808U, 1470464U}}}, {{{1U, 66060288U, 66064384U, 1474560U}}}}; static unsigned int qla82xx_crb_hub_agt[64U] = { 0U, 1907U, 661U, 677U, 0U, 208U, 433U, 230U, 224U, 225U, 226U, 227U, 1056U, 1047U, 1057U, 843U, 1029U, 832U, 833U, 834U, 835U, 837U, 836U, 960U, 961U, 962U, 963U, 0U, 964U, 1040U, 0U, 209U, 0U, 1907U, 1046U, 0U, 0U, 0U, 0U, 0U, 1047U, 0U, 137U, 1802U, 1803U, 1804U, 141U, 142U, 1807U, 1029U, 1056U, 1057U, 0U, 136U, 145U, 1810U, 1030U, 0U, 1816U, 409U, 425U, 0U, 838U, 0U}; static char *q_dev_state[8U] = { (char *)"Unknown", (char *)"Cold", (char *)"Initializing", (char *)"Ready", (char *)"Need Reset", (char *)"Need Quiescent", (char *)"Failed", (char *)"Quiescent"}; char *qdev_state(uint32_t dev_state ) { { return (q_dev_state[dev_state]); } } static void qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha , ulong *off ) { u32 win_read ; scsi_qla_host_t *vha ; void *tmp ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ha->crb_win = (qla82xx_crb_hub_agt[(*off >> 20) & 63UL] << 20) | ((uint32_t )*off & 983040U); writel(ha->crb_win, (void volatile *)(ha->nx_pcibase + 1245280UL)); win_read = readl((void const volatile *)(ha->nx_pcibase + 1245280UL)); if (ha->crb_win != win_read) { ql_dbg(524288U, vha, 45056, "%s: Written crbwin (0x%x) != Read crbwin (0x%x), off=0x%lx.\n", "qla82xx_pci_set_crbwindow_2M", ha->crb_win, win_read, *off); } else { } *off = (ulong )((((unsigned long long )*off & 65535ULL) + (unsigned long long )ha->nx_pcibase) + 1966080ULL); return; } } static int qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha , ulong *off ) { struct crb_128M_2M_sub_block_map *m ; { if (*off > 167772159UL) { return (-1); } else { } if (*off > 75497471UL && *off <= 75499519UL) { *off = (*off + ha->nx_pcibase) - 74450944UL; return (0); } else { } if (*off <= 100663295UL) { return (-1); } else { } *off = *off - 100663296UL; m = (struct crb_128M_2M_sub_block_map *)(& crb_128M_2M_map[(*off >> 20) & 63UL].sub_block) + ((*off >> 16) & 15UL); if ((m->valid != 0U && (ulong )m->start_128M <= *off) && (ulong )m->end_128M > *off) { *off = ((*off + (ulong )m->start_2M) - (ulong )m->start_128M) + ha->nx_pcibase; return (0); } else { } return (1); } } static int qla82xx_crb_win_lock(struct qla_hw_data *ha ) { int done ; int timeout ; { done = 0; timeout = 0; goto ldv_43529; ldv_43528: done = qla82xx_rd_32(ha, 101826616UL); if (done == 1) { goto ldv_43527; } else { } if (timeout > 99999999) { return (-1); } else { } timeout = timeout + 1; ldv_43529: ; if (done == 0) { goto ldv_43528; } else { } ldv_43527: qla82xx_wr_32(ha, 136323364UL, (u32 )ha->portnum); return (0); } } int qla82xx_wr_32(struct qla_hw_data *ha , ulong off , u32 data ) { unsigned long flags ; int rv ; long tmp ; { flags = 0UL; rv = qla82xx_pci_get_crb_addr_2M(ha, & off); tmp = ldv__builtin_expect(rv == -1, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_nx.o.c.prepared"), "i" (774), "i" (12UL)); ldv_43537: ; goto ldv_43537; } else { } if (rv == 1) { ldv_write_lock_irqsave(& ha->hw_lock); qla82xx_crb_win_lock(ha); qla82xx_pci_set_crbwindow_2M(ha, & off); } else { } writel(data, (void volatile *)off); if (rv == 1) { qla82xx_rd_32(ha, 101826620UL); ldv_write_unlock_irqrestore(& ha->hw_lock); } else { } return (0); } } int qla82xx_rd_32(struct qla_hw_data *ha , ulong off ) { unsigned long flags ; int rv ; u32 data ; long tmp ; { flags = 0UL; rv = qla82xx_pci_get_crb_addr_2M(ha, & off); tmp = ldv__builtin_expect(rv == -1, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_nx.o.c.prepared"), "i" (800), "i" (12UL)); ldv_43545: ; goto ldv_43545; } else { } if (rv == 1) { ldv_write_lock_irqsave(& ha->hw_lock); qla82xx_crb_win_lock(ha); qla82xx_pci_set_crbwindow_2M(ha, & off); } else { } data = readl((void const volatile *)off); if (rv == 1) { qla82xx_rd_32(ha, 101826620UL); ldv_write_unlock_irqrestore(& ha->hw_lock); } else { } return ((int )data); } } int qla82xx_idc_lock(struct qla_hw_data *ha ) { int i ; int done ; int timeout ; struct thread_info *tmp ; { done = 0; timeout = 0; goto ldv_43557; ldv_43556: done = qla82xx_rd_32(ha, 101826600UL); if (done == 1) { goto ldv_43552; } else { } if (timeout > 99999999) { return (-1); } else { } timeout = timeout + 1; tmp = current_thread_info(); if (((unsigned long )tmp->preempt_count & 134217472UL) == 0UL) { schedule(); } else { i = 0; goto ldv_43554; ldv_43553: cpu_relax(); i = i + 1; ldv_43554: ; if (i <= 19) { goto ldv_43553; } else { } } ldv_43557: ; if (done == 0) { goto ldv_43556; } else { } ldv_43552: ; return (0); } } void qla82xx_idc_unlock(struct qla_hw_data *ha ) { { qla82xx_rd_32(ha, 101826604UL); return; } } static unsigned long qla82xx_pci_mem_bound_check(struct qla_hw_data *ha , unsigned long long addr , int size ) { { if ((addr > 268435455ULL || ((unsigned long long )size + addr) - 1ULL > 268435455ULL) || (((size != 1 && size != 2) && size != 4) && size != 8)) { return (0UL); } else { return (1UL); } } } static int qla82xx_pci_set_window_warning_count ; static unsigned long qla82xx_pci_set_window(struct qla_hw_data *ha , unsigned long long addr ) { int window ; u32 win_read ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; unsigned int temp1 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; if (addr <= 268435455ULL) { window = (int )((unsigned int )((addr & 33292288ULL) >> 1) | ((unsigned int )(addr >> 25) & 1023U)); ha->ddr_mn_window = (uint32_t )window; qla82xx_wr_32(ha, ha->mn_win_crb | 100663296UL, (u32 )window); tmp___0 = qla82xx_rd_32(ha, ha->mn_win_crb | 100663296UL); win_read = (u32 )tmp___0; if (win_read << 17 != (u32 )window) { ql_dbg(524288U, vha, 45059, "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n", "qla82xx_pci_set_window", window, win_read); } else { } addr = addr & 262143ULL; } else if (addr <= 8590983167ULL && addr > 8589934591ULL) { if 
((addr & 1046528ULL) == 1046528ULL) { ql_log(1U, vha, 45060, "%s: QM access not handled.\n", "qla82xx_pci_set_window"); addr = 0xffffffffffffffffULL; } else { } window = (int )((unsigned int )((addr & 33488896ULL) >> 1) | ((unsigned int )(addr >> 25) & 1023U)); ha->ddr_mn_window = (uint32_t )window; qla82xx_wr_32(ha, ha->mn_win_crb | 100663296UL, (u32 )window); tmp___1 = qla82xx_rd_32(ha, ha->mn_win_crb | 100663296UL); win_read = (u32 )tmp___1; temp1 = ((unsigned int )(window << 7) & 65535U) | ((unsigned int )window >> 17); if (win_read != temp1) { ql_log(1U, vha, 45061, "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n", "qla82xx_pci_set_window", temp1, win_read); } else { } addr = (addr & 262143ULL) + 786432ULL; } else if (addr <= 12952010751ULL && addr > 12884901887ULL) { window = (int )addr & 268173312; ha->qdr_sn_window = window; qla82xx_wr_32(ha, ha->ms_win_crb | 100663296UL, (u32 )window); tmp___2 = qla82xx_rd_32(ha, ha->ms_win_crb | 100663296UL); win_read = (u32 )tmp___2; if ((u32 )window != win_read) { ql_log(1U, vha, 45062, "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n", "qla82xx_pci_set_window", window, win_read); } else { } addr = (addr & 262143ULL) + 67108864ULL; } else { tmp___3 = qla82xx_pci_set_window_warning_count; qla82xx_pci_set_window_warning_count = qla82xx_pci_set_window_warning_count + 1; if (tmp___3 <= 7 || ((unsigned int )qla82xx_pci_set_window_warning_count & 63U) == 0U) { ql_log(1U, vha, 45063, "%s: Warning:%s Unknown address range!.\n", "qla82xx_pci_set_window", (char *)"qla2xxx"); } else { } addr = 0xffffffffffffffffULL; } return ((unsigned long )addr); } } static int qla82xx_pci_is_same_window(struct qla_hw_data *ha , unsigned long long addr ) { int window ; unsigned long long qdr_max ; { qdr_max = 12952010751ULL; if (addr <= 268435455ULL) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_nx.o.c.prepared"), "i" (960), "i" (12UL)); ldv_43582: ; goto ldv_43582; } else if (addr <= 8590983167ULL && addr > 8589934591ULL) { return (1); } else if (addr <= 8595177471ULL && addr > 8594128895ULL) { return (1); } else if (addr <= qdr_max && addr > 12884901887ULL) { window = (int )((addr - 12884901888ULL) >> 22) & 63; if (ha->qdr_sn_window == window) { return (1); } else { } } else { } return (0); } } static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha , u64 off , void *data , int size ) { void *addr ; int ret ; u64 start ; uint8_t *mem_ptr ; unsigned long mem_base ; unsigned long mem_page ; scsi_qla_host_t *vha ; void *tmp ; unsigned long tmp___0 ; int tmp___1 ; void *tmp___2 ; void *tmp___3 ; unsigned long tmp___4 ; { addr = (void *)0; ret = 0; mem_ptr = (uint8_t *)0U; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ldv_write_lock_irqsave(& ha->hw_lock); tmp___0 = qla82xx_pci_set_window(ha, off); start = (u64 )tmp___0; if (start == 0xffffffffffffffffULL) { ldv_write_unlock_irqrestore(& ha->hw_lock); ql_log(0U, vha, 45064, "%s out of bound pci memory access, offset is 0x%llx.\n", (char *)"qla2xxx", off); return (-1); } else { tmp___1 = qla82xx_pci_is_same_window(ha, ((u64 )size + off) - 1ULL); if (tmp___1 == 0) { ldv_write_unlock_irqrestore(& ha->hw_lock); ql_log(0U, vha, 45064, "%s out of bound pci memory access, 
offset is 0x%llx.\n", (char *)"qla2xxx", off); return (-1); } else { } } ldv_write_unlock_irqrestore(& ha->hw_lock); mem_base = (unsigned long )(ha->pdev)->resource[0].start; mem_page = (unsigned long )start & 0xfffffffffffff000UL; if (((((u64 )size + start) - 1ULL) & 0xfffffffffffff000ULL) != (unsigned long long )mem_page) { tmp___2 = ioremap((resource_size_t )(mem_base + mem_page), 8192UL); mem_ptr = (uint8_t *)tmp___2; } else { tmp___3 = ioremap((resource_size_t )(mem_base + mem_page), 4096UL); mem_ptr = (uint8_t *)tmp___3; } if ((unsigned long )mem_ptr == (unsigned long )((uint8_t *)0U)) { *((u8 *)data) = 0U; return (-1); } else { } addr = (void *)mem_ptr; addr = addr + (start & 4095ULL); ldv_write_lock_irqsave(& ha->hw_lock); switch (size) { case 1: *((u8 *)data) = readb((void const volatile *)addr); goto ldv_43598; case 2: *((u16 *)data) = readw((void const volatile *)addr); goto ldv_43598; case 4: *((u32 *)data) = readl((void const volatile *)addr); goto ldv_43598; case 8: tmp___4 = readq((void const volatile *)addr); *((u64 *)data) = (u64 )tmp___4; goto ldv_43598; default: ret = -1; goto ldv_43598; } ldv_43598: ldv_write_unlock_irqrestore(& ha->hw_lock); if ((unsigned long )mem_ptr != (unsigned long )((uint8_t *)0U)) { iounmap((void volatile *)mem_ptr); } else { } return (ret); } } static int qla82xx_pci_mem_write_direct(struct qla_hw_data *ha , u64 off , void *data , int size ) { void *addr ; int ret ; u64 start ; uint8_t *mem_ptr ; unsigned long mem_base ; unsigned long mem_page ; scsi_qla_host_t *vha ; void *tmp ; unsigned long tmp___0 ; int tmp___1 ; void *tmp___2 ; void *tmp___3 ; { addr = (void *)0; ret = 0; mem_ptr = (uint8_t *)0U; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ldv_write_lock_irqsave(& ha->hw_lock); tmp___0 = qla82xx_pci_set_window(ha, off); start = (u64 )tmp___0; if (start == 0xffffffffffffffffULL) { ldv_write_unlock_irqrestore(& ha->hw_lock); ql_log(0U, vha, 45065, "%s out of bount memory access, offset is 0x%llx.\n", (char *)"qla2xxx", off); return (-1); } else { tmp___1 = qla82xx_pci_is_same_window(ha, ((u64 )size + off) - 1ULL); if (tmp___1 == 0) { ldv_write_unlock_irqrestore(& ha->hw_lock); ql_log(0U, vha, 45065, "%s out of bount memory access, offset is 0x%llx.\n", (char *)"qla2xxx", off); return (-1); } else { } } ldv_write_unlock_irqrestore(& ha->hw_lock); mem_base = (unsigned long )(ha->pdev)->resource[0].start; mem_page = (unsigned long )start & 0xfffffffffffff000UL; if (((((u64 )size + start) - 1ULL) & 0xfffffffffffff000ULL) != (unsigned long long )mem_page) { tmp___2 = ioremap((resource_size_t )(mem_base + mem_page), 8192UL); mem_ptr = (uint8_t *)tmp___2; } else { tmp___3 = ioremap((resource_size_t )(mem_base + mem_page), 4096UL); mem_ptr = (uint8_t *)tmp___3; } if ((unsigned long )mem_ptr == (unsigned long )((uint8_t *)0U)) { return (-1); } else { } addr = (void *)mem_ptr; addr = addr + (start & 4095ULL); ldv_write_lock_irqsave(& ha->hw_lock); switch (size) { case 1: writeb((int )*((u8 *)data), (void volatile *)addr); goto ldv_43618; case 2: writew((int )*((u16 *)data), (void volatile *)addr); goto ldv_43618; case 4: writel(*((u32 *)data), (void volatile *)addr); goto ldv_43618; case 8: writeq((unsigned long )*((u64 *)data), (void volatile *)addr); goto ldv_43618; default: ret = -1; goto ldv_43618; } ldv_43618: ldv_write_unlock_irqrestore(& ha->hw_lock); if ((unsigned long )mem_ptr != (unsigned long )((uint8_t *)0U)) { iounmap((void volatile *)mem_ptr); } else { } return (ret); } } static unsigned long 
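/* qla82xx_decode_crb_addr(): translate a CRB address in the legacy 128 MB layout into a PCI
 * offset by matching its 1 MB-aligned base against the crb_addr_xform[] table built in
 * qla82xx_crb_addr_transform_setup(); returns 0xffffffff when no entry matches. */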
qla82xx_decode_crb_addr(unsigned long addr ) { int i ; unsigned long base_addr ; unsigned long offset ; unsigned long pci_base ; { if (qla82xx_crb_table_initialized == 0) { qla82xx_crb_addr_transform_setup(); } else { } pci_base = 4294967295UL; base_addr = addr & 4293918720UL; offset = addr & 1048575UL; i = 0; goto ldv_43632; ldv_43631: ; if (crb_addr_xform[i] == base_addr) { pci_base = (unsigned long )(i << 20); goto ldv_43630; } else { } i = i + 1; ldv_43632: ; if (i <= 59) { goto ldv_43631; } else { } ldv_43630: ; if (pci_base == 4294967295UL) { return (pci_base); } else { } return (pci_base + offset); } } static long rom_max_timeout = 100L; static long qla82xx_rom_lock_timeout = 100L; static int qla82xx_rom_lock(struct qla_hw_data *ha ) { int done ; int timeout ; uint32_t lock_owner ; int tmp ; { done = 0; timeout = 0; lock_owner = 0U; goto ldv_43643; ldv_43642: done = qla82xx_rd_32(ha, 101826576UL); if (done == 1) { goto ldv_43641; } else { } if ((long )timeout >= qla82xx_rom_lock_timeout) { tmp = qla82xx_rd_32(ha, 136323328UL); lock_owner = (uint32_t )tmp; return (-1); } else { } timeout = timeout + 1; ldv_43643: ; if (done == 0) { goto ldv_43642; } else { } ldv_43641: qla82xx_wr_32(ha, 136323328UL, 222393152U); return (0); } } static void qla82xx_rom_unlock(struct qla_hw_data *ha ) { { qla82xx_rd_32(ha, 101826580UL); return; } } static int qla82xx_wait_rom_busy(struct qla_hw_data *ha ) { long timeout ; long done ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { timeout = 0L; done = 0L; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; goto ldv_43654; ldv_43653: tmp___0 = qla82xx_rd_32(ha, 154140676UL); done = (long )tmp___0; done = done & 4L; timeout = timeout + 1L; if (timeout >= rom_max_timeout) { ql_dbg(524288U, vha, 45066, "%s: Timeout reached waiting for rom busy.\n", (char *)"qla2xxx"); return (-1); } else { } ldv_43654: ; if (done == 0L) { goto ldv_43653; } else { } return (0); } } static int qla82xx_wait_rom_done(struct qla_hw_data *ha ) { long timeout ; long done ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { timeout = 0L; done = 0L; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; goto ldv_43663; ldv_43662: tmp___0 = qla82xx_rd_32(ha, 154140676UL); done = (long )tmp___0; done = done & 2L; timeout = timeout + 1L; if (timeout >= rom_max_timeout) { ql_dbg(524288U, vha, 45067, "%s: Timeout reached waiting for rom done.\n", (char *)"qla2xxx"); return (-1); } else { } ldv_43663: ; if (done == 0L) { goto ldv_43662; } else { } return (0); } } static int qla82xx_md_rw_32(struct qla_hw_data *ha , uint32_t off , u32 data , uint8_t flag ) { uint32_t off_value ; uint32_t rval ; { rval = 0U; writel(off & 4294901760U, (void volatile *)(ha->nx_pcibase + 1245280UL)); readl((void const volatile *)(ha->nx_pcibase + 1245280UL)); off_value = off & 65535U; if ((unsigned int )flag != 0U) { writel(data, (void volatile *)(((unsigned long )off_value + ha->nx_pcibase) + 1966080UL)); } else { rval = readl((void const volatile *)(((unsigned long )off_value + ha->nx_pcibase) + 1966080UL)); } return ((int )rval); } } static int qla82xx_do_rom_fast_read(struct qla_hw_data *ha , int addr , int *valp ) { { qla82xx_md_rw_32(ha, 1108410416U, (unsigned int )addr & 4294901760U, 1); *valp = qla82xx_md_rw_32(ha, (uint32_t )((addr & 65535) + 1108672512), 0U, 0); return (0); } } static int qla82xx_rom_fast_read(struct qla_hw_data *ha , int addr , int *valp ) { int ret ; int loops ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { loops = 0; tmp = pci_get_drvdata(ha->pdev); vha 
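/* Acquire the flash ROM hardware semaphore with a bounded retry loop (up to 50000 attempts,
 * delaying and rescheduling between tries), perform the windowed read via
 * qla82xx_do_rom_fast_read(), then release the semaphore. */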
= (scsi_qla_host_t *)tmp; goto ldv_43687; ldv_43686: __const_udelay(429500UL); schedule(); loops = loops + 1; ldv_43687: tmp___0 = qla82xx_rom_lock(ha); if (tmp___0 != 0 && loops <= 49999) { goto ldv_43686; } else { } if (loops > 49999) { ql_log(0U, vha, 185, "Failed to acquire SEM2 lock.\n"); return (-1); } else { } ret = qla82xx_do_rom_fast_read(ha, addr, valp); qla82xx_rom_unlock(ha); return (ret); } } static int qla82xx_read_status_reg(struct qla_hw_data *ha , uint32_t *val ) { scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; int tmp___1 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; qla82xx_wr_32(ha, 154206212UL, 5U); qla82xx_wait_rom_busy(ha); tmp___0 = qla82xx_wait_rom_done(ha); if (tmp___0 != 0) { ql_log(1U, vha, 45068, "Error waiting for rom done.\n"); return (-1); } else { } tmp___1 = qla82xx_rd_32(ha, 154206232UL); *val = (uint32_t )tmp___1; return (0); } } static int qla82xx_flash_wait_write_finish(struct qla_hw_data *ha ) { long timeout ; uint32_t done ; uint32_t val ; int ret ; scsi_qla_host_t *vha ; void *tmp ; { timeout = 0L; done = 1U; ret = 0; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; qla82xx_wr_32(ha, 154206224UL, 0U); goto ldv_43704; ldv_43703: ret = qla82xx_read_status_reg(ha, & val); done = val & 1U; timeout = timeout + 1L; __const_udelay(42950UL); __might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_nx.o.c.prepared", 1299, 0); _cond_resched(); if (timeout > 49999L) { ql_log(1U, vha, 45069, "Timeout reached waiting for write finish.\n"); return (-1); } else { } ldv_43704: ; if (done != 0U && ret == 0) { goto ldv_43703; } else { } return (ret); } } static int qla82xx_flash_set_write_enable(struct qla_hw_data *ha ) { uint32_t val ; int tmp ; int tmp___0 ; { qla82xx_wait_rom_busy(ha); qla82xx_wr_32(ha, 154206224UL, 0U); qla82xx_wr_32(ha, 154206212UL, 6U); qla82xx_wait_rom_busy(ha); tmp = qla82xx_wait_rom_done(ha); if (tmp != 0) { return (-1); } else { } tmp___0 = qla82xx_read_status_reg(ha, & val); if (tmp___0 != 0) { return (-1); } else { } if ((val & 2U) == 0U) { return (-1); } else { } return (0); } } static int qla82xx_write_status_reg(struct qla_hw_data *ha , uint32_t val ) { scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = qla82xx_flash_set_write_enable(ha); if (tmp___0 != 0) { return (-1); } else { } qla82xx_wr_32(ha, 154206220UL, val); qla82xx_wr_32(ha, 154206212UL, 1U); tmp___1 = qla82xx_wait_rom_done(ha); if (tmp___1 != 0) { ql_log(1U, vha, 45070, "Error waiting for rom done.\n"); return (-1); } else { } tmp___2 = qla82xx_flash_wait_write_finish(ha); return (tmp___2); } } static int qla82xx_write_disable_flash(struct qla_hw_data *ha ) { scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; qla82xx_wr_32(ha, 154206212UL, 4U); tmp___0 = qla82xx_wait_rom_done(ha); if (tmp___0 != 0) { ql_log(1U, vha, 45071, "Error waiting for rom done.\n"); return (-1); } else { } return (0); } } static int ql82xx_rom_lock_d(struct qla_hw_data *ha ) { int loops ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { loops = 0; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; goto ldv_43726; ldv_43725: __const_udelay(429500UL); 
__might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_nx.o.c.prepared", 1363, 0); _cond_resched(); loops = loops + 1; ldv_43726: tmp___0 = qla82xx_rom_lock(ha); if (tmp___0 != 0 && loops <= 49999) { goto ldv_43725; } else { } if (loops > 49999) { ql_log(1U, vha, 45072, "ROM lock failed.\n"); return (-1); } else { } return (0); } } static int qla82xx_write_flash_dword(struct qla_hw_data *ha , uint32_t flashaddr , uint32_t data ) { int ret ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; int tmp___1 ; { ret = 0; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(1U, vha, 45073, "ROM lock failed.\n"); return (ret); } else { } tmp___0 = qla82xx_flash_set_write_enable(ha); if (tmp___0 != 0) { goto done_write; } else { } qla82xx_wr_32(ha, 154206220UL, data); qla82xx_wr_32(ha, 154206216UL, flashaddr); qla82xx_wr_32(ha, 154206224UL, 3U); qla82xx_wr_32(ha, 154206212UL, 2U); qla82xx_wait_rom_busy(ha); tmp___1 = qla82xx_wait_rom_done(ha); if (tmp___1 != 0) { ql_log(1U, vha, 45074, "Error waiting for rom done.\n"); ret = -1; goto done_write; } else { } ret = qla82xx_flash_wait_write_finish(ha); done_write: qla82xx_rom_unlock(ha); return (ret); } } static int qla82xx_pinit_from_rom(scsi_qla_host_t *vha ) { int addr ; int val ; int i ; struct crb_addr_pair *buf ; unsigned long off ; unsigned int offset ; unsigned int n ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; void *tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long tmp___5 ; { ha = vha->hw; qla82xx_rom_lock(ha); qla82xx_wr_32(ha, 153092112UL, 0U); qla82xx_wr_32(ha, 153092116UL, 0U); qla82xx_wr_32(ha, 153092120UL, 0U); qla82xx_wr_32(ha, 153092124UL, 0U); qla82xx_wr_32(ha, 153092128UL, 0U); qla82xx_wr_32(ha, 153092132UL, 0U); qla82xx_wr_32(ha, 106954816UL, 255U); qla82xx_wr_32(ha, 107413504UL, 0U); qla82xx_wr_32(ha, 107479040UL, 0U); qla82xx_wr_32(ha, 107544576UL, 0U); qla82xx_wr_32(ha, 107610112UL, 0U); qla82xx_wr_32(ha, 107675648UL, 0U); val = qla82xx_rd_32(ha, 105910272UL); qla82xx_wr_32(ha, 105910272UL, (u32 )val & 4294967294U); qla82xx_wr_32(ha, 133174016UL, 1U); qla82xx_wr_32(ha, 142606336UL, 0U); qla82xx_wr_32(ha, 142606344UL, 0U); qla82xx_wr_32(ha, 142606352UL, 0U); qla82xx_wr_32(ha, 142606360UL, 0U); qla82xx_wr_32(ha, 142606592UL, 0U); qla82xx_wr_32(ha, 142606848UL, 0U); qla82xx_wr_32(ha, 118489148UL, 1U); qla82xx_wr_32(ha, 119537724UL, 1U); qla82xx_wr_32(ha, 120586300UL, 1U); qla82xx_wr_32(ha, 121634876UL, 1U); qla82xx_wr_32(ha, 116391996UL, 1U); msleep(20U); tmp = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp != 0) { qla82xx_wr_32(ha, 154140680UL, 4278190079U); } else { qla82xx_wr_32(ha, 154140680UL, 4294967295U); } qla82xx_rom_unlock(ha); tmp___0 = qla82xx_rom_fast_read(ha, 0, (int *)(& n)); if (tmp___0 != 0 || n != 3405695742U) { ql_log(0U, vha, 110, "Error Reading crb_init area: n: %08x.\n", n); return (-1); } else { tmp___1 = qla82xx_rom_fast_read(ha, 4, (int *)(& n)); if (tmp___1 != 0) { ql_log(0U, vha, 110, "Error Reading crb_init area: n: %08x.\n", n); return (-1); } else { } } offset = n & 65535U; n = n >> 16; if (n > 1023U) { ql_log(0U, vha, 113, "Card flash not initialized:n=0x%x.\n", n); return (-1); } else { } ql_log(2U, vha, 114, "%d CRB init values found in ROM.\n", n); tmp___2 = kmalloc((unsigned long )n * 16UL, 208U); 
buf = (struct crb_addr_pair___0 *)tmp___2; if ((unsigned long )buf == (unsigned long )((struct crb_addr_pair___0 *)0)) { ql_log(0U, vha, 268, "Unable to allocate memory.\n"); return (-1); } else { } i = 0; goto ldv_43751; ldv_43750: tmp___3 = qla82xx_rom_fast_read(ha, (int )(((unsigned int )(i * 2) + offset) * 4U), & val); if (tmp___3 != 0) { kfree((void const *)buf); return (-1); } else { tmp___4 = qla82xx_rom_fast_read(ha, (int )((((unsigned int )(i * 2) + offset) + 1U) * 4U), & addr); if (tmp___4 != 0) { kfree((void const *)buf); return (-1); } else { } } (buf + (unsigned long )i)->addr = (long )addr; (buf + (unsigned long )i)->data = (long )val; i = i + 1; ldv_43751: ; if ((unsigned int )i < n) { goto ldv_43750; } else { } i = 0; goto ldv_43755; ldv_43754: tmp___5 = qla82xx_decode_crb_addr((unsigned long )(buf + (unsigned long )i)->addr); off = tmp___5 + 100663296UL; if (off == 136323580UL) { goto ldv_43753; } else { } if (off == 154140860UL) { goto ldv_43753; } else { } if (off == 154140872UL) { goto ldv_43753; } else { } if (off == 101785664UL) { goto ldv_43753; } else { } if (off == 101785672UL) { goto ldv_43753; } else { } if ((off & 267386880UL) == 161480704UL) { goto ldv_43753; } else { } if ((off & 267386880UL) == 102760448UL) { goto ldv_43753; } else { } if (off == 4294967295UL) { ql_log(0U, vha, 278, "Unknow addr: 0x%08lx.\n", (buf + (unsigned long )i)->addr); goto ldv_43753; } else { } qla82xx_wr_32(ha, off, (u32 )(buf + (unsigned long )i)->data); if (off == 154140680UL) { msleep(1000U); } else { } msleep(1U); ldv_43753: i = i + 1; ldv_43755: ; if ((unsigned int )i < n) { goto ldv_43754; } else { } kfree((void const *)buf); qla82xx_wr_32(ha, 122683628UL, 30U); qla82xx_wr_32(ha, 122683468UL, 8U); qla82xx_wr_32(ha, 123732044UL, 8U); qla82xx_wr_32(ha, 118489096UL, 0U); qla82xx_wr_32(ha, 118489100UL, 0U); qla82xx_wr_32(ha, 119537672UL, 0U); qla82xx_wr_32(ha, 119537676UL, 0U); qla82xx_wr_32(ha, 120586248UL, 0U); qla82xx_wr_32(ha, 120586252UL, 0U); qla82xx_wr_32(ha, 121634824UL, 0U); qla82xx_wr_32(ha, 121634828UL, 0U); return (0); } } static int qla82xx_pci_mem_write_2M(struct qla_hw_data *ha , u64 off , void *data , int size ) { int i ; int j ; int ret ; int loop ; int sz[2U] ; int off0 ; int scale ; int shift_amount ; int startword ; uint32_t temp ; uint64_t off8 ; uint64_t mem_crb ; uint64_t tmpw ; uint64_t word[2U] ; int tmp ; unsigned long tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { ret = 0; word[0] = 0ULL; word[1] = 0ULL; if (off > 12884901887ULL && off <= 12952010751ULL) { mem_crb = 131072000ULL; } else { mem_crb = 102760448ULL; tmp___0 = qla82xx_pci_mem_bound_check(ha, off, size); if (tmp___0 == 0UL) { tmp = qla82xx_pci_mem_write_direct(ha, off, data, size); return (tmp); } else { } } off0 = (int )off & 7; sz[0] = size < 8 - off0 ? 
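/* Split an arbitrarily aligned write into at most two 8-byte words of a 16-byte line: sz[0]
 * covers the bytes up to the next 8-byte boundary, sz[1] any remainder; the existing line
 * contents are read back first so the surrounding bytes are preserved before writing through
 * the memory agent. */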
size : 8 - off0; sz[1] = size - sz[0]; off8 = off & 4294967280ULL; loop = (int )((unsigned int )((((off & 15ULL) + (u64 )size) - 1ULL) >> 4) + 1U); shift_amount = 4; scale = 2; startword = (int )((off & 15ULL) / 8ULL); i = 0; goto ldv_43778; ldv_43777: tmp___1 = qla82xx_pci_mem_read_2M(ha, (uint64_t )(i << shift_amount) + off8, (void *)(& word) + (unsigned long )(i * scale), 8); if (tmp___1 != 0) { return (-1); } else { } i = i + 1; ldv_43778: ; if (i < loop) { goto ldv_43777; } else { } switch (size) { case 1: tmpw = (uint64_t )*((uint8_t *)data); goto ldv_43781; case 2: tmpw = (uint64_t )*((uint16_t *)data); goto ldv_43781; case 4: tmpw = (uint64_t )*((uint32_t *)data); goto ldv_43781; case 8: ; default: tmpw = *((uint64_t *)data); goto ldv_43781; } ldv_43781: ; if (sz[0] == 8) { word[startword] = tmpw; } else { word[startword] = word[startword] & ~ (~ (0xffffffffffffffffULL << sz[0] * 8) << off0 * 8); word[startword] = word[startword] | (tmpw << off0 * 8); } if (sz[1] != 0) { word[startword + 1] = word[startword + 1] & ~ (0xffffffffffffffffULL << sz[1] * 8); word[startword + 1] = word[startword + 1] | (tmpw >> sz[0] * 8); } else { } i = 0; goto ldv_43792; ldv_43791: temp = (uint32_t )(i << shift_amount) + (uint32_t )off8; qla82xx_wr_32(ha, (ulong )(mem_crb + 148ULL), temp); temp = 0U; qla82xx_wr_32(ha, (ulong )(mem_crb + 152ULL), temp); temp = (uint32_t )word[i * scale]; qla82xx_wr_32(ha, (ulong )(mem_crb + 160ULL), temp); temp = (uint32_t )(word[i * scale] >> 32); qla82xx_wr_32(ha, (ulong )(mem_crb + 164ULL), temp); temp = (uint32_t )word[i * scale + 1]; qla82xx_wr_32(ha, (ulong )(mem_crb + 176ULL), temp); temp = (uint32_t )(word[i * scale + 1] >> 32); qla82xx_wr_32(ha, (ulong )(mem_crb + 180ULL), temp); temp = 6U; qla82xx_wr_32(ha, (ulong )(mem_crb + 144ULL), temp); temp = 7U; qla82xx_wr_32(ha, (ulong )(mem_crb + 144ULL), temp); j = 0; goto ldv_43788; ldv_43787: tmp___2 = qla82xx_rd_32(ha, (ulong )(mem_crb + 144ULL)); temp = (uint32_t )tmp___2; if ((temp & 8U) == 0U) { goto ldv_43786; } else { } j = j + 1; ldv_43788: ; if (j <= 999) { goto ldv_43787; } else { } ldv_43786: ; if (j > 999) { tmp___3 = __printk_ratelimit("qla82xx_pci_mem_write_2M"); if (tmp___3 != 0) { dev_err((struct device const *)(& (ha->pdev)->dev), "failed to write through agent.\n"); } else { } ret = -1; goto ldv_43790; } else { } i = i + 1; ldv_43792: ; if (i < loop) { goto ldv_43791; } else { } ldv_43790: ; return (ret); } } static int qla82xx_fw_load_from_flash(struct qla_hw_data *ha ) { int i ; long size ; long flashaddr ; long memaddr ; u64 data ; u32 high ; u32 low ; int tmp ; int tmp___0 ; { size = 0L; flashaddr = (long )(ha->flt_region_bootload << 2); memaddr = 65536L; size = 122880L; i = 0; goto ldv_43804; ldv_43803: tmp = qla82xx_rom_fast_read(ha, (int )flashaddr, (int *)(& low)); if (tmp != 0) { return (-1); } else { tmp___0 = qla82xx_rom_fast_read(ha, (int )((unsigned int )flashaddr + 4U), (int *)(& high)); if (tmp___0 != 0) { return (-1); } else { } } data = ((unsigned long long )high << 32) | (unsigned long long )low; qla82xx_pci_mem_write_2M(ha, (u64 )memaddr, (void *)(& data), 8); flashaddr = flashaddr + 8L; memaddr = memaddr + 8L; if (((unsigned int )i & 4095U) == 0U) { msleep(1U); } else { } i = i + 1; ldv_43804: ; if ((long )i < size) { goto ldv_43803; } else { } __const_udelay(429500UL); ldv_read_lock(& ha->hw_lock); qla82xx_wr_32(ha, 118489112UL, 4128U); qla82xx_wr_32(ha, 154140680UL, 8388638U); ldv_read_unlock(& ha->hw_lock); return (0); } } int qla82xx_pci_mem_read_2M(struct qla_hw_data *ha , 
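/* qla82xx_pci_mem_read_2M(): read up to 8 bytes of ISP82xx memory. Addresses outside the
 * direct-map range are read through the memory agent registers at mem_crb (one or two 16-byte
 * test-agent line reads), polling the control register and rate-limiting error reports. */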
u64 off , void *data , int size ) { int i ; int j ; int k ; int start ; int end ; int loop ; int sz[2U] ; int off0[2U] ; int shift_amount ; uint32_t temp ; uint64_t off8 ; uint64_t val ; uint64_t mem_crb ; uint64_t word[2U] ; int tmp ; unsigned long tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { j = 0; word[0] = 0ULL; word[1] = 0ULL; if (off > 12884901887ULL && off <= 12952010751ULL) { mem_crb = 131072000ULL; } else { mem_crb = 102760448ULL; tmp___0 = qla82xx_pci_mem_bound_check(ha, off, size); if (tmp___0 == 0UL) { tmp = qla82xx_pci_mem_read_direct(ha, off, data, size); return (tmp); } else { } } off8 = off & 4294967280ULL; off0[0] = (int )off & 15; sz[0] = size < 16 - off0[0] ? size : 16 - off0[0]; shift_amount = 4; loop = (((off0[0] + size) + -1) >> shift_amount) + 1; off0[1] = 0; sz[1] = size - sz[0]; i = 0; goto ldv_43835; ldv_43834: temp = (uint32_t )(i << shift_amount) + (uint32_t )off8; qla82xx_wr_32(ha, (ulong )(mem_crb + 148ULL), temp); temp = 0U; qla82xx_wr_32(ha, (ulong )(mem_crb + 152ULL), temp); temp = 2U; qla82xx_wr_32(ha, (ulong )(mem_crb + 144ULL), temp); temp = 3U; qla82xx_wr_32(ha, (ulong )(mem_crb + 144ULL), temp); j = 0; goto ldv_43828; ldv_43827: tmp___1 = qla82xx_rd_32(ha, (ulong )(mem_crb + 144ULL)); temp = (uint32_t )tmp___1; if ((temp & 8U) == 0U) { goto ldv_43826; } else { } j = j + 1; ldv_43828: ; if (j <= 999) { goto ldv_43827; } else { } ldv_43826: ; if (j > 999) { tmp___2 = __printk_ratelimit("qla82xx_pci_mem_read_2M"); if (tmp___2 != 0) { dev_err((struct device const *)(& (ha->pdev)->dev), "failed to read through agent.\n"); } else { } goto ldv_43830; } else { } start = off0[i] >> 2; end = ((off0[i] + sz[i]) + -1) >> 2; k = start; goto ldv_43832; ldv_43831: tmp___3 = qla82xx_rd_32(ha, (ulong )((uint64_t )((k + 42) * 4) + mem_crb)); temp = (uint32_t )tmp___3; word[i] = word[i] | ((unsigned long long )temp << (k & 1) * 32); k = k + 1; ldv_43832: ; if (k <= end) { goto ldv_43831; } else { } i = i + 1; ldv_43835: ; if (i < loop) { goto ldv_43834; } else { } ldv_43830: ; if (j > 999) { return (-1); } else { } if ((off0[0] & 7) == 0) { val = word[0]; } else { val = ((word[0] >> off0[0] * 8) & ~ (0xffffffffffffffffULL << sz[0] * 8)) | ((word[1] & ~ (0xffffffffffffffffULL << sz[1] * 8)) << sz[0] * 8); } switch (size) { case 1: *((uint8_t *)data) = (uint8_t )val; goto ldv_43837; case 2: *((uint16_t *)data) = (uint16_t )val; goto ldv_43837; case 4: *((uint32_t *)data) = (uint32_t )val; goto ldv_43837; case 8: *((uint64_t *)data) = val; goto ldv_43837; } ldv_43837: ; return (0); } } static struct qla82xx_uri_table_desc *qla82xx_get_table_desc(u8 const *unirom , int section ) { uint32_t i ; struct qla82xx_uri_table_desc *directory ; __le32 offset ; __le32 tab_type ; __le32 entries ; { directory = (struct qla82xx_uri_table_desc *)unirom; entries = directory->num_entries; i = 0U; goto ldv_43851; ldv_43850: offset = directory->findex + directory->entry_size * i; tab_type = *((u32 *)unirom + ((unsigned long )offset + 8UL)); if ((__le32 )section == tab_type) { return ((struct qla82xx_uri_table_desc *)unirom + (unsigned long )offset); } else { } i = i + 1U; ldv_43851: ; if (i < entries) { goto ldv_43850; } else { } return ((struct qla82xx_uri_table_desc *)0); } } static struct qla82xx_uri_data_desc *qla82xx_get_data_desc(struct qla_hw_data *ha , u32 section , u32 idx_offset ) { u8 const *unirom ; int idx ; struct qla82xx_uri_table_desc *tab_desc ; __le32 offset ; { unirom = ((ha->hablob)->fw)->data; idx = *((int *)unirom + ((unsigned long )ha->file_prd_off + (unsigned 
long )idx_offset)); tab_desc = (struct qla82xx_uri_table_desc *)0; tab_desc = qla82xx_get_table_desc(unirom, (int )section); if ((unsigned long )tab_desc == (unsigned long )((struct qla82xx_uri_table_desc *)0)) { return ((struct qla82xx_uri_data_desc *)0); } else { } offset = tab_desc->findex + tab_desc->entry_size * (uint32_t )idx; return ((struct qla82xx_uri_data_desc *)unirom + (unsigned long )offset); } } static u8 *qla82xx_get_bootld_offset(struct qla_hw_data *ha ) { u32 offset ; struct qla82xx_uri_data_desc *uri_desc ; { offset = 65536U; uri_desc = (struct qla82xx_uri_data_desc *)0; if ((unsigned int )ha->fw_type == 3U) { uri_desc = qla82xx_get_data_desc(ha, 6U, 27U); if ((unsigned long )uri_desc != (unsigned long )((struct qla82xx_uri_data_desc *)0)) { offset = uri_desc->findex; } else { } } else { } return ((u8 *)((ha->hablob)->fw)->data + (unsigned long )offset); } } static __le32 qla82xx_get_fw_size(struct qla_hw_data *ha ) { struct qla82xx_uri_data_desc *uri_desc ; { uri_desc = (struct qla82xx_uri_data_desc *)0; if ((unsigned int )ha->fw_type == 3U) { uri_desc = qla82xx_get_data_desc(ha, 7U, 29U); if ((unsigned long )uri_desc != (unsigned long )((struct qla82xx_uri_data_desc *)0)) { return (uri_desc->size); } else { } } else { } return (*((u32 *)((ha->hablob)->fw)->data + 4097036U)); } } static u8 *qla82xx_get_fw_offs(struct qla_hw_data *ha ) { u32 offset ; struct qla82xx_uri_data_desc *uri_desc ; { offset = 1048576U; uri_desc = (struct qla82xx_uri_data_desc *)0; if ((unsigned int )ha->fw_type == 3U) { uri_desc = qla82xx_get_data_desc(ha, 7U, 29U); if ((unsigned long )uri_desc != (unsigned long )((struct qla82xx_uri_data_desc *)0)) { offset = uri_desc->findex; } else { } } else { } return ((u8 *)((ha->hablob)->fw)->data + (unsigned long )offset); } } int qla82xx_pci_region_offset(struct pci_dev *pdev , int region ) { unsigned long val ; u32 control ; { val = 0UL; switch (region) { case 0: val = 0UL; goto ldv_43883; case 1: pci_read_config_dword((struct pci_dev const *)pdev, 68, & control); val = (unsigned long )(control + 8192U); goto ldv_43883; } ldv_43883: ; return ((int )val); } } int qla82xx_iospace_config(struct qla_hw_data *ha ) { uint32_t len ; int tmp ; void *tmp___0 ; void *tmp___1 ; uint8_t tmp___2 ; { len = 0U; tmp = pci_request_regions(ha->pdev, "qla2xxx"); if (tmp != 0) { ql_log_pci(0U, ha->pdev, 12, "Failed to reserver selected regions.\n"); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[0].flags & 512UL) == 0UL) { ql_log_pci(0U, ha->pdev, 13, "Region #0 not an MMIO resource, aborting.\n"); goto iospace_error_exit; } else { } len = (ha->pdev)->resource[0].start != 0ULL || (ha->pdev)->resource[0].end != (ha->pdev)->resource[0].start ? 
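/* Compute the length of BAR0 (equivalent to pci_resource_len(): end - start + 1, or 0 for an
 * unassigned resource) before remapping it as the 2M PCI base used for CRB access. */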
((uint32_t )(ha->pdev)->resource[0].end - (uint32_t )(ha->pdev)->resource[0].start) + 1U : 0U; tmp___0 = ioremap((ha->pdev)->resource[0].start, (unsigned long )len); ha->nx_pcibase = (unsigned long )tmp___0; if (ha->nx_pcibase == 0UL) { ql_log_pci(0U, ha->pdev, 14, "Cannot remap pcibase MMIO, aborting.\n"); goto iospace_error_exit; } else { } if ((ha->device_type & 262144U) != 0U) { ha->iobase = (device_reg_t *)ha->nx_pcibase; } else if ((ha->device_type & 16384U) != 0U) { ha->iobase = (device_reg_t *)(((unsigned long )((ha->pdev)->devfn << 11) + ha->nx_pcibase) + 770048UL); } else { } if (ql2xdbwr == 0) { tmp___1 = ioremap((ha->pdev)->resource[4].start + (resource_size_t )((ha->pdev)->devfn << 12), 4UL); ha->nxdb_wr_ptr = (unsigned long )tmp___1; if (ha->nxdb_wr_ptr == 0UL) { ql_log_pci(0U, ha->pdev, 15, "Cannot remap MMIO, aborting.\n"); goto iospace_error_exit; } else { } ha->nxdb_rd_ptr = (uint8_t *)(((unsigned long )((ha->pdev)->devfn * 8U) + ha->nx_pcibase) + 524288UL); } else { ha->nxdb_wr_ptr = (ha->pdev)->devfn == 6U ? 136323512UL : 136323516UL; } tmp___2 = 1U; ha->max_rsp_queues = tmp___2; ha->max_req_queues = tmp___2; ha->msix_count = (unsigned int )((uint16_t )ha->max_rsp_queues) + 1U; ql_dbg_pci(1048576U, ha->pdev, 49158, "nx_pci_base=%p iobase=%p max_req_queues=%d msix_count=%d.\n", (void *)ha->nx_pcibase, ha->iobase, (int )ha->max_req_queues, (int )ha->msix_count); ql_dbg_pci(1073741824U, ha->pdev, 16, "nx_pci_base=%p iobase=%p max_req_queues=%d msix_count=%d.\n", (void *)ha->nx_pcibase, ha->iobase, (int )ha->max_req_queues, (int )ha->msix_count); return (0); iospace_error_exit: ; return (-12); } } int qla82xx_pci_config(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; int ret ; { ha = vha->hw; pci_set_master(ha->pdev); ret = pci_set_mwi(ha->pdev); ha->chip_revision = (uint16_t )(ha->pdev)->revision; ql_dbg(1073741824U, vha, 67, "Chip revision:%d.\n", (int )ha->chip_revision); return (0); } } void qla82xx_reset_chip(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; (*((ha->isp_ops)->disable_intrs))(ha); return; } } void qla82xx_config_rings(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct device_reg_82xx *reg ; struct init_cb_81xx *icb ; struct req_que *req ; struct rsp_que *rsp ; { ha = vha->hw; reg = & (ha->iobase)->isp82; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); icb = (struct init_cb_81xx *)ha->init_cb; icb->request_q_outpointer = 0U; icb->response_q_inpointer = 0U; icb->request_q_length = req->length; icb->response_q_length = rsp->length; icb->request_q_address[0] = (unsigned int )req->dma; icb->request_q_address[1] = (unsigned int )(req->dma >> 32ULL); icb->response_q_address[0] = (unsigned int )rsp->dma; icb->response_q_address[1] = (unsigned int )(rsp->dma >> 32ULL); writel(0U, (void volatile *)(& reg->req_q_out)); writel(0U, (void volatile *)(& reg->rsp_q_in)); writel(0U, (void volatile *)(& reg->rsp_q_out)); return; } } static int qla82xx_fw_load_from_blob(struct qla_hw_data *ha ) { u64 *ptr64 ; u32 i ; u32 flashaddr ; u32 size ; __le64 data ; u8 *tmp ; int tmp___0 ; __le32 tmp___1 ; u8 *tmp___2 ; int tmp___3 ; { size = 122880U; tmp = qla82xx_get_bootld_offset(ha); ptr64 = (u64 *)tmp; flashaddr = 65536U; i = 0U; goto ldv_43916; ldv_43915: data = *(ptr64 + (unsigned long )i); tmp___0 = qla82xx_pci_mem_write_2M(ha, (u64 )flashaddr, (void *)(& data), 8); if (tmp___0 != 0) { return (-5); } else { } flashaddr = flashaddr + 8U; i = i + 1U; ldv_43916: ; if (i < size) { goto ldv_43915; } else { } flashaddr = 274432U; tmp___1 = 
qla82xx_get_fw_size(ha); size = tmp___1 / 8U; tmp___2 = qla82xx_get_fw_offs(ha); ptr64 = (u64 *)tmp___2; i = 0U; goto ldv_43919; ldv_43918: data = *(ptr64 + (unsigned long )i); tmp___3 = qla82xx_pci_mem_write_2M(ha, (u64 )flashaddr, (void *)(& data), 8); if (tmp___3 != 0) { return (-5); } else { } flashaddr = flashaddr + 8U; i = i + 1U; ldv_43919: ; if (i < size) { goto ldv_43918; } else { } __const_udelay(429500UL); qla82xx_wr_32(ha, 136323580UL, 305419896U); ldv_read_lock(& ha->hw_lock); qla82xx_wr_32(ha, 118489112UL, 4128U); qla82xx_wr_32(ha, 154140680UL, 8388638U); ldv_read_unlock(& ha->hw_lock); return (0); } } static int qla82xx_set_product_offset(struct qla_hw_data *ha ) { struct qla82xx_uri_table_desc *ptab_desc ; uint8_t const *unirom ; uint32_t i ; __le32 entries ; __le32 flags ; __le32 file_chiprev ; __le32 offset ; uint8_t chiprev ; int mn_present ; uint32_t flagbit ; { ptab_desc = (struct qla82xx_uri_table_desc *)0; unirom = (uint8_t const *)((ha->hablob)->fw)->data; chiprev = (uint8_t )ha->chip_revision; mn_present = 0; ptab_desc = qla82xx_get_table_desc(unirom, 0); if ((unsigned long )ptab_desc == (unsigned long )((struct qla82xx_uri_table_desc *)0)) { return (-1); } else { } entries = ptab_desc->num_entries; i = 0U; goto ldv_43935; ldv_43934: offset = ptab_desc->findex + ptab_desc->entry_size * i; flags = (unsigned int )*((int *)unirom + ((unsigned long )offset + 11UL)); file_chiprev = (unsigned int )*((int *)unirom + ((unsigned long )offset + 10UL)); flagbit = mn_present != 0 ? 1U : 2U; if ((__le32 )chiprev == file_chiprev && (int )((unsigned long long )flags >> (int )flagbit) & 1) { ha->file_prd_off = offset; return (0); } else { } i = i + 1U; ldv_43935: ; if (i < entries) { goto ldv_43934; } else { } return (-1); } } static int qla82xx_validate_firmware_blob(scsi_qla_host_t *vha , uint8_t fw_type ) { __le32 val ; uint32_t min_size ; struct qla_hw_data *ha ; struct firmware const *fw ; int tmp ; { ha = vha->hw; fw = (ha->hablob)->fw; ha->fw_type = fw_type; if ((unsigned int )fw_type == 3U) { tmp = qla82xx_set_product_offset(ha); if (tmp != 0) { return (-22); } else { } min_size = 819200U; } else { val = *((u32 *)fw->data + 16680U); if (val != 305419896U) { return (-22); } else { } min_size = 4194303U; } if ((unsigned long )fw->size < (unsigned long )min_size) { return (-22); } else { } return (0); } } static int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha ) { u32 val ; int retries ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; int tmp___1 ; { val = 0U; retries = 60; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ldv_43956: ldv_read_lock(& ha->hw_lock); tmp___0 = qla82xx_rd_32(ha, 136323664UL); val = (u32 )tmp___0; ldv_read_unlock(& ha->hw_lock); switch (val) { case 65281U: ; case 61455U: ; return (0); case 65535U: ; goto ldv_43954; default: ; goto ldv_43954; } ldv_43954: ql_log(2U, vha, 168, "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n", val, retries); msleep(500U); retries = retries - 1; if (retries != 0) { goto ldv_43956; } else { } ql_log(0U, vha, 169, "Cmd Peg initialization failed: 0x%x.\n", val); tmp___1 = qla82xx_rd_32(ha, 154140764UL); val = (u32 )tmp___1; ldv_read_lock(& ha->hw_lock); qla82xx_wr_32(ha, 136323664UL, 65535U); ldv_read_unlock(& ha->hw_lock); return (258); } } static int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha ) { u32 val ; int retries ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { val = 0U; retries = 60; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ldv_43969: ldv_read_lock(& ha->hw_lock); 
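/* Poll loop for CRB_RCVPEG_STATE: values 0xff01 or 0xf00f mean the receive peg finished
   initializing; anything else is retried every 500 ms, up to 60 times, after which the
   state is forced to 0xffff and 258 (QLA_FUNCTION_FAILED) is returned. */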
tmp___0 = qla82xx_rd_32(ha, 136323900UL); val = (u32 )tmp___0; ldv_read_unlock(& ha->hw_lock); switch (val) { case 65281U: ; case 61455U: ; return (0); case 65535U: ; goto ldv_43967; default: ; goto ldv_43967; } ldv_43967: ql_log(2U, vha, 171, "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n", val, retries); msleep(500U); retries = retries - 1; if (retries != 0) { goto ldv_43969; } else { } ql_log(0U, vha, 172, "Rcv Peg initializatin failed: 0x%x.\n", val); ldv_read_lock(& ha->hw_lock); qla82xx_wr_32(ha, 136323900UL, 65535U); ldv_read_unlock(& ha->hw_lock); return (258); } } static struct qla82xx_legacy_intr_set legacy_intr[8U] = { {128U, 101777688U, 101777704U, 101789696U}, {256U, 101777760U, 101777776U, 101789700U}, {512U, 101777764U, 101777780U, 101789704U}, {1024U, 101777768U, 101777784U, 101789708U}, {2048U, 101778272U, 101778288U, 101789712U}, {4096U, 101778276U, 101778292U, 101789716U}, {8192U, 101778280U, 101778296U, 101789720U}, {16384U, 101778284U, 101778300U, 101789724U}}; void qla82xx_mbx_completion(scsi_qla_host_t *vha , uint16_t mb0 ) { uint16_t cnt ; uint16_t *wptr ; struct qla_hw_data *ha ; struct device_reg_82xx *reg ; { ha = vha->hw; reg = & (ha->iobase)->isp82; wptr = (uint16_t *)(& reg->mailbox_out) + 1UL; ha->flags.mbox_int = 1U; ha->mailbox_out[0] = mb0; cnt = 1U; goto ldv_43981; ldv_43980: ha->mailbox_out[(int )cnt] = readw((void const volatile *)wptr); wptr = wptr + 1; cnt = (uint16_t )((int )cnt + 1); ldv_43981: ; if ((int )((unsigned short )ha->mbx_count) > (int )cnt) { goto ldv_43980; } else { } if ((unsigned long )ha->mcp == (unsigned long )((mbx_cmd_t *)0)) { ql_dbg(33554432U, vha, 20563, "MBX pointer OLD_ERROR.\n"); } else { } return; } } irqreturn_t qla82xx_intr_handler(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_82xx *reg ; int status ; int status1 ; unsigned long flags ; unsigned long iter ; uint32_t stat ; uint16_t mb[4U] ; raw_spinlock_t *tmp ; void *tmp___0 ; unsigned int tmp___1 ; unsigned long tmp___2 ; { status = 0; status1 = 0; stat = 0U; rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 45139, "%s: NULL response queue pointer.\n", "qla82xx_intr_handler"); return (0); } else { } ha = rsp->hw; if (*((unsigned long *)ha + 2UL) == 0UL) { status = qla82xx_rd_32(ha, 101777664UL); if ((ha->nx_legacy_intr.int_vec_bit & (uint32_t )status) == 0U) { return (0); } else { } status1 = qla82xx_rd_32(ha, 101785708UL); if ((status1 & 768) != 512) { return (0); } else { } } else { } qla82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_status_reg, 4294967295U); qla82xx_rd_32(ha, 101777664UL); qla82xx_rd_32(ha, 101777664UL); reg = & (ha->iobase)->isp82; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; iter = 1UL; goto ldv_44010; ldv_44009: tmp___1 = readl((void const volatile *)(& reg->host_int)); if (tmp___1 != 0U) { stat = readl((void const volatile *)(& reg->host_status)); switch (stat & 255U) { case 1U: ; case 2U: ; case 16U: ; case 17U: qla82xx_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; goto ldv_44005; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((void const volatile *)(& reg->mailbox_out) + 1U); mb[2] = readw((void const volatile *)(& reg->mailbox_out) + 2U); mb[3] = readw((void const volatile *)(& reg->mailbox_out) + 3U); qla2x00_async_event(vha, rsp, (uint16_t 
*)(& mb)); goto ldv_44005; case 19U: qla24xx_process_response_queue(vha, rsp); goto ldv_44005; default: ql_dbg(33554432U, vha, 20564, "Unrecognized interrupt type (%d).\n", stat & 255U); goto ldv_44005; } ldv_44005: ; } else { } writel(0U, (void volatile *)(& reg->host_int)); ldv_44010: tmp___2 = iter; iter = iter - 1UL; if (tmp___2 != 0UL) { goto ldv_44009; } else { } qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); if (*((unsigned long *)ha + 2UL) == 0UL) { qla82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_mask_reg, 64511U); } else { } return (1); } } irqreturn_t qla82xx_msix_default(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_82xx *reg ; int status ; unsigned long flags ; uint32_t stat ; uint16_t mb[4U] ; raw_spinlock_t *tmp ; void *tmp___0 ; unsigned int tmp___1 ; { status = 0; stat = 0U; rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { printk("\016%s(): NULL response queue pointer.\n", "qla82xx_msix_default"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->isp82; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; tmp___1 = readl((void const volatile *)(& reg->host_int)); if (tmp___1 != 0U) { stat = readl((void const volatile *)(& reg->host_status)); switch (stat & 255U) { case 1U: ; case 2U: ; case 16U: ; case 17U: qla82xx_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; goto ldv_44032; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((void const volatile *)(& reg->mailbox_out) + 1U); mb[2] = readw((void const volatile *)(& reg->mailbox_out) + 2U); mb[3] = readw((void const volatile *)(& reg->mailbox_out) + 3U); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_44032; case 19U: qla24xx_process_response_queue(vha, rsp); goto ldv_44032; default: ql_dbg(33554432U, vha, 20545, "Unrecognized interrupt type (%d).\n", stat & 255U); goto ldv_44032; } ldv_44032: ; } else { } writel(0U, (void volatile *)(& reg->host_int)); qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } irqreturn_t qla82xx_msix_rsp_q(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_82xx *reg ; unsigned long flags ; raw_spinlock_t *tmp ; void *tmp___0 ; { rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { printk("\016%s(): NULL response queue pointer.\n", "qla82xx_msix_rsp_q"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->isp82; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; qla24xx_process_response_queue(vha, rsp); writel(0U, (void volatile *)(& reg->host_int)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } void qla82xx_poll(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_82xx *reg ; int status ; uint32_t stat ; uint16_t mb[4U] ; unsigned long flags ; raw_spinlock_t *tmp ; void *tmp___0 ; unsigned int tmp___1 ; { status = 0; rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { printk("\016%s(): NULL response queue pointer.\n", "qla82xx_poll"); return; } else { } ha = rsp->hw; 
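/* qla82xx_poll: polled variant of the interrupt handlers above -- a single pass over
   host_int/host_status under hardware_lock, dispatching mailbox completions
   (0x01/0x02/0x10/0x11), async events (0x12) and response-queue entries (0x13),
   without re-arming the legacy interrupt mask. */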
reg = & (ha->iobase)->isp82; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___0; tmp___1 = readl((void const volatile *)(& reg->host_int)); if (tmp___1 != 0U) { stat = readl((void const volatile *)(& reg->host_status)); switch (stat & 255U) { case 1U: ; case 2U: ; case 16U: ; case 17U: qla82xx_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; goto ldv_44069; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((void const volatile *)(& reg->mailbox_out) + 1U); mb[2] = readw((void const volatile *)(& reg->mailbox_out) + 2U); mb[3] = readw((void const volatile *)(& reg->mailbox_out) + 3U); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_44069; case 19U: qla24xx_process_response_queue(vha, rsp); goto ldv_44069; default: ql_dbg(524288U, vha, 45075, "Unrecognized interrupt type (%d).\n", stat * 255U); goto ldv_44069; } ldv_44069: ; } else { } writel(0U, (void volatile *)(& reg->host_int)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } void qla82xx_enable_intrs(struct qla_hw_data *ha ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; qla82xx_mbx_intr_enable(vha); spin_lock_irq(& ha->hardware_lock); if ((ha->device_type & 262144U) != 0U) { qla8044_wr_reg(ha, 14536UL, 0U); } else { qla82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_mask_reg, 64511U); } spin_unlock_irq(& ha->hardware_lock); ha->interrupts_on = 1U; return; } } void qla82xx_disable_intrs(struct qla_hw_data *ha ) { scsi_qla_host_t *vha ; void *tmp ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; qla82xx_mbx_intr_disable(vha); spin_lock_irq(& ha->hardware_lock); if ((ha->device_type & 262144U) != 0U) { qla8044_wr_reg(ha, 14536UL, 1U); } else { qla82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_mask_reg, 1024U); } spin_unlock_irq(& ha->hardware_lock); ha->interrupts_on = 0U; return; } } void qla82xx_init_flags(struct qla_hw_data *ha ) { struct qla82xx_legacy_intr_set *nx_legacy_intr ; struct lock_class_key __key ; { __rwlock_init(& ha->hw_lock, "&ha->hw_lock", & __key); ha->qdr_sn_window = -1; ha->ddr_mn_window = 4294967295U; ha->curr_window = 255U; ha->portnum = (unsigned int )((uint16_t )(ha->pdev)->devfn) & 7U; nx_legacy_intr = (struct qla82xx_legacy_intr_set *)(& legacy_intr) + (unsigned long )ha->portnum; ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg; ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; return; } } __inline void qla82xx_set_idc_version(scsi_qla_host_t *vha ) { int idc_ver ; uint32_t drv_active ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp; if ((uint32_t )(1 << (int )ha->portnum * 4) == drv_active) { qla82xx_wr_32(ha, 136323444UL, 1U); ql_log(2U, vha, 45186, "IDC version updated to %d\n", 1); } else { idc_ver = qla82xx_rd_32(ha, 136323444UL); if (idc_ver != 1) { ql_log(2U, vha, 45187, "qla2xxx driver IDC version %d is not compatible with IDC version %d of the other drivers\n", 1, idc_ver); } else { } } return; } } void qla82xx_set_drv_active(scsi_qla_host_t *vha ) { uint32_t drv_active ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { ha = vha->hw; tmp = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp; if (drv_active == 4294967295U) { 
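/* A read of 0xffffffff means the shared drv_active CRB register was never initialized:
   clear it first, then OR in this function's bit (one nibble per PCI function,
   1 << portnum * 4). */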
qla82xx_wr_32(ha, 136323384UL, 0U); tmp___0 = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp___0; } else { } drv_active = (uint32_t )(1 << (int )ha->portnum * 4) | drv_active; qla82xx_wr_32(ha, 136323384UL, drv_active); return; } } void qla82xx_clear_drv_active(struct qla_hw_data *ha ) { uint32_t drv_active ; int tmp ; { tmp = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp; drv_active = (uint32_t )(~ (1 << (int )ha->portnum * 4)) & drv_active; qla82xx_wr_32(ha, 136323384UL, drv_active); return; } } __inline static int qla82xx_need_reset(struct qla_hw_data *ha ) { uint32_t drv_state ; int rval ; int tmp ; { if (*((unsigned long *)ha + 2UL) != 0UL) { return (1); } else { tmp = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp; rval = (int )((uint32_t )(1 << (int )ha->portnum * 4) & drv_state); return (rval); } } } __inline static void qla82xx_set_rst_ready(struct qla_hw_data *ha ) { uint32_t drv_state ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; int tmp___1 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp___0; if (drv_state == 4294967295U) { qla82xx_wr_32(ha, 136323396UL, 0U); tmp___1 = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp___1; } else { } drv_state = (uint32_t )(1 << (int )ha->portnum * 4) | drv_state; ql_dbg(1073741824U, vha, 187, "drv_state = 0x%08x.\n", drv_state); qla82xx_wr_32(ha, 136323396UL, drv_state); return; } } __inline static void qla82xx_clear_rst_ready(struct qla_hw_data *ha ) { uint32_t drv_state ; int tmp ; { tmp = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp; drv_state = (uint32_t )(~ (1 << (int )ha->portnum * 4)) & drv_state; qla82xx_wr_32(ha, 136323396UL, drv_state); return; } } __inline static void qla82xx_set_qsnt_ready(struct qla_hw_data *ha ) { uint32_t qsnt_state ; int tmp ; { tmp = qla82xx_rd_32(ha, 136323396UL); qsnt_state = (uint32_t )tmp; qsnt_state = (uint32_t )(2 << (int )ha->portnum * 4) | qsnt_state; qla82xx_wr_32(ha, 136323396UL, qsnt_state); return; } } void qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t qsnt_state ; int tmp ; { ha = vha->hw; tmp = qla82xx_rd_32(ha, 136323396UL); qsnt_state = (uint32_t )tmp; qsnt_state = (uint32_t )(~ (2 << (int )ha->portnum * 4)) & qsnt_state; qla82xx_wr_32(ha, 136323396UL, qsnt_state); return; } } static int qla82xx_load_fw(scsi_qla_host_t *vha ) { int rst ; struct fw_blob *blob ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; struct fw_blob *tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { ha = vha->hw; tmp = qla82xx_pinit_from_rom(vha); if (tmp != 0) { ql_log(0U, vha, 159, "Error during CRB initialization.\n"); return (258); } else { } __const_udelay(2147500UL); rst = qla82xx_rd_32(ha, 154140680UL); rst = rst & -285212673; qla82xx_wr_32(ha, 154140680UL, (u32 )rst); if (ql2xfwloadbin == 2) { goto try_blob_fw; } else { } ql_log(2U, vha, 160, "Attempting to load firmware from flash.\n"); tmp___0 = qla82xx_fw_load_from_flash(ha); if (tmp___0 == 0) { ql_log(2U, vha, 161, "Firmware loaded successfully from flash.\n"); return (0); } else { ql_log(1U, vha, 264, "Firmware load from flash failed.\n"); } try_blob_fw: ql_log(2U, vha, 162, "Attempting to load firmware from blob.\n"); tmp___1 = qla2x00_request_firmware(vha); ha->hablob = tmp___1; blob = tmp___1; if ((unsigned long )blob == (unsigned long )((struct fw_blob *)0)) { ql_log(0U, vha, 163, "Firmware image not present.\n"); goto fw_load_failed; } else { } tmp___3 = 
qla82xx_validate_firmware_blob(vha, 4); if (tmp___3 != 0) { tmp___2 = qla82xx_validate_firmware_blob(vha, 3); if (tmp___2 != 0) { ql_log(0U, vha, 164, "No valid firmware image found.\n"); return (258); } else { } } else { } tmp___4 = qla82xx_fw_load_from_blob(ha); if (tmp___4 == 0) { ql_log(2U, vha, 165, "Firmware loaded successfully from binary blob.\n"); return (0); } else { ql_log(0U, vha, 166, "Firmware load failed for binary blob.\n"); blob->fw = (struct firmware const *)0; blob = (struct fw_blob *)0; goto fw_load_failed; } return (0); fw_load_failed: ; return (258); } } int qla82xx_start_firmware(scsi_qla_host_t *vha ) { uint16_t lnk ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; { ha = vha->hw; qla82xx_wr_32(ha, 136323788UL, 1431655765U); qla82xx_wr_32(ha, 136323664UL, 0U); qla82xx_wr_32(ha, 136323900UL, 0U); qla82xx_wr_32(ha, 136323240UL, 0U); qla82xx_wr_32(ha, 136323244UL, 0U); tmp = qla82xx_load_fw(vha); if (tmp != 0) { ql_log(0U, vha, 167, "Error trying to start fw.\n"); return (258); } else { } tmp___0 = qla82xx_check_cmdpeg_state(ha); if (tmp___0 != 0) { ql_log(0U, vha, 170, "Error during card handshake.\n"); return (258); } else { } pcie_capability_read_word(ha->pdev, 18, & lnk); ha->link_width = ((int )lnk >> 4) & 63; tmp___1 = qla82xx_check_rcvpeg_state(ha); return (tmp___1); } } static uint32_t *qla82xx_read_flash_data(scsi_qla_host_t *vha , uint32_t *dwptr , uint32_t faddr , uint32_t length ) { uint32_t i ; uint32_t val ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; i = 0U; goto ldv_44148; ldv_44147: tmp = qla82xx_rom_fast_read(ha, (int )faddr, (int *)(& val)); if (tmp != 0) { ql_log(1U, vha, 262, "Do ROM fast read failed.\n"); goto done_read; } else { } *(dwptr + (unsigned long )i) = val; i = i + 1U; faddr = faddr + 4U; ldv_44148: ; if (length / 4U > i) { goto ldv_44147; } else { } done_read: ; return (dwptr); } } static int qla82xx_unprotect_flash(struct qla_hw_data *ha ) { int ret ; uint32_t val ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(1U, vha, 45076, "ROM Lock failed.\n"); return (ret); } else { } ret = qla82xx_read_status_reg(ha, & val); if (ret < 0) { goto done_unprotect; } else { } val = val & 4294967235U; ret = qla82xx_write_status_reg(ha, val); if (ret < 0) { val = val | 60U; qla82xx_write_status_reg(ha, val); } else { } tmp___0 = qla82xx_write_disable_flash(ha); if (tmp___0 != 0) { ql_log(1U, vha, 45077, "Write disable failed.\n"); } else { } done_unprotect: qla82xx_rom_unlock(ha); return (ret); } } static int qla82xx_protect_flash(struct qla_hw_data *ha ) { int ret ; uint32_t val ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(1U, vha, 45078, "ROM Lock failed.\n"); return (ret); } else { } ret = qla82xx_read_status_reg(ha, & val); if (ret < 0) { goto done_protect; } else { } val = val | 60U; ret = qla82xx_write_status_reg(ha, val); if (ret < 0) { ql_log(1U, vha, 45079, "Write status register failed.\n"); } else { } tmp___0 = qla82xx_write_disable_flash(ha); if (tmp___0 != 0) { ql_log(1U, vha, 45080, "Write disable failed.\n"); } else { } done_protect: qla82xx_rom_unlock(ha); return (ret); } } static int qla82xx_erase_sector(struct qla_hw_data *ha , int addr ) { int ret ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { ret = 0; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; ret = 
ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(1U, vha, 45081, "ROM Lock failed.\n"); return (ret); } else { } qla82xx_flash_set_write_enable(ha); qla82xx_wr_32(ha, 154206216UL, (u32 )addr); qla82xx_wr_32(ha, 154206224UL, 3U); qla82xx_wr_32(ha, 154206212UL, 216U); tmp___0 = qla82xx_wait_rom_done(ha); if (tmp___0 != 0) { ql_log(1U, vha, 45082, "Error waiting for rom done.\n"); ret = -1; goto done; } else { } ret = qla82xx_flash_wait_write_finish(ha); done: qla82xx_rom_unlock(ha); return (ret); } } uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { { scsi_block_requests(vha->host); qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length); scsi_unblock_requests(vha->host); return (buf); } } static int qla82xx_write_flash_data(struct scsi_qla_host *vha , uint32_t *dwptr , uint32_t faddr , uint32_t dwords ) { int ret ; uint32_t liter ; uint32_t sec_mask ; uint32_t rest_addr ; dma_addr_t optrom_dma ; void *optrom ; int page_mode ; struct qla_hw_data *ha ; size_t __len ; void *__ret ; { optrom = (void *)0; page_mode = 0; ha = vha->hw; ret = -1; if ((page_mode != 0 && (faddr & 4095U) == 0U) && dwords > 1024U) { optrom = dma_alloc_attrs(& (ha->pdev)->dev, 4096UL, & optrom_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )optrom == (unsigned long )((void *)0)) { ql_log(1U, vha, 45083, "Unable to allocate memory for optrom burst write (%x KB).\n", 4); } else { } } else { } rest_addr = ha->fdt_block_size - 1U; sec_mask = ~ rest_addr; ret = qla82xx_unprotect_flash(ha); if (ret != 0) { ql_log(1U, vha, 45084, "Unable to unprotect flash for update.\n"); goto write_done; } else { } liter = 0U; goto ldv_44198; ldv_44197: ; if ((faddr & rest_addr) == 0U) { ret = qla82xx_erase_sector(ha, (int )faddr); if (ret != 0) { ql_log(1U, vha, 45085, "Unable to erase sector: address=%x.\n", faddr); goto ldv_44192; } else { } } else { } if ((unsigned long )optrom != (unsigned long )((void *)0) && liter + 1024U <= dwords) { __len = 4096UL; if (__len > 63UL) { __ret = __memcpy(optrom, (void const *)dwptr, __len); } else { __ret = __builtin_memcpy(optrom, (void const *)dwptr, __len); } ret = qla2x00_load_ram(vha, optrom_dma, ha->flash_data_off | faddr, 1024U); if (ret != 0) { ql_log(1U, vha, 45086, "Unable to burst-write optrom segment (%x/%x/%llx).\n", ret, ha->flash_data_off | faddr, optrom_dma); ql_log(1U, vha, 45087, "Reverting to slow-write.\n"); dma_free_attrs(& (ha->pdev)->dev, 4096UL, optrom, optrom_dma, (struct dma_attrs *)0); optrom = (void *)0; } else { liter = liter + 1023U; faddr = faddr + 1023U; dwptr = dwptr + 1023UL; goto ldv_44196; } } else { } ret = qla82xx_write_flash_dword(ha, faddr, *dwptr); if (ret != 0) { ql_dbg(524288U, vha, 45088, "Unable to program flash address=%x data=%x.\n", faddr, *dwptr); goto ldv_44192; } else { } ldv_44196: liter = liter + 1U; faddr = faddr + 4U; dwptr = dwptr + 1; ldv_44198: ; if (liter < dwords) { goto ldv_44197; } else { } ldv_44192: ret = qla82xx_protect_flash(ha); if (ret != 0) { ql_log(1U, vha, 45089, "Unable to protect flash after update.\n"); } else { } write_done: ; if ((unsigned long )optrom != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 4096UL, optrom, optrom_dma, (struct dma_attrs *)0); } else { } return (ret); } } int qla82xx_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { int rval ; { scsi_block_requests(vha->host); rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset, length >> 2); 
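/* length is a byte count while qla82xx_write_flash_data() takes a dword count, hence the
   >> 2; any non-zero result is collapsed to 258 before returning. */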
scsi_unblock_requests(vha->host); if (rval != 0) { rval = 258; } else { rval = 0; } return (rval); } } void qla82xx_start_iocbs(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct req_que *req ; struct device_reg_82xx *reg ; uint32_t dbval ; unsigned int tmp ; { ha = vha->hw; req = *(ha->req_q_map); req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } reg = & (ha->iobase)->isp82; dbval = (uint32_t )(((int )ha->portnum << 5) | 4); dbval = ((uint32_t )((int )req->id << 8) | dbval) | (uint32_t )((int )req->ring_index << 16); if (ql2xdbwr != 0) { qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); } else { writel(dbval, (void volatile *)ha->nxdb_wr_ptr); __asm__ volatile ("sfence": : : "memory"); goto ldv_44214; ldv_44213: writel(dbval, (void volatile *)ha->nxdb_wr_ptr); __asm__ volatile ("sfence": : : "memory"); ldv_44214: tmp = readl((void const volatile *)ha->nxdb_rd_ptr); if (tmp != dbval) { goto ldv_44213; } else { } } return; } } static void qla82xx_rom_lock_recovery(struct qla_hw_data *ha ) { scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = qla82xx_rom_lock(ha); if (tmp___0 != 0) { ql_log(2U, vha, 45090, "Resetting rom_lock.\n"); } else { } qla82xx_rom_unlock(ha); return; } } static int qla82xx_device_bootstrap(scsi_qla_host_t *vha ) { int rval ; int i ; int timeout ; uint32_t old_count ; uint32_t count ; struct qla_hw_data *ha ; int need_reset ; int peg_stuck ; int tmp ; unsigned long tmp___0 ; int tmp___1 ; { rval = 0; ha = vha->hw; need_reset = 0; peg_stuck = 1; need_reset = qla82xx_need_reset(ha); tmp = qla82xx_rd_32(ha, 136323248UL); old_count = (uint32_t )tmp; i = 0; goto ldv_44232; ldv_44231: tmp___0 = msleep_interruptible(200U); timeout = (int )tmp___0; if (timeout != 0) { qla82xx_wr_32(ha, 136323392UL, 6U); return (258); } else { } tmp___1 = qla82xx_rd_32(ha, 136323248UL); count = (uint32_t )tmp___1; if (count != old_count) { peg_stuck = 0; } else { } i = i + 1; ldv_44232: ; if (i <= 9) { goto ldv_44231; } else { } if (need_reset != 0) { if (peg_stuck != 0) { qla82xx_rom_lock_recovery(ha); } else { } goto dev_initialize; } else if (peg_stuck != 0) { qla82xx_rom_lock_recovery(ha); goto dev_initialize; } else { goto dev_ready; } return (rval); dev_initialize: ql_log(2U, vha, 158, "HW State: INITIALIZING.\n"); qla82xx_wr_32(ha, 136323392UL, 2U); qla82xx_idc_unlock(ha); rval = qla82xx_start_firmware(vha); qla82xx_idc_lock(ha); if (rval != 0) { ql_log(0U, vha, 173, "HW State: FAILED.\n"); qla82xx_clear_drv_active(ha); qla82xx_wr_32(ha, 136323392UL, 6U); return (rval); } else { } dev_ready: ql_log(2U, vha, 174, "HW State: READY.\n"); qla82xx_wr_32(ha, 136323392UL, 3U); return (0); } } static void qla82xx_need_qsnt_handler(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t dev_state ; uint32_t drv_state ; uint32_t drv_active ; unsigned long reset_timeout ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { ha = vha->hw; if (*((unsigned long *)vha + 19UL) != 0UL) { qla2x00_quiesce_io(vha); } else { } qla82xx_set_qsnt_ready(ha); reset_timeout = (unsigned long )jiffies + 7500UL; tmp = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp; tmp___0 = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp___0; drv_active = drv_active << 1; goto ldv_44251; ldv_44250: ; if ((long )((unsigned long )jiffies - reset_timeout) >= 0L) { ql_log(2U, 
vha, 45091, "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d DRV_STATE:%d.\n", (char *)"qla2xxx", drv_active, drv_state); qla82xx_wr_32(ha, 136323392UL, 3U); ql_log(2U, vha, 45093, "HW State: DEV_READY.\n"); qla82xx_idc_unlock(ha); qla2x00_perform_loop_resync(vha); qla82xx_idc_lock(ha); qla82xx_clear_qsnt_ready(vha); return; } else { } qla82xx_idc_unlock(ha); msleep(1000U); qla82xx_idc_lock(ha); tmp___1 = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp___1; tmp___2 = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp___2; drv_active = drv_active << 1; ldv_44251: ; if (drv_state != drv_active) { goto ldv_44250; } else { } tmp___3 = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp___3; if (dev_state == 5U) { ql_log(2U, vha, 45094, "HW State: DEV_QUIESCENT.\n"); qla82xx_wr_32(ha, 136323392UL, 7U); } else { } return; } } uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *vha , uint32_t curr_state ) { struct qla_hw_data *ha ; uint32_t dev_state ; int tmp ; { ha = vha->hw; ldv_44259: msleep(1000U); qla82xx_idc_lock(ha); tmp = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp; qla82xx_idc_unlock(ha); if (dev_state == curr_state) { goto ldv_44259; } else { } return (dev_state); } } void qla8xxx_dev_failed_handler(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; ql_log(0U, vha, 184, "Disabling the board.\n"); if ((ha->device_type & 16384U) != 0U) { qla82xx_clear_drv_active(ha); qla82xx_idc_unlock(ha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_clear_drv_active(vha); qla8044_idc_unlock(ha); } else { } vha->device_flags = vha->device_flags | 32U; qla2x00_abort_all_cmds(vha, 65536); qla2x00_mark_all_devices_lost(vha, 0); vha->flags.online = 0U; vha->flags.init_done = 0U; return; } } static void qla82xx_need_reset_handler(scsi_qla_host_t *vha ) { uint32_t dev_state ; uint32_t drv_state ; uint32_t drv_active ; uint32_t active_mask ; unsigned long reset_timeout ; struct qla_hw_data *ha ; struct req_que *req ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; char *tmp___6 ; char *tmp___7 ; int tmp___8 ; { active_mask = 0U; ha = vha->hw; req = *(ha->req_q_map); if (*((unsigned long *)vha + 19UL) != 0UL) { qla82xx_idc_unlock(ha); qla2x00_abort_isp_cleanup(vha); (*((ha->isp_ops)->get_flash_version))(vha, (void *)req->ring); (*((ha->isp_ops)->nvram_config))(vha); qla82xx_idc_lock(ha); } else { } tmp = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp; if (*((unsigned long *)ha + 2UL) == 0UL) { ql_dbg(524288U, vha, 45096, "reset_acknowledged by 0x%x\n", (int )ha->portnum); qla82xx_set_rst_ready(ha); } else { active_mask = (uint32_t )(~ (1 << (int )ha->portnum * 4)); drv_active = drv_active & active_mask; ql_dbg(524288U, vha, 45097, "active_mask: 0x%08x\n", active_mask); } reset_timeout = (unsigned long )(ha->fcoe_reset_timeout * 250U) + (unsigned long )jiffies; tmp___0 = qla82xx_rd_32(ha, 136323396UL); drv_state = (uint32_t )tmp___0; tmp___1 = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp___1; tmp___2 = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp___2; ql_dbg(524288U, vha, 45098, "drv_state: 0x%08x, drv_active: 0x%08x, dev_state: 0x%08x, active_mask: 0x%08x\n", drv_state, drv_active, dev_state, active_mask); goto ldv_44283; ldv_44282: ; if ((long )((unsigned long )jiffies - reset_timeout) >= 0L) { ql_log(1U, vha, 181, "Reset timeout.\n"); goto ldv_44281; } else { } qla82xx_idc_unlock(ha); msleep(1000U); qla82xx_idc_lock(ha); tmp___3 = qla82xx_rd_32(ha, 136323396UL); 
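/* Re-sample drv_state, drv_active and dev_state once per second (the IDC lock is dropped
   around the msleep) until every active function has posted reset-ready, the device state
   reaches 2 (INITIALIZING), or the reset timeout computed above expires. */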
drv_state = (uint32_t )tmp___3; tmp___4 = qla82xx_rd_32(ha, 136323384UL); drv_active = (uint32_t )tmp___4; if (*((unsigned long *)ha + 2UL) != 0UL) { drv_active = drv_active & active_mask; } else { } tmp___5 = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp___5; ldv_44283: ; if (drv_state != drv_active && dev_state != 2U) { goto ldv_44282; } else { } ldv_44281: ql_dbg(524288U, vha, 45099, "drv_state: 0x%08x, drv_active: 0x%08x, dev_state: 0x%08x, active_mask: 0x%08x\n", drv_state, drv_active, dev_state, active_mask); if (dev_state <= 7U) { tmp___6 = qdev_state(dev_state); tmp___7 = tmp___6; } else { tmp___7 = (char *)"Unknown"; } ql_log(2U, vha, 182, "Device state is 0x%x = %s.\n", dev_state, tmp___7); if (dev_state != 2U && dev_state != 1U) { ql_log(2U, vha, 183, "HW State: COLD/RE-INIT.\n"); qla82xx_wr_32(ha, 136323392UL, 1U); qla82xx_set_rst_ready(ha); if (ql2xmdenable != 0) { tmp___8 = qla82xx_md_collect(vha); if (tmp___8 != 0) { ql_log(1U, vha, 45100, "Minidump not collected.\n"); } else { } } else { ql_log(1U, vha, 45135, "Minidump disabled.\n"); } } else { } return; } } int qla82xx_check_md_needed(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint16_t fw_major_version ; uint16_t fw_minor_version ; uint16_t fw_subminor_version ; int rval ; { ha = vha->hw; rval = 0; fw_major_version = ha->fw_major_version; fw_minor_version = ha->fw_minor_version; fw_subminor_version = ha->fw_subminor_version; rval = qla2x00_get_fw_version(vha); if (rval != 0) { return (rval); } else { } if (ql2xmdenable != 0) { if (ha->fw_dumped == 0) { if (((int )ha->fw_major_version != (int )fw_major_version || (int )ha->fw_minor_version != (int )fw_minor_version) || (int )ha->fw_subminor_version != (int )fw_subminor_version) { ql_dbg(524288U, vha, 45101, "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d\n", (int )fw_major_version, (int )fw_minor_version, (int )fw_subminor_version, (int )ha->fw_major_version, (int )ha->fw_minor_version, (int )ha->fw_subminor_version); qla82xx_md_free(vha); qla82xx_md_prep(vha); } else { } } else { ql_log(2U, vha, 45102, "Firmware dump available to retrieve\n"); } } else { } return (rval); } } static int qla82xx_check_fw_alive(scsi_qla_host_t *vha ) { uint32_t fw_heartbeat_counter ; int status ; int tmp ; { status = 0; tmp = qla82xx_rd_32(vha->hw, 136323248UL); fw_heartbeat_counter = (uint32_t )tmp; if (fw_heartbeat_counter == 4294967295U) { ql_dbg(16777216U, vha, 24579, "FW heartbeat counter is 0xffffffff, returning status=%d.\n", status); return (status); } else { } if ((uint32_t )vha->fw_heartbeat_counter == fw_heartbeat_counter) { vha->seconds_since_last_heartbeat = vha->seconds_since_last_heartbeat + 1; if (vha->seconds_since_last_heartbeat == 2) { vha->seconds_since_last_heartbeat = 0; status = 1; } else { } } else { vha->seconds_since_last_heartbeat = 0; } vha->fw_heartbeat_counter = (int )fw_heartbeat_counter; if (status != 0) { ql_dbg(16777216U, vha, 24580, "Returning status=%d.\n", status); } else { } return (status); } } int qla82xx_device_state_handler(scsi_qla_host_t *vha ) { uint32_t dev_state ; uint32_t old_dev_state ; int rval ; unsigned long dev_init_timeout ; struct qla_hw_data *ha ; int loopcount ; int tmp ; char *tmp___0 ; char *tmp___1 ; int tmp___2 ; char *tmp___3 ; char *tmp___4 ; { rval = 0; ha = vha->hw; loopcount = 0; qla82xx_idc_lock(ha); if (*((unsigned long *)vha + 19UL) == 0UL) { qla82xx_set_drv_active(vha); qla82xx_set_idc_version(vha); } else { } tmp = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp; 
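/* IDC state machine: loop on the dev_state CRB register until it reads 3 (READY).
   1 (COLD) triggers device bootstrap, 4 (NEED RESET) runs the reset handler, 5 the
   quiescent handler; 2 (INITIALIZING) and 7 (QUIESCENT) are simply waited out, and
   6 (FAILED) disables the board and returns 258. */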
old_dev_state = dev_state; if (dev_state <= 7U) { tmp___0 = qdev_state(dev_state); tmp___1 = tmp___0; } else { tmp___1 = (char *)"Unknown"; } ql_log(2U, vha, 155, "Device state is 0x%x = %s.\n", dev_state, tmp___1); dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; ldv_44324: ; if ((long )((unsigned long )jiffies - dev_init_timeout) >= 0L) { ql_log(0U, vha, 156, "Device init failed.\n"); rval = 258; goto ldv_44312; } else { } tmp___2 = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp___2; if (old_dev_state != dev_state) { loopcount = 0; old_dev_state = dev_state; } else { } if (loopcount <= 4) { if (dev_state <= 7U) { tmp___3 = qdev_state(dev_state); tmp___4 = tmp___3; } else { tmp___4 = (char *)"Unknown"; } ql_log(2U, vha, 157, "Device state is 0x%x = %s.\n", dev_state, tmp___4); } else { } switch (dev_state) { case 3U: ha->flags.nic_core_reset_owner = 0U; goto rel_lock; case 1U: rval = qla82xx_device_bootstrap(vha); goto ldv_44316; case 2U: qla82xx_idc_unlock(ha); msleep(1000U); qla82xx_idc_lock(ha); goto ldv_44316; case 4U: ; if (ql2xdontresethba == 0) { qla82xx_need_reset_handler(vha); } else { qla82xx_idc_unlock(ha); msleep(1000U); qla82xx_idc_lock(ha); } dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; goto ldv_44316; case 5U: qla82xx_need_qsnt_handler(vha); dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; goto ldv_44316; case 7U: ; if (*((unsigned long *)ha + 2UL) != 0UL) { goto rel_lock; } else { } qla82xx_idc_unlock(ha); msleep(1000U); qla82xx_idc_lock(ha); dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; goto ldv_44316; case 6U: qla8xxx_dev_failed_handler(vha); rval = 258; goto exit; default: qla82xx_idc_unlock(ha); msleep(1000U); qla82xx_idc_lock(ha); } ldv_44316: loopcount = loopcount + 1; goto ldv_44324; ldv_44312: ; rel_lock: qla82xx_idc_unlock(ha); exit: ; return (rval); } } static int qla82xx_check_temp(scsi_qla_host_t *vha ) { uint32_t temp ; uint32_t temp_state ; uint32_t temp_val ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla82xx_rd_32(ha, 136324020UL); temp = (uint32_t )tmp; temp_state = temp & 65535U; temp_val = temp >> 16; if (temp_state == 3U) { ql_log(1U, vha, 24590, "Device temperature %d degrees C exceeds maximum allowed. Hardware has been shut down.\n", temp_val); return (1); } else if (temp_state == 2U) { ql_log(1U, vha, 24591, "Device temperature %d degrees C exceeds operating range. 
Immediate action needed.\n", temp_val); } else { } return (0); } } int qla82xx_read_temperature(scsi_qla_host_t *vha ) { uint32_t temp ; int tmp ; { tmp = qla82xx_rd_32(vha->hw, 136324020UL); temp = (uint32_t )tmp; return ((int )(temp >> 16)); } } void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; if (*((unsigned long *)ha + 2UL) != 0UL) { ha->flags.mbox_int = 1U; ha->flags.mbox_busy = 0U; ql_log(1U, vha, 24592, "Doing premature completion of mbx command.\n"); tmp = test_and_clear_bit(2L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (tmp != 0) { complete(& ha->mbx_intr_comp); } else { } } else { } return; } } void qla82xx_watchdog(scsi_qla_host_t *vha ) { uint32_t dev_state ; uint32_t halt_status ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; { ha = vha->hw; if (*((unsigned long *)ha + 2UL) == 0UL) { tmp = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp; tmp___11 = qla82xx_check_temp(vha); if (tmp___11 != 0) { set_bit(17L, (unsigned long volatile *)(& vha->dpc_flags)); ha->flags.isp82xx_fw_hung = 1U; qla82xx_clear_pending_mbx(vha); } else if (dev_state == 4U) { tmp___10 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___10 == 0) { ql_log(1U, vha, 24577, "Adapter reset needed.\n"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { goto _L___2; } } else _L___2: /* CIL Label */ if (dev_state == 5U) { tmp___9 = constant_test_bit(20L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___9 == 0) { ql_log(1U, vha, 24578, "Quiescent needed.\n"); set_bit(20L, (unsigned long volatile *)(& vha->dpc_flags)); } else { goto _L___1; } } else _L___1: /* CIL Label */ if (dev_state == 6U) { tmp___8 = constant_test_bit(17L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___8 == 0) { if (*((unsigned long *)vha + 19UL) != 0UL) { ql_log(1U, vha, 45141, "Adapter state is failed. Offlining.\n"); set_bit(17L, (unsigned long volatile *)(& vha->dpc_flags)); ha->flags.isp82xx_fw_hung = 1U; qla82xx_clear_pending_mbx(vha); } else { goto _L___0; } } else { goto _L___0; } } else { _L___0: /* CIL Label */ tmp___7 = qla82xx_check_fw_alive(vha); if (tmp___7 != 0) { ql_dbg(16777216U, vha, 24593, "disabling pause transmit on port 0 & 1.\n"); qla82xx_wr_32(ha, 106954904UL, 9U); tmp___0 = qla82xx_rd_32(ha, 136323240UL); halt_status = (uint32_t )tmp___0; tmp___1 = qla82xx_rd_32(ha, 116391996UL); tmp___2 = qla82xx_rd_32(ha, 121634876UL); tmp___3 = qla82xx_rd_32(ha, 120586300UL); tmp___4 = qla82xx_rd_32(ha, 119537724UL); tmp___5 = qla82xx_rd_32(ha, 118489148UL); tmp___6 = qla82xx_rd_32(ha, 136323244UL); ql_log(2U, vha, 24581, "dumping hw/fw registers:.\n PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n PEG_NET_4_PC: 0x%x.\n", halt_status, tmp___6, tmp___5, tmp___4, tmp___3, tmp___2, tmp___1); if ((halt_status & 536870656U) >> 8 == 103U) { ql_log(1U, vha, 45138, "Firmware aborted with error code 0x00006700. 
Device is being reset.\n"); } else { } if ((int )halt_status < 0) { set_bit(17L, (unsigned long volatile *)(& vha->dpc_flags)); } else { ql_log(2U, vha, 24582, "Detect abort needed.\n"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } ha->flags.isp82xx_fw_hung = 1U; ql_log(1U, vha, 24583, "Firmware hung.\n"); qla82xx_clear_pending_mbx(vha); } else { } } } else { } return; } } int qla82xx_load_risc(scsi_qla_host_t *vha , uint32_t *srisc_addr ) { int rval ; struct qla_hw_data *ha ; { rval = -1; ha = vha->hw; if ((ha->device_type & 16384U) != 0U) { rval = qla82xx_device_state_handler(vha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); qla83xx_reset_ownership(vha); qla8044_idc_unlock(ha); rval = qla8044_device_state_handler(vha); } else { } return (rval); } } void qla82xx_set_reset_owner(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t dev_state ; int tmp ; int tmp___0 ; char *tmp___1 ; char *tmp___2 ; { ha = vha->hw; dev_state = 0U; if ((ha->device_type & 16384U) != 0U) { tmp = qla82xx_rd_32(ha, 136323392UL); dev_state = (uint32_t )tmp; } else if ((ha->device_type & 262144U) != 0U) { tmp___0 = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp___0; } else { } if (dev_state == 3U) { ql_log(2U, vha, 45103, "HW State: NEED RESET\n"); if ((ha->device_type & 16384U) != 0U) { qla82xx_wr_32(ha, 136323392UL, 4U); ha->flags.nic_core_reset_owner = 1U; ql_dbg(524288U, vha, 45104, "reset_owner is 0x%x\n", (int )ha->portnum); } else if ((ha->device_type & 262144U) != 0U) { qla8044_wr_direct(vha, 4U, 4U); } else { } } else { if (dev_state <= 7U) { tmp___1 = qdev_state(dev_state); tmp___2 = tmp___1; } else { tmp___2 = (char *)"Unknown"; } ql_log(2U, vha, 45105, "Device state is 0x%x = %s.\n", dev_state, tmp___2); } return; } } int qla82xx_abort_isp(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; int tmp ; { rval = -1; ha = vha->hw; if ((vha->device_flags & 32U) != 0U) { ql_log(1U, vha, 32804, "Device in failed state, exiting.\n"); return (0); } else { } ha->flags.nic_core_reset_hdlr_active = 1U; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); if ((ha->device_type & 16384U) != 0U) { rval = qla82xx_device_state_handler(vha); } else if ((ha->device_type & 262144U) != 0U) { qla8044_idc_lock(ha); qla83xx_reset_ownership(vha); qla8044_idc_unlock(ha); rval = qla8044_device_state_handler(vha); } else { } qla82xx_idc_lock(ha); qla82xx_clear_rst_ready(ha); qla82xx_idc_unlock(ha); if (rval == 0) { ha->flags.isp82xx_fw_hung = 0U; ha->flags.nic_core_reset_hdlr_active = 0U; qla82xx_restart_isp(vha); } else { } if (rval != 0) { vha->flags.online = 1U; tmp = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp != 0) { if (ha->isp_abort_cnt == 0U) { ql_log(1U, vha, 32807, "ISP error recover failed - board disabled.\n"); (*((ha->isp_ops)->reset_adapter))(vha); vha->flags.online = 0U; clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); rval = 0; } else { ha->isp_abort_cnt = ha->isp_abort_cnt - 1U; ql_log(1U, vha, 32822, "ISP abort - retry remaining %d.\n", ha->isp_abort_cnt); rval = 258; } } else { ha->isp_abort_cnt = 5U; ql_dbg(4194304U, vha, 32809, "ISP error recovery - retrying (%d) more times.\n", ha->isp_abort_cnt); set_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); rval = 258; } } else { } return (rval); } } int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha ) { int rval ; { rval = 258; if (*((unsigned long *)vha + 19UL) != 0UL) { qla2x00_abort_isp_cleanup(vha); } else { } 
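/* FCoE context reset: stop the firmware, then restart the ISP. The wait helper that
   follows polls dpc_flags bits 18 and 3 in 250-jiffy slices until both clear or the
   75000-jiffy deadline passes. */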
qla2x00_try_to_stop_firmware(vha); rval = qla82xx_restart_isp(vha); return (rval); } } int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha ) { int status ; unsigned long wait_reset ; long volatile __ret ; struct task_struct *tmp ; struct task_struct *tmp___0 ; struct task_struct *tmp___1 ; struct task_struct *tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; { status = 258; wait_reset = (unsigned long )jiffies + 75000UL; goto ldv_44387; ldv_44386: __ret = 2L; switch (8UL) { case 1UL: tmp = get_current(); __asm__ volatile ("xchgb %b0, %1\n": "+q" (__ret), "+m" (tmp->state): : "memory", "cc"); goto ldv_44379; case 2UL: tmp___0 = get_current(); __asm__ volatile ("xchgw %w0, %1\n": "+r" (__ret), "+m" (tmp___0->state): : "memory", "cc"); goto ldv_44379; case 4UL: tmp___1 = get_current(); __asm__ volatile ("xchgl %0, %1\n": "+r" (__ret), "+m" (tmp___1->state): : "memory", "cc"); goto ldv_44379; case 8UL: tmp___2 = get_current(); __asm__ volatile ("xchgq %q0, %1\n": "+r" (__ret), "+m" (tmp___2->state): : "memory", "cc"); goto ldv_44379; default: __xchg_wrong_size(); } ldv_44379: schedule_timeout(250L); tmp___3 = constant_test_bit(18L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___3 == 0) { tmp___4 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___4 == 0) { status = 0; goto ldv_44385; } else { } } else { } ldv_44387: tmp___5 = constant_test_bit(18L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___5 != 0) { goto _L; } else { tmp___6 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___6 != 0) { _L: /* CIL Label */ if ((long )((unsigned long )jiffies - wait_reset) < 0L) { goto ldv_44386; } else { goto ldv_44385; } } else { goto ldv_44385; } } ldv_44385: ql_dbg(524288U, vha, 45095, "%s: status=%d.\n", "qla2x00_wait_for_fcoe_ctx_reset", status); return (status); } } void qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha ) { int i ; int fw_state ; unsigned long flags ; struct qla_hw_data *ha ; int cnt ; int que ; srb_t *sp ; struct req_que *req ; raw_spinlock_t *tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; int tmp___2 ; { fw_state = 0; ha = vha->hw; if (*((unsigned long *)ha + 2UL) == 0UL) { i = 0; goto ldv_44398; ldv_44397: msleep(1000U); if ((ha->device_type & 16384U) != 0U) { fw_state = qla82xx_check_fw_alive(vha); } else if ((ha->device_type & 262144U) != 0U) { fw_state = qla8044_check_fw_alive(vha); } else { } if (fw_state != 0) { ha->flags.isp82xx_fw_hung = 1U; qla82xx_clear_pending_mbx(vha); goto ldv_44396; } else { } i = i + 1; ldv_44398: ; if (i <= 1) { goto ldv_44397; } else { } ldv_44396: ; } else { } ql_dbg(1073741824U, vha, 176, "Entered %s fw_hung=%d.\n", "qla82xx_chip_reset_cleanup", (int )ha->flags.isp82xx_fw_hung); if (*((unsigned long *)ha + 2UL) == 0UL) { tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); que = 0; goto ldv_44415; ldv_44414: req = *(ha->req_q_map + (unsigned long )que); if ((unsigned long )req == (unsigned long )((struct req_que *)0)) { goto ldv_44407; } else { } cnt = 1; goto ldv_44412; ldv_44411: sp = *(req->outstanding_cmds + (unsigned long )cnt); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { if ((unsigned long )sp->u.scmd.ctx == (unsigned long )((void *)0) || ((int )sp->flags & 4096) != 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___0 = (*((ha->isp_ops)->abort_command))(sp); if (tmp___0 != 0) { ql_log(2U, vha, 177, "mbx abort failed.\n"); } else { ql_log(2U, vha, 178, "mbx abort 
success.\n"); } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); } else { } } else { } cnt = cnt + 1; ldv_44412: ; if ((int )req->num_outstanding_cmds > cnt) { goto ldv_44411; } else { } ldv_44407: que = que + 1; ldv_44415: ; if ((int )ha->max_req_queues > que) { goto ldv_44414; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___2 = qla2x00_eh_wait_for_pending_commands(vha, 0U, 0U, 0); if (tmp___2 != 0) { ql_dbg(1073741824U, vha, 179, "Done wait for pending commands.\n"); } else { } } else { } return; } } static int qla82xx_minidump_process_control(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; struct qla82xx_md_entry_crb *crb_entry ; uint32_t read_value ; uint32_t opcode ; uint32_t poll_time ; uint32_t addr ; uint32_t index ; uint32_t crb_addr ; unsigned long wtime ; struct qla82xx_md_template_hdr *tmplt_hdr ; uint32_t rval ; int i ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { ha = vha->hw; rval = 0U; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr; crb_addr = crb_entry->addr; i = 0; goto ldv_44443; ldv_44442: opcode = (uint32_t )crb_entry->crb_ctrl.opcode; if ((int )opcode & 1) { qla82xx_md_rw_32(ha, crb_addr, crb_entry->value_1, 1); opcode = opcode & 4294967294U; } else { } if ((opcode & 2U) != 0U) { tmp = qla82xx_md_rw_32(ha, crb_addr, 0U, 0); read_value = (uint32_t )tmp; qla82xx_md_rw_32(ha, crb_addr, read_value, 1); opcode = opcode & 4294967293U; } else { } if ((opcode & 4U) != 0U) { tmp___0 = qla82xx_md_rw_32(ha, crb_addr, 0U, 0); read_value = (uint32_t )tmp___0; read_value = crb_entry->value_2 & read_value; opcode = opcode & 4294967291U; if ((opcode & 8U) != 0U) { read_value = crb_entry->value_3 | read_value; opcode = opcode & 4294967287U; } else { } qla82xx_md_rw_32(ha, crb_addr, read_value, 1); } else { } if ((opcode & 8U) != 0U) { tmp___1 = qla82xx_md_rw_32(ha, crb_addr, 0U, 0); read_value = (uint32_t )tmp___1; read_value = crb_entry->value_3 | read_value; qla82xx_md_rw_32(ha, crb_addr, read_value, 1); opcode = opcode & 4294967287U; } else { } if ((opcode & 16U) != 0U) { poll_time = (uint32_t )crb_entry->crb_strd.poll_timeout; wtime = (unsigned long )poll_time + (unsigned long )jiffies; tmp___2 = qla82xx_md_rw_32(ha, crb_addr, 0U, 0); read_value = (uint32_t )tmp___2; ldv_44441: ; if ((crb_entry->value_2 & read_value) == crb_entry->value_1) { goto ldv_44434; } else if ((long )((unsigned long )jiffies - wtime) >= 0L) { rval = 258U; goto ldv_44434; } else { tmp___3 = qla82xx_md_rw_32(ha, crb_addr, 0U, 0); read_value = (uint32_t )tmp___3; } goto ldv_44441; ldv_44434: opcode = opcode & 4294967279U; } else { } if ((opcode & 32U) != 0U) { if ((unsigned int )crb_entry->crb_strd.state_index_a != 0U) { index = (uint32_t )crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else { addr = crb_addr; } tmp___4 = qla82xx_md_rw_32(ha, addr, 0U, 0); read_value = (uint32_t )tmp___4; index = (uint32_t )crb_entry->crb_ctrl.state_index_v; tmplt_hdr->saved_state_array[index] = read_value; opcode = opcode & 4294967263U; } else { } if ((opcode & 64U) != 0U) { if ((unsigned int )crb_entry->crb_strd.state_index_a != 0U) { index = (uint32_t )crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else { addr = crb_addr; } if ((unsigned int )crb_entry->crb_ctrl.state_index_v != 0U) { index = (uint32_t )crb_entry->crb_ctrl.state_index_v; 
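/* Opcode 0x40 (write): when state_index_v is non-zero the value written comes from the
   template's saved_state_array, otherwise the entry's value_1 is used. */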
read_value = tmplt_hdr->saved_state_array[index]; } else { read_value = crb_entry->value_1; } qla82xx_md_rw_32(ha, addr, read_value, 1); opcode = opcode & 4294967231U; } else { } if ((opcode & 128U) != 0U) { index = (uint32_t )crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; read_value = read_value << (int )crb_entry->crb_ctrl.shl; read_value = read_value >> (int )crb_entry->crb_ctrl.shr; if (crb_entry->value_2 != 0U) { read_value = crb_entry->value_2 & read_value; } else { } read_value = crb_entry->value_3 | read_value; read_value = crb_entry->value_1 + read_value; tmplt_hdr->saved_state_array[index] = read_value; opcode = opcode & 4294967167U; } else { } crb_addr = (uint32_t )crb_entry->crb_strd.addr_stride + crb_addr; i = i + 1; ldv_44443: ; if ((uint32_t )i < crb_entry->op_count) { goto ldv_44442; } else { } return ((int )rval); } } static void qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t r_addr ; uint32_t r_stride ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla82xx_md_entry_rdocm *ocm_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; { ha = vha->hw; data_ptr = *d_ptr; ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr; r_addr = ocm_hdr->read_addr; r_stride = ocm_hdr->read_addr_stride; loop_cnt = ocm_hdr->op_count; i = 0U; goto ldv_44459; ldv_44458: r_value = readl((void const volatile *)((unsigned long )r_addr + ha->nx_pcibase)); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; r_addr = r_addr + r_stride; i = i + 1U; ldv_44459: ; if (i < loop_cnt) { goto ldv_44458; } else { } *d_ptr = data_ptr; return; } } static void qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t r_addr ; uint32_t s_stride ; uint32_t s_addr ; uint32_t s_value ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla82xx_md_entry_mux *mux_hdr ; uint32_t *data_ptr ; int tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; { ha = vha->hw; data_ptr = *d_ptr; mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr; r_addr = mux_hdr->read_addr; s_addr = mux_hdr->select_addr; s_stride = mux_hdr->select_value_stride; s_value = mux_hdr->select_value; loop_cnt = mux_hdr->op_count; i = 0U; goto ldv_44477; ldv_44476: qla82xx_md_rw_32(ha, s_addr, s_value, 1); tmp = qla82xx_md_rw_32(ha, r_addr, 0U, 0); r_value = (uint32_t )tmp; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = s_value; tmp___1 = data_ptr; data_ptr = data_ptr + 1; *tmp___1 = r_value; s_value = s_value + s_stride; i = i + 1U; ldv_44477: ; if (i < loop_cnt) { goto ldv_44476; } else { } *d_ptr = data_ptr; return; } } static void qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t r_addr ; uint32_t r_stride ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla82xx_md_entry_crb *crb_hdr ; uint32_t *data_ptr ; int tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; { ha = vha->hw; data_ptr = *d_ptr; crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr; r_addr = crb_hdr->addr; r_stride = (uint32_t )crb_hdr->crb_strd.addr_stride; loop_cnt = crb_hdr->op_count; i = 0U; goto ldv_44493; ldv_44492: tmp = qla82xx_md_rw_32(ha, r_addr, 0U, 0); r_value = (uint32_t )tmp; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_addr; tmp___1 = data_ptr; data_ptr = data_ptr + 1; *tmp___1 = r_value; r_addr = r_addr + r_stride; i = i + 1U; 
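/* rdcrb entries dump (address, value) pairs: each iteration records the CRB address and
   the word read from it, then advances the address by addr_stride. */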
ldv_44493: ; if (i < loop_cnt) { goto ldv_44492; } else { } *d_ptr = data_ptr; return; } } static int qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t addr ; uint32_t r_addr ; uint32_t c_addr ; uint32_t t_r_addr ; uint32_t i ; uint32_t k ; uint32_t loop_count ; uint32_t t_value ; uint32_t r_cnt ; uint32_t r_value ; unsigned long p_wait ; unsigned long w_time ; unsigned long p_mask ; uint32_t c_value_w ; uint32_t c_value_r ; struct qla82xx_md_entry_cache *cache_hdr ; int rval ; uint32_t *data_ptr ; int tmp ; int tmp___0 ; uint32_t *tmp___1 ; { ha = vha->hw; rval = 258; data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = (uint32_t )cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = (uint32_t )cache_hdr->addr_ctrl.init_tag_value; r_cnt = (uint32_t )cache_hdr->read_ctrl.read_addr_cnt; p_wait = (unsigned long )cache_hdr->cache_ctrl.poll_wait; p_mask = (unsigned long )cache_hdr->cache_ctrl.poll_mask; i = 0U; goto ldv_44531; ldv_44530: qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); if (c_value_w != 0U) { qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); } else { } if (p_mask != 0UL) { w_time = (unsigned long )jiffies + p_wait; ldv_44526: tmp = qla82xx_md_rw_32(ha, c_addr, 0U, 0); c_value_r = (uint32_t )tmp; if (((unsigned long )c_value_r & p_mask) == 0UL) { goto ldv_44519; } else if ((long )((unsigned long )jiffies - w_time) >= 0L) { ql_dbg(524288U, vha, 45106, "c_value_r: 0x%x, poll_mask: 0x%lx, w_time: 0x%lx\n", c_value_r, p_mask, w_time); return (rval); } else { } goto ldv_44526; ldv_44519: ; } else { } addr = r_addr; k = 0U; goto ldv_44528; ldv_44527: tmp___0 = qla82xx_md_rw_32(ha, addr, 0U, 0); r_value = (uint32_t )tmp___0; tmp___1 = data_ptr; data_ptr = data_ptr + 1; *tmp___1 = r_value; addr = (uint32_t )cache_hdr->read_ctrl.read_addr_stride + addr; k = k + 1U; ldv_44528: ; if (k < r_cnt) { goto ldv_44527; } else { } t_value = (uint32_t )cache_hdr->addr_ctrl.tag_value_stride + t_value; i = i + 1U; ldv_44531: ; if (i < loop_count) { goto ldv_44530; } else { } *d_ptr = data_ptr; return (0); } } static void qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t addr ; uint32_t r_addr ; uint32_t c_addr ; uint32_t t_r_addr ; uint32_t i ; uint32_t k ; uint32_t loop_count ; uint32_t t_value ; uint32_t r_cnt ; uint32_t r_value ; uint32_t c_value_w ; struct qla82xx_md_entry_cache *cache_hdr ; uint32_t *data_ptr ; int tmp ; uint32_t *tmp___0 ; { ha = vha->hw; data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = (uint32_t )cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = (uint32_t )cache_hdr->addr_ctrl.init_tag_value; r_cnt = (uint32_t )cache_hdr->read_ctrl.read_addr_cnt; i = 0U; goto ldv_44556; ldv_44555: qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); addr = r_addr; k = 0U; goto ldv_44553; ldv_44552: tmp = qla82xx_md_rw_32(ha, addr, 0U, 0); r_value = (uint32_t )tmp; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; addr = (uint32_t )cache_hdr->read_ctrl.read_addr_stride + addr; k = k + 1U; ldv_44553: ; if (k < r_cnt) { goto ldv_44552; } else { } 
t_value = (uint32_t )cache_hdr->addr_ctrl.tag_value_stride + t_value; i = i + 1U; ldv_44556: ; if (i < loop_count) { goto ldv_44555; } else { } *d_ptr = data_ptr; return; } } static void qla82xx_minidump_process_queue(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t s_addr ; uint32_t r_addr ; uint32_t r_stride ; uint32_t r_value ; uint32_t r_cnt ; uint32_t qid ; uint32_t i ; uint32_t k ; uint32_t loop_cnt ; struct qla82xx_md_entry_queue *q_hdr ; uint32_t *data_ptr ; int tmp ; uint32_t *tmp___0 ; { ha = vha->hw; qid = 0U; data_ptr = *d_ptr; q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr; s_addr = q_hdr->select_addr; r_cnt = (uint32_t )q_hdr->rd_strd.read_addr_cnt; r_stride = (uint32_t )q_hdr->rd_strd.read_addr_stride; loop_cnt = q_hdr->op_count; i = 0U; goto ldv_44579; ldv_44578: qla82xx_md_rw_32(ha, s_addr, qid, 1); r_addr = q_hdr->read_addr; k = 0U; goto ldv_44576; ldv_44575: tmp = qla82xx_md_rw_32(ha, r_addr, 0U, 0); r_value = (uint32_t )tmp; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; r_addr = r_addr + r_stride; k = k + 1U; ldv_44576: ; if (k < r_cnt) { goto ldv_44575; } else { } qid = (uint32_t )q_hdr->q_strd.queue_id_stride + qid; i = i + 1U; ldv_44579: ; if (i < loop_cnt) { goto ldv_44578; } else { } *d_ptr = data_ptr; return; } } static void qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t r_addr ; uint32_t r_value ; uint32_t i ; uint32_t loop_cnt ; struct qla82xx_md_entry_rdrom *rom_hdr ; uint32_t *data_ptr ; int tmp ; uint32_t *tmp___0 ; { ha = vha->hw; data_ptr = *d_ptr; rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr; r_addr = rom_hdr->read_addr; loop_cnt = rom_hdr->read_data_size / 4U; i = 0U; goto ldv_44594; ldv_44593: qla82xx_md_rw_32(ha, 1108410416U, r_addr & 4294901760U, 1); tmp = qla82xx_md_rw_32(ha, (r_addr & 65535U) + 1108672512U, 0U, 0); r_value = (uint32_t )tmp; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; r_addr = r_addr + 4U; i = i + 1U; ldv_44594: ; if (i < loop_cnt) { goto ldv_44593; } else { } *d_ptr = data_ptr; return; } } static int qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; uint32_t r_addr ; uint32_t r_value ; uint32_t r_data ; uint32_t i ; uint32_t j ; uint32_t loop_cnt ; struct qla82xx_md_entry_rdmem *m_hdr ; int rval ; uint32_t *data_ptr ; int tmp ; struct ratelimit_state _rs ; int tmp___0 ; int tmp___1 ; uint32_t *tmp___2 ; { ha = vha->hw; rval = 258; data_ptr = *d_ptr; m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr; r_addr = m_hdr->read_addr; loop_cnt = m_hdr->read_data_size / 16U; if ((r_addr & 15U) != 0U) { ql_log(1U, vha, 45107, "Read addr 0x%x not 16 bytes aligned\n", r_addr); return (rval); } else { } if ((m_hdr->read_data_size & 15U) != 0U) { ql_log(1U, vha, 45108, "Read data[0x%x] not multiple of 16 bytes\n", m_hdr->read_data_size); return (rval); } else { } ql_dbg(524288U, vha, 45109, "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", "qla82xx_minidump_process_rdmem", r_addr, m_hdr->read_data_size, loop_cnt); ldv_write_lock_irqsave(& ha->hw_lock); i = 0U; goto ldv_44622; ldv_44621: qla82xx_md_rw_32(ha, 1090519188U, r_addr, 1); r_value = 0U; qla82xx_md_rw_32(ha, 1090519192U, r_value, 1); r_value = 2U; qla82xx_md_rw_32(ha, 1090519184U, r_value, 1); r_value = 3U; qla82xx_md_rw_32(ha, 1090519184U, r_value, 1); j = 0U; goto ldv_44615; 
ldv_44614: tmp = qla82xx_md_rw_32(ha, 1090519184U, 0U, 0); r_value = (uint32_t )tmp; if ((r_value & 8U) == 0U) { goto ldv_44613; } else { } j = j + 1U; ldv_44615: ; if (j <= 999U) { goto ldv_44614; } else { } ldv_44613: ; if (j > 999U) { _rs.lock.raw_lock.ldv_1464.head_tail = 0U; _rs.lock.magic = 3735899821U; _rs.lock.owner_cpu = 4294967295U; _rs.lock.owner = (void *)-1; _rs.lock.dep_map.key = 0; _rs.lock.dep_map.class_cache[0] = 0; _rs.lock.dep_map.class_cache[1] = 0; _rs.lock.dep_map.name = "_rs.lock"; _rs.lock.dep_map.cpu = 0; _rs.lock.dep_map.ip = 0UL; _rs.interval = 1250; _rs.burst = 10; _rs.printed = 0; _rs.missed = 0; _rs.begin = 0UL; tmp___0 = ___ratelimit(& _rs, "qla82xx_minidump_process_rdmem"); if (tmp___0 != 0) { printk("\vfailed to read through agent\n"); } else { } ldv_write_unlock_irqrestore(& ha->hw_lock); return (rval); } else { } j = 0U; goto ldv_44619; ldv_44618: tmp___1 = qla82xx_md_rw_32(ha, (uint32_t )MD_MIU_TEST_AGT_RDDATA[j], 0U, 0); r_data = (uint32_t )tmp___1; tmp___2 = data_ptr; data_ptr = data_ptr + 1; *tmp___2 = r_data; j = j + 1U; ldv_44619: ; if (j <= 3U) { goto ldv_44618; } else { } r_addr = r_addr + 16U; i = i + 1U; ldv_44622: ; if (i < loop_cnt) { goto ldv_44621; } else { } ldv_write_unlock_irqrestore(& ha->hw_lock); *d_ptr = data_ptr; return (0); } } int qla82xx_validate_template_chksum(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint64_t chksum ; uint32_t *d_ptr ; int count ; uint32_t *tmp ; int tmp___0 ; { ha = vha->hw; chksum = 0ULL; d_ptr = (uint32_t *)ha->md_tmplt_hdr; count = (int )(ha->md_template_size / 4U); goto ldv_44632; ldv_44631: tmp = d_ptr; d_ptr = d_ptr + 1; chksum = (uint64_t )*tmp + chksum; ldv_44632: tmp___0 = count; count = count - 1; if (tmp___0 > 0) { goto ldv_44631; } else { } goto ldv_44635; ldv_44634: chksum = (chksum & 4294967295ULL) + (chksum >> 32); ldv_44635: ; if (chksum >> 32 != 0ULL) { goto ldv_44634; } else { } return (~ ((int )chksum)); } } static void qla82xx_mark_entry_skipped(scsi_qla_host_t *vha , qla82xx_md_entry_hdr_t *entry_hdr , int index ) { { entry_hdr->d_ctrl.driver_flags = (uint8_t )((unsigned int )entry_hdr->d_ctrl.driver_flags | 128U); ql_dbg(524288U, vha, 45110, "Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", index, entry_hdr->entry_type, (int )entry_hdr->d_ctrl.entry_capture_mask); return; } } int qla82xx_md_collect(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int no_entry_hdr ; qla82xx_md_entry_hdr_t *entry_hdr ; struct qla82xx_md_template_hdr *tmplt_hdr ; uint32_t *data_ptr ; uint32_t total_data_size ; uint32_t f_capture_mask ; uint32_t data_collected ; int i ; int rval ; int tmp ; { ha = vha->hw; no_entry_hdr = 0; total_data_size = 0U; data_collected = 0U; i = 0; rval = 258; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; data_ptr = (uint32_t *)ha->md_dump; if (ha->fw_dumped != 0) { ql_log(1U, vha, 45111, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto md_failed; } else { } ha->fw_dumped = 0; if ((unsigned long )ha->md_tmplt_hdr == (unsigned long )((void *)0) || (unsigned long )ha->md_dump == (unsigned long )((void *)0)) { ql_log(1U, vha, 45112, "Memory not allocated for minidump capture\n"); goto md_failed; } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { ql_log(1U, vha, 45140, "Forced reset from application, ignore minidump capture\n"); ha->flags.isp82xx_no_md_cap = 0U; goto md_failed; } else { } tmp = qla82xx_validate_template_chksum(vha); if (tmp != 0) { ql_log(2U, vha, 45113, "Template checksum validation error\n"); goto 
md_failed; } else { } no_entry_hdr = (int )tmplt_hdr->num_of_entries; ql_dbg(524288U, vha, 45114, "No of entry headers in Template: 0x%x\n", no_entry_hdr); ql_dbg(524288U, vha, 45115, "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); f_capture_mask = tmplt_hdr->capture_debug_level & 255U; if ((f_capture_mask & 3U) != 3U) { ql_log(1U, vha, 45116, "Minimum required capture mask[0x%x] level not set\n", f_capture_mask); goto md_failed; } else { } tmplt_hdr->driver_capture_mask = (uint32_t )ql2xmdcapmask; tmplt_hdr->driver_info[0] = (uint32_t )vha->host_no; tmplt_hdr->driver_info[1] = 134610944U; total_data_size = ha->md_dump_size; ql_dbg(524288U, vha, 45117, "Total minidump data_size 0x%x to be captured\n", total_data_size); if (tmplt_hdr->entry_type != 99U) { ql_log(1U, vha, 45134, "Bad template header entry type: 0x%x obtained\n", tmplt_hdr->entry_type); goto md_failed; } else { } entry_hdr = (qla82xx_md_entry_hdr_t *)ha->md_tmplt_hdr + (unsigned long )tmplt_hdr->first_entry_offset; i = 0; goto ldv_44677; ldv_44676: ; if (data_collected > total_data_size) { ql_log(1U, vha, 45118, "More MiniDump data collected: [0x%x]\n", data_collected); goto md_failed; } else { } if (((int )entry_hdr->d_ctrl.entry_capture_mask & ql2xmdcapmask) == 0) { entry_hdr->d_ctrl.driver_flags = (uint8_t )((unsigned int )entry_hdr->d_ctrl.driver_flags | 128U); ql_dbg(524288U, vha, 45119, "Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", i, entry_hdr->entry_type, (int )entry_hdr->d_ctrl.entry_capture_mask); goto skip_nxt_entry; } else { } ql_dbg(524288U, vha, 45120, "[%s]: data ptr[%d]: %p, entry_hdr: %p\nentry_type: 0x%x, captrue_mask: 0x%x\n", "qla82xx_md_collect", i, data_ptr, entry_hdr, entry_hdr->entry_type, (int )entry_hdr->d_ctrl.entry_capture_mask); ql_dbg(524288U, vha, 45121, "Data collected: [0x%x], Dump size left:[0x%x]\n", data_collected, ha->md_dump_size - data_collected); switch (entry_hdr->entry_type) { case 255U: qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto ldv_44659; case 98U: rval = qla82xx_minidump_process_control(vha, entry_hdr, & data_ptr); if (rval != 0) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } else { } goto ldv_44659; case 1U: qla82xx_minidump_process_rdcrb(vha, entry_hdr, & data_ptr); goto ldv_44659; case 72U: rval = qla82xx_minidump_process_rdmem(vha, entry_hdr, & data_ptr); if (rval != 0) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } else { } goto ldv_44659; case 4U: ; case 71U: qla82xx_minidump_process_rdrom(vha, entry_hdr, & data_ptr); goto ldv_44659; case 21U: ; case 22U: ; case 23U: ; case 24U: rval = qla82xx_minidump_process_l2tag(vha, entry_hdr, & data_ptr); if (rval != 0) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } else { } goto ldv_44659; case 11U: ; case 12U: qla82xx_minidump_process_l1cache(vha, entry_hdr, & data_ptr); goto ldv_44659; case 6U: qla82xx_minidump_process_rdocm(vha, entry_hdr, & data_ptr); goto ldv_44659; case 2U: qla82xx_minidump_process_rdmux(vha, entry_hdr, & data_ptr); goto ldv_44659; case 3U: qla82xx_minidump_process_queue(vha, entry_hdr, & data_ptr); goto ldv_44659; case 0U: ; default: qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto ldv_44659; } ldv_44659: ql_dbg(524288U, vha, 45122, "[%s]: data ptr[%d]: %p\n", "qla82xx_md_collect", i, data_ptr); data_collected = (uint32_t )((long )data_ptr) - (uint32_t )((long )ha->md_dump); skip_nxt_entry: entry_hdr = entry_hdr + (unsigned long )entry_hdr->entry_size; i = i + 1; ldv_44677: ; if (i < no_entry_hdr) { goto ldv_44676; } 
else { } if (data_collected != total_data_size) { ql_dbg(524288U, vha, 45123, "MiniDump data mismatch: Data collected: [0x%x],total_data_size:[0x%x]\n", data_collected, total_data_size); goto md_failed; } else { } ql_log(2U, vha, 45124, "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, 0U); md_failed: ; return (rval); } } int qla82xx_md_alloc(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int i ; int k ; struct qla82xx_md_template_hdr *tmplt_hdr ; { ha = vha->hw; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; if (ql2xmdcapmask <= 2 || ql2xmdcapmask > 127) { ql2xmdcapmask = (int )tmplt_hdr->capture_debug_level & 255; ql_log(2U, vha, 45125, "Forcing driver capture mask to firmware default capture mask: 0x%x.\n", ql2xmdcapmask); } else { } i = 2; k = 1; goto ldv_44687; ldv_44686: ; if ((i & ql2xmdcapmask) != 0) { ha->md_dump_size = ha->md_dump_size + tmplt_hdr->capture_size_array[k]; } else { } i = i << 1; k = k + 1; ldv_44687: ; if ((i & 255) != 0) { goto ldv_44686; } else { } if ((unsigned long )ha->md_dump != (unsigned long )((void *)0)) { ql_log(1U, vha, 45126, "Firmware dump previously allocated.\n"); return (1); } else { } ha->md_dump = vmalloc((unsigned long )ha->md_dump_size); if ((unsigned long )ha->md_dump == (unsigned long )((void *)0)) { ql_log(1U, vha, 45127, "Unable to allocate memory for Minidump size (0x%x).\n", ha->md_dump_size); return (1); } else { } return (0); } } void qla82xx_md_free(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((unsigned long )ha->md_tmplt_hdr != (unsigned long )((void *)0)) { ql_log(2U, vha, 45128, "Free MiniDump template: %p, size (%d KB)\n", ha->md_tmplt_hdr, ha->md_template_size / 1024U); dma_free_attrs(& (ha->pdev)->dev, (size_t )ha->md_template_size, ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma, (struct dma_attrs *)0); ha->md_tmplt_hdr = (void *)0; } else { } if ((unsigned long )ha->md_dump != (unsigned long )((void *)0)) { ql_log(2U, vha, 45129, "Free MiniDump memory: %p, size (%d KB)\n", ha->md_dump, ha->md_dump_size / 1024U); vfree((void const *)ha->md_dump); ha->md_dump_size = 0U; ha->md_dump = (void *)0; } else { } return; } } void qla82xx_md_prep(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int rval ; { ha = vha->hw; rval = qla82xx_md_get_template_size(vha); if (rval == 0) { ql_log(2U, vha, 45130, "MiniDump Template size obtained (%d KB)\n", ha->md_template_size / 1024U); if ((ha->device_type & 262144U) != 0U) { rval = qla8044_md_get_template(vha); } else { rval = qla82xx_md_get_template(vha); } if (rval == 0) { ql_dbg(524288U, vha, 45131, "MiniDump Template obtained\n"); rval = qla82xx_md_alloc(vha); if (rval == 0) { ql_log(2U, vha, 45132, "MiniDump memory allocated (%d KB)\n", ha->md_dump_size / 1024U); } else { ql_log(2U, vha, 45133, "Free MiniDump template: %p, size: (%d KB)\n", ha->md_tmplt_hdr, ha->md_template_size / 1024U); dma_free_attrs(& (ha->pdev)->dev, (size_t )ha->md_template_size, ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma, (struct dma_attrs *)0); ha->md_tmplt_hdr = (void *)0; } } else { } } else { } return; } } int qla82xx_beacon_on(struct scsi_qla_host *vha ) { int rval ; struct qla_hw_data *ha ; { ha = vha->hw; qla82xx_idc_lock(ha); rval = qla82xx_mbx_beacon_ctl(vha, 1); if (rval != 0) { ql_log(1U, vha, 45136, "mbx set led config failed in %s\n", "qla82xx_beacon_on"); goto exit; } else { } ha->beacon_blink_led = 1U; exit: qla82xx_idc_unlock(ha); return (rval); } } int 
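/* qla82xx_beacon_off(): counterpart of qla82xx_beacon_on() above - issues the
   LED-control mailbox command with the beacon disabled and clears
   beacon_blink_led, all under the IDC lock. */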
qla82xx_beacon_off(struct scsi_qla_host *vha ) { int rval ; struct qla_hw_data *ha ; { ha = vha->hw; qla82xx_idc_lock(ha); rval = qla82xx_mbx_beacon_ctl(vha, 0); if (rval != 0) { ql_log(1U, vha, 45137, "mbx set led config failed in %s\n", "qla82xx_beacon_off"); goto exit; } else { } ha->beacon_blink_led = 0U; exit: qla82xx_idc_unlock(ha); return (rval); } } void activate_pending_timer_14(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_14 == (unsigned long )timer) { if (ldv_timer_state_14 == 2 || pending_flag != 0) { ldv_timer_list_14 = timer; ldv_timer_list_14->data = data; ldv_timer_state_14 = 1; } else { } return; } else { } reg_timer_14(timer); ldv_timer_list_14->data = data; return; } } int reg_timer_14(struct timer_list *timer ) { { ldv_timer_list_14 = timer; ldv_timer_state_14 = 1; return (0); } } void disable_suitable_timer_14(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_14) { ldv_timer_state_14 = 0; return; } else { } return; } } void choose_timer_14(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_14 = 2; return; } } int ldv_del_timer_69(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_70(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } extern unsigned long find_next_bit(unsigned long const * , unsigned long , unsigned long ) ; extern unsigned long find_first_bit(unsigned long const * , unsigned long ) ; extern char *strim(char * ) ; extern void wait_for_completion(struct completion * ) ; extern void do_gettimeofday(struct timeval * ) ; int ldv_del_timer_73(struct timer_list *ldv_func_arg1 ) ; __inline static void memcpy_fromio(void *dst , void const volatile *src , size_t count ) { size_t __len ; void *__ret ; { __len = count; __ret = __builtin_memcpy(dst, (void const *)src, __len); return; } } __inline static void memcpy_toio(void volatile *dst , void const *src , size_t count ) { size_t __len ; void *__ret ; { __len = count; __ret = __builtin_memcpy((void *)dst, src, __len); return; } } int reg_timer_15(struct timer_list *timer ) ; void activate_pending_timer_15(struct timer_list *timer , unsigned long data , int pending_flag ) ; void choose_timer_15(struct timer_list *timer ) ; void disable_suitable_timer_15(struct timer_list *timer ) ; int ldv_scsi_add_host_with_dma_74(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; static char const * const port_state_str___4[5U] = { "Unknown", "UNCONFIGURED", "DEAD", "LOST", "ONLINE"}; __inline static void host_to_adap(uint8_t *src , uint8_t *dst , uint32_t bsize ) { uint32_t *isrc ; __le32 *odest ; uint32_t iter ; __le32 *tmp ; uint32_t *tmp___0 ; { isrc = (uint32_t *)src; odest = (__le32 *)dst; iter = bsize >> 2; goto ldv_43333; ldv_43332: tmp = odest; odest = odest + 1; tmp___0 = isrc; isrc = isrc + 1; *tmp = *tmp___0; iter = iter - 1U; ldv_43333: ; if (iter != 0U) { goto ldv_43332; } else { } return; } } __inline static void qla2x00_set_fcport_state___3(fc_port_t *fcport , int state 
) { int old_state ; { old_state = atomic_read((atomic_t const *)(& fcport->state)); atomic_set(& fcport->state, state); if (old_state != 0 && old_state != state) { ql_dbg(268435456U, fcport->vha, 8317, "FCPort state transitioned from %s to %s - portid=%02x%02x%02x.\n", port_state_str___4[old_state], port_state_str___4[state], (int )fcport->d_id.b.domain, (int )fcport->d_id.b.area, (int )fcport->d_id.b.al_pa); } else { } return; } } extern void usleep_range(unsigned long , unsigned long ) ; __inline static struct new_utsname *utsname(void) { struct task_struct *tmp ; { tmp = get_current(); return (& ((tmp->nsproxy)->uts_ns)->name); } } static int qlafx00_mailbox_command(scsi_qla_host_t *vha , struct mbx_cmd_32 *mcp ) { int rval ; unsigned long flags ; device_reg_t *reg ; uint8_t abort_active ; uint8_t io_lock_on ; uint16_t command ; uint32_t *iptr ; uint32_t *optr ; uint32_t cnt ; uint32_t mboxes ; unsigned long wait_time ; struct qla_hw_data *ha ; scsi_qla_host_t *base_vha ; void *tmp ; int tmp___0 ; unsigned long tmp___1 ; raw_spinlock_t *tmp___2 ; uint32_t *iptr2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; { flags = 0UL; command = 0U; ha = vha->hw; tmp = pci_get_drvdata(ha->pdev); base_vha = (scsi_qla_host_t *)tmp; if ((ha->pdev)->error_state > 2U) { ql_log(1U, vha, 4444, "error_state is greater than pci_channel_io_frozen, exiting.\n"); return (256); } else { } if ((vha->device_flags & 32U) != 0U) { ql_log(1U, vha, 4447, "Device in failed state, exiting.\n"); return (256); } else { } reg = ha->iobase; io_lock_on = (uint8_t )base_vha->flags.init_done; rval = 0; tmp___0 = constant_test_bit(3L, (unsigned long const volatile *)(& base_vha->dpc_flags)); abort_active = (uint8_t )tmp___0; if (*((unsigned long *)ha + 2UL) != 0UL) { ql_log(1U, vha, 4469, "Perm failure on EEH timeout MBX, exiting.\n"); return (256); } else { } if (*((unsigned long *)ha + 2UL) != 0UL) { mcp->mb[0] = 16395U; ql_log(1U, vha, 4470, "FW hung = %d.\n", (int )ha->flags.isp82xx_fw_hung); rval = 258; goto premature_exit; } else { } tmp___1 = wait_for_completion_timeout(& ha->mbx_cmd_comp, (unsigned long )(mcp->tov * 250U)); if (tmp___1 == 0UL) { ql_log(1U, vha, 4471, "Cmd access timeout, cmd=0x%x, Exiting.\n", mcp->mb[0]); return (256); } else { } ha->flags.mbox_busy = 1U; ha->mcp32 = mcp; ql_dbg(536870912U, vha, 4472, "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); optr = & reg->ispfx00.mailbox0; iptr = (uint32_t *)(& mcp->mb); command = (uint16_t )mcp->mb[0]; mboxes = mcp->out_mb; cnt = 0U; goto ldv_43566; ldv_43565: ; if ((int )mboxes & 1) { writel(*iptr, (void volatile *)optr); } else { } mboxes = mboxes >> 1; optr = optr + 1; iptr = iptr + 1; cnt = cnt + 1U; ldv_43566: ; if ((uint32_t )ha->mbx_count > cnt) { goto ldv_43565; } else { } ha->flags.mbox_int = 0U; clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); ql_dump_buffer(537001984U, vha, 4466, (uint8_t *)(& mcp->mb), 16U); ql_dump_buffer(537001984U, vha, 4467, (uint8_t *)(& mcp->mb) + 16UL, 16U); ql_dump_buffer(537001984U, vha, 4468, (uint8_t *)(& mcp->mb) + 32UL, 8U); ql_dbg(536870912U, vha, 4473, "Going to unlock irq & waiting for interrupts. 
jiffies=%lx.\n", jiffies); if (((unsigned int )abort_active == 0U && (unsigned int )io_lock_on != 0U) || ((((ha->device_type & 2048U) != 0U || (ha->device_type & 8192U) != 0U) || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) && *((unsigned long *)ha + 2UL) != 0UL)) { set_bit(2L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); writel(ha->mbx_intr_code, (void volatile *)ha->cregbase + 133636U); spin_unlock_irqrestore(& ha->hardware_lock, flags); wait_for_completion_timeout(& ha->mbx_intr_comp, (unsigned long )(mcp->tov * 250U)); } else { ql_dbg(536870912U, vha, 4396, "Cmd=%x Polling Mode.\n", (int )command); writel(ha->mbx_intr_code, (void volatile *)ha->cregbase + 133636U); spin_unlock_irqrestore(& ha->hardware_lock, flags); wait_time = (unsigned long )(mcp->tov * 250U) + (unsigned long )jiffies; goto ldv_43576; ldv_43575: ; if ((long )(wait_time - (unsigned long )jiffies) < 0L) { goto ldv_43574; } else { } qla2x00_poll(*(ha->rsp_q_map)); if (*((unsigned long *)ha + 2UL) == 0UL && ((ha->device_type & 2U) == 0U || (unsigned int )command != 11U)) { usleep_range(10000UL, 11000UL); } else { } ldv_43576: ; if (*((unsigned long *)ha + 2UL) == 0UL) { goto ldv_43575; } else { } ldv_43574: ql_dbg(536870912U, vha, 4397, "Waited %d sec.\n", (unsigned int )((((unsigned long )(mcp->tov * 250U) - wait_time) + (unsigned long )jiffies) / 250UL)); } if (*((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(536870912U, vha, 4398, "Cmd=%x completed.\n", (int )command); ha->flags.mbox_int = 0U; clear_bit(1L, (unsigned long volatile *)(& ha->mbx_cmd_flags)); if (ha->mailbox_out32[0] != 16384U) { rval = 258; } else { } iptr2 = (uint32_t *)(& mcp->mb); iptr = (uint32_t *)(& ha->mailbox_out32); mboxes = mcp->in_mb; cnt = 0U; goto ldv_43579; ldv_43578: ; if ((int )mboxes & 1) { *iptr2 = *iptr; } else { } mboxes = mboxes >> 1; iptr2 = iptr2 + 1; iptr = iptr + 1; cnt = cnt + 1U; ldv_43579: ; if ((uint32_t )ha->mbx_count > cnt) { goto ldv_43578; } else { } } else { rval = 256; } ha->flags.mbox_busy = 0U; ha->mcp32 = (struct mbx_cmd_32 *)0; if (((unsigned int )abort_active != 0U || (unsigned int )io_lock_on == 0U) && ((((ha->device_type & 2048U) == 0U && (ha->device_type & 8192U) == 0U) && ((ha->device_type & 32768U) == 0U && (ha->device_type & 65536U) == 0U)) || *((unsigned long *)ha + 2UL) == 0UL)) { ql_dbg(536870912U, vha, 4410, "checking for additional resp interrupt.\n"); qla2x00_poll(*(ha->rsp_q_map)); } else { } if (rval == 256 && mcp->mb[0] != 42U) { if (((unsigned int )io_lock_on == 0U || ((int )mcp->flags & 4) != 0) || *((unsigned long *)ha + 2UL) != 0UL) { ql_dbg(536870912U, vha, 4445, "Timeout, schedule isp_abort_needed.\n"); tmp___3 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___3 == 0) { tmp___4 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___4 == 0) { tmp___5 = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___5 == 0) { ql_log(2U, base_vha, 4446, "Mailbox cmd timeout occurred, cmd=0x%x, mb[0]=0x%x, eeh_busy=0x%x. 
Scheduling ISP abort.\n", (int )command, mcp->mb[0], (int )ha->flags.eeh_busy); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else { } } else { } } else { } } else if ((unsigned int )abort_active == 0U) { ql_dbg(536870912U, vha, 4448, "Timeout, calling abort_isp.\n"); tmp___7 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___7 == 0) { tmp___8 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___8 == 0) { tmp___9 = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___9 == 0) { ql_log(2U, base_vha, 4449, "Mailbox cmd timeout occurred, cmd=0x%x, mb[0]=0x%x. Scheduling ISP abort ", (int )command, mcp->mb[0]); set_bit(3L, (unsigned long volatile *)(& vha->dpc_flags)); clear_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); tmp___6 = (*((ha->isp_ops)->abort_isp))(vha); if (tmp___6 != 0) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); } else { } clear_bit(3L, (unsigned long volatile *)(& vha->dpc_flags)); ql_dbg(536870912U, vha, 4450, "Finished abort_isp.\n"); } else { } } else { } } else { } } else { } } else { } premature_exit: complete(& ha->mbx_cmd_comp); if (rval != 0) { ql_log(1U, base_vha, 4451, "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n", mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], (int )command); } else { ql_dbg(536870912U, base_vha, 4452, "Done %s.\n", "qlafx00_mailbox_command"); } return (rval); } } int qlafx00_driver_shutdown(scsi_qla_host_t *vha , int tmo ) { int rval ; struct mbx_cmd_32 mc ; struct mbx_cmd_32 *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4454, "Entered %s.\n", "qlafx00_driver_shutdown"); mcp->mb[0] = 106U; mcp->out_mb = 1U; mcp->in_mb = 1U; if (tmo != 0) { mcp->tov = (uint32_t )tmo; } else { mcp->tov = 30U; } mcp->flags = 0U; rval = qlafx00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4455, "Failed=%x.\n", rval); } else { ql_dbg(536903680U, vha, 4456, "Done %s.\n", "qlafx00_driver_shutdown"); } return (rval); } } static int qlafx00_get_firmware_state(scsi_qla_host_t *vha , uint32_t *states ) { int rval ; struct mbx_cmd_32 mc ; struct mbx_cmd_32 *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4457, "Entered %s.\n", "qlafx00_get_firmware_state"); mcp->mb[0] = 105U; mcp->out_mb = 1U; mcp->in_mb = 3U; mcp->tov = 30U; mcp->flags = 0U; rval = qlafx00_mailbox_command(vha, mcp); *states = mcp->mb[1]; if (rval != 0) { ql_dbg(536870912U, vha, 4458, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4459, "Done %s.\n", "qlafx00_get_firmware_state"); } return (rval); } } int qlafx00_init_firmware(scsi_qla_host_t *vha , uint16_t size ) { int rval ; struct mbx_cmd_32 mc ; struct mbx_cmd_32 *mcp ; struct qla_hw_data *ha ; { mcp = & mc; ha = vha->hw; ql_dbg(536903680U, vha, 4460, "Entered %s.\n", "qlafx00_init_firmware"); mcp->mb[0] = 96U; mcp->mb[1] = 0U; mcp->mb[2] = (unsigned int )(ha->init_cb_dma >> 32ULL); mcp->mb[3] = (unsigned int )ha->init_cb_dma; mcp->out_mb = 15U; mcp->in_mb = 1U; mcp->buf_size = (long )size; mcp->flags = 2U; mcp->tov = 30U; rval = qlafx00_mailbox_command(vha, mcp); if (rval != 0) { ql_dbg(536870912U, vha, 4461, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4462, "Done %s.\n", "qlafx00_init_firmware"); } return (rval); } } static int qlafx00_mbx_reg_test(scsi_qla_host_t *vha ) { int rval ; struct mbx_cmd_32 mc ; struct mbx_cmd_32 *mcp ; { mcp = & mc; ql_dbg(536903680U, vha, 4463, "Entered 
%s.\n", "qlafx00_mbx_reg_test"); mcp->mb[0] = 6U; mcp->mb[1] = 43690U; mcp->mb[2] = 21845U; mcp->mb[3] = 43605U; mcp->mb[4] = 21930U; mcp->mb[5] = 42405U; mcp->mb[6] = 23130U; mcp->mb[7] = 9509U; mcp->mb[8] = 48059U; mcp->mb[9] = 26214U; mcp->mb[10] = 47974U; mcp->mb[11] = 26299U; mcp->mb[12] = 46774U; mcp->mb[13] = 27499U; mcp->mb[14] = 13878U; mcp->mb[15] = 52428U; mcp->out_mb = 65535U; mcp->in_mb = 65535U; mcp->buf_size = 0L; mcp->flags = 2U; mcp->tov = 30U; rval = qlafx00_mailbox_command(vha, mcp); if (rval == 0) { if (((mcp->mb[17] != 43690U || mcp->mb[18] != 21845U) || mcp->mb[19] != 43605U) || mcp->mb[20] != 21930U) { rval = 258; } else { } if (((mcp->mb[21] != 42405U || mcp->mb[22] != 23130U) || mcp->mb[23] != 9509U) || mcp->mb[24] != 48059U) { rval = 258; } else { } if (((mcp->mb[25] != 26214U || mcp->mb[26] != 47974U) || mcp->mb[27] != 26299U) || mcp->mb[28] != 46774U) { rval = 258; } else { } if ((mcp->mb[29] != 27499U || mcp->mb[30] != 13878U) || mcp->mb[31] != 52428U) { rval = 258; } else { } } else { } if (rval != 0) { ql_dbg(536870912U, vha, 4464, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(536903680U, vha, 4465, "Done %s.\n", "qlafx00_mbx_reg_test"); } return (rval); } } int qlafx00_pci_config(struct scsi_qla_host *vha ) { uint16_t w ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & w); w = (uint16_t )((unsigned int )w | 320U); w = (unsigned int )w & 64511U; pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )w); tmp = pci_find_capability(ha->pdev, 16); if (tmp != 0) { pcie_set_readrq(ha->pdev, 2048); } else { } ha->chip_revision = (uint16_t )(ha->pdev)->revision; return (0); } } __inline static void qlafx00_soc_cpu_reset(scsi_qla_host_t *vha ) { unsigned long flags ; struct qla_hw_data *ha ; int i ; int core ; uint32_t cnt ; raw_spinlock_t *tmp ; { flags = 0UL; ha = vha->hw; i = 0; goto ldv_43628; ldv_43627: writel(3841U, (void volatile *)ha->cregbase + (unsigned long )((i + 16640) * 8)); i = i + 1; ldv_43628: ; if (i <= 3) { goto ldv_43627; } else { } i = 0; goto ldv_43631; ldv_43630: writel(16843009U, (void volatile *)ha->cregbase + (unsigned long )(i * 8 + 133124)); i = i + 1; ldv_43631: ; if (i <= 3) { goto ldv_43630; } else { } writel(18809089U, (void volatile *)ha->cregbase + 133184U); i = 0; goto ldv_43634; ldv_43633: writel(0U, (void volatile *)ha->cregbase + (unsigned long )((i + 33472) * 4)); i = i + 1; ldv_43634: ; if (i <= 114) { goto ldv_43633; } else { } core = 0; goto ldv_43640; ldv_43639: i = 0; goto ldv_43637; ldv_43636: writel(0U, (void volatile *)ha->cregbase + (unsigned long )(((core * 64 + i) + 34324) * 4)); i = i + 1; ldv_43637: ; if (i <= 7) { goto ldv_43636; } else { } core = core + 1; ldv_43640: ; if (core <= 3) { goto ldv_43639; } else { } core = 0; goto ldv_43643; ldv_43642: writel(1023U, (void volatile *)ha->cregbase + (unsigned long )(core * 256 + 137396)); core = core + 1; ldv_43643: ; if (core <= 3) { goto ldv_43642; } else { } writel(2U, (void volatile *)ha->cregbase + 131584U); writel(3U, (void volatile *)ha->cregbase + 131588U); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); writel(0U, (void volatile *)ha->cregbase + 133184U); writel(3840U, (void volatile *)ha->cregbase + 133120U); cnt = 10U; goto ldv_43649; ldv_43648: msleep(1000U); __asm__ volatile ("": : : "memory"); cnt = cnt - 1U; ldv_43649: ; if (cnt != 0U) { goto ldv_43648; } else { } 
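/* The label-driven loop above appears to serve as a fixed ten-iteration
   msleep(1000) settle delay after the SOC reset registers have been written;
   hardware_lock, taken before the final register writes, is released just
   below. */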
spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } void qlafx00_soft_reset(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int tmp ; long tmp___0 ; long tmp___1 ; { ha = vha->hw; tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { tmp___1 = ldv__builtin_expect(*((unsigned long *)ha + 2UL) != 0UL, 0L); if (tmp___1 != 0L) { return; } else { } } else { } (*((ha->isp_ops)->disable_intrs))(ha); qlafx00_soc_cpu_reset(vha); (*((ha->isp_ops)->enable_intrs))(ha); return; } } int qlafx00_chip_diag(scsi_qla_host_t *vha ) { int rval ; struct qla_hw_data *ha ; struct req_que *req ; { rval = 0; ha = vha->hw; req = *(ha->req_q_map); ha->fw_transfer_size = (uint32_t )req->length * 64U; rval = qlafx00_mbx_reg_test(vha); if (rval != 0) { ql_log(1U, vha, 4453, "Failed mailbox send register test\n"); } else { rval = 0; } return (rval); } } void qlafx00_config_rings(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct device_reg_fx00 *reg ; struct init_cb_fx *icb ; struct req_que *req ; struct rsp_que *rsp ; { ha = vha->hw; reg = & (ha->iobase)->ispfx00; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); icb = (struct init_cb_fx *)ha->init_cb; icb->request_q_outpointer = 0U; icb->response_q_inpointer = 0U; icb->request_q_length = req->length; icb->response_q_length = rsp->length; icb->request_q_address[0] = (unsigned int )req->dma; icb->request_q_address[1] = (unsigned int )(req->dma >> 32ULL); icb->response_q_address[0] = (unsigned int )rsp->dma; icb->response_q_address[1] = (unsigned int )(rsp->dma >> 32ULL); writel(0U, (void volatile *)(& reg->req_q_in)); writel(0U, (void volatile *)(& reg->req_q_out)); writel(0U, (void volatile *)(& reg->rsp_q_in)); writel(0U, (void volatile *)(& reg->rsp_q_out)); readl((void const volatile *)(& reg->rsp_q_out)); return; } } char *qlafx00_pci_info_str(struct scsi_qla_host *vha , char *str ) { struct qla_hw_data *ha ; int pcie_reg ; { ha = vha->hw; pcie_reg = pci_find_capability(ha->pdev, 16); if (pcie_reg != 0) { strcpy(str, "PCIe iSA"); return (str); } else { } return (str); } } char *qlafx00_fw_version_str(struct scsi_qla_host *vha , char *str ) { struct qla_hw_data *ha ; { ha = vha->hw; sprintf(str, "%s", (uint8_t *)(& ha->mr.fw_version)); return (str); } } void qlafx00_enable_intrs(struct qla_hw_data *ha ) { unsigned long flags ; raw_spinlock_t *tmp ; unsigned int tmp___0 ; { flags = 0UL; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ha->interrupts_on = 1U; tmp___0 = readl((void const volatile *)ha->cregbase + 133896U); writel(tmp___0 | 2147483648U, (void volatile *)ha->cregbase + 133896U); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } void qlafx00_disable_intrs(struct qla_hw_data *ha ) { unsigned long flags ; raw_spinlock_t *tmp ; unsigned int tmp___0 ; { flags = 0UL; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); ha->interrupts_on = 0U; tmp___0 = readl((void const volatile *)ha->cregbase + 133896U); writel(tmp___0 & 2147483647U, (void volatile *)ha->cregbase + 133896U); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static void qlafx00_tmf_iocb_timeout(void *data ) { srb_t *sp ; struct srb_iocb *tmf ; { sp = (srb_t *)data; tmf = & sp->u.iocb_cmd; tmf->u.tmf.comp_status = 6U; complete(& tmf->u.tmf.comp); return; } } static void qlafx00_tmf_sp_done(void *data , void *ptr , int res ) { srb_t *sp ; struct srb_iocb *tmf ; { sp = (srb_t *)ptr; tmf = & sp->u.iocb_cmd; complete(& tmf->u.tmf.comp); 
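/* Completion callback for task-management SRBs: signals the completion that
   qlafx00_async_tm_cmd() (below) waits on after issuing the TMF. */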
return; } } static int qlafx00_async_tm_cmd(fc_port_t *fcport , uint32_t flags , uint32_t lun , uint32_t tag ) { scsi_qla_host_t *vha ; struct srb_iocb *tm_iocb ; srb_t *sp ; int rval ; unsigned long tmp ; { vha = fcport->vha; rval = 258; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } tm_iocb = & sp->u.iocb_cmd; sp->type = 7U; sp->name = (char *)"tmf"; tmp = qla2x00_get_async_timeout(vha); qla2x00_init_timer(sp, tmp); tm_iocb->u.tmf.flags = flags; tm_iocb->u.tmf.lun = lun; tm_iocb->u.tmf.data = tag; sp->done = & qlafx00_tmf_sp_done; tm_iocb->timeout = & qlafx00_tmf_iocb_timeout; init_completion(& tm_iocb->u.tmf.comp); rval = qla2x00_start_sp(sp); if (rval != 0) { goto done_free_sp; } else { } ql_dbg(33554432U, vha, 20603, "Task management command issued target_id=%x\n", (int )fcport->tgt_id); wait_for_completion(& tm_iocb->u.tmf.comp); rval = (unsigned int )tm_iocb->u.tmf.comp_status == 0U ? 0 : 258; done_free_sp: (*(sp->free))((void *)vha, (void *)sp); done: ; return (rval); } } int qlafx00_abort_target(fc_port_t *fcport , unsigned int l , int tag ) { int tmp ; { tmp = qlafx00_async_tm_cmd(fcport, 2U, l, (uint32_t )tag); return (tmp); } } int qlafx00_lun_reset(fc_port_t *fcport , unsigned int l , int tag ) { int tmp ; { tmp = qlafx00_async_tm_cmd(fcport, 16U, l, (uint32_t )tag); return (tmp); } } int qlafx00_loop_reset(scsi_qla_host_t *vha ) { int ret ; struct fc_port *fcport ; struct qla_hw_data *ha ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { ha = vha->hw; if (ql2xtargetreset != 0) { __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (struct fc_port *)__mptr; goto ldv_43740; ldv_43739: ; if ((unsigned int )fcport->port_type != 5U) { goto ldv_43738; } else { } ret = (*((ha->isp_ops)->target_reset))(fcport, 0U, 0); if (ret != 0) { ql_dbg(4194304U, vha, 32829, "Bus Reset failed: Reset=%d d_id=%x.\n", ret, (int )fcport->d_id.b24); } else { } ldv_43738: __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (struct fc_port *)__mptr___0; ldv_43740: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_43739; } else { } } else { } return (0); } } int qlafx00_iospace_config(struct qla_hw_data *ha ) { char const *tmp ; int tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; char const *tmp___4 ; char const *tmp___5 ; void *tmp___6 ; char const *tmp___7 ; uint8_t tmp___8 ; { tmp___0 = pci_request_selected_regions(ha->pdev, ha->bars, "qla2xxx"); if (tmp___0 != 0) { tmp = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 334, "Failed to reserve PIO/MMIO regions (%s), aborting.\n", tmp); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[0].flags & 512UL) == 0UL) { tmp___1 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 335, "Invalid pci I/O region size (%s).\n", tmp___1); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[0].start == 0ULL && (ha->pdev)->resource[0].end == (ha->pdev)->resource[0].start) || ((ha->pdev)->resource[0].end - (ha->pdev)->resource[0].start) + 1ULL <= 1048575ULL) { tmp___2 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 295, "Invalid PCI mem BAR0 region size (%s), aborting\n", tmp___2); goto iospace_error_exit; } else { } ha->cregbase = ioremap_nocache((ha->pdev)->resource[0].start, 1048576UL); if ((unsigned long )ha->cregbase == (unsigned long )((void *)0)) { tmp___3 = pci_name((struct pci_dev const 
*)ha->pdev); ql_log_pci(0U, ha->pdev, 296, "cannot remap MMIO (%s), aborting\n", tmp___3); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[2].flags & 512UL) == 0UL) { tmp___4 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 297, "region #2 not an MMIO resource (%s), aborting\n", tmp___4); goto iospace_error_exit; } else { } if (((ha->pdev)->resource[2].start == 0ULL && (ha->pdev)->resource[2].end == (ha->pdev)->resource[2].start) || ((ha->pdev)->resource[2].end - (ha->pdev)->resource[2].start) + 1ULL <= 131071ULL) { tmp___5 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(1U, ha->pdev, 298, "Invalid PCI mem BAR2 region size (%s), aborting\n", tmp___5); goto iospace_error_exit; } else { } tmp___6 = ioremap_nocache((ha->pdev)->resource[2].start, 131072UL); ha->iobase = (device_reg_t *)tmp___6; if ((unsigned long )ha->iobase == (unsigned long )((device_reg_t *)0)) { tmp___7 = pci_name((struct pci_dev const *)ha->pdev); ql_log_pci(0U, ha->pdev, 299, "cannot remap MMIO (%s), aborting\n", tmp___7); goto iospace_error_exit; } else { } tmp___8 = 1U; ha->max_rsp_queues = tmp___8; ha->max_req_queues = tmp___8; ql_log_pci(2U, ha->pdev, 300, "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n", ha->bars, ha->cregbase, ha->iobase); return (0); iospace_error_exit: ; return (-12); } } static void qlafx00_save_queue_ptrs(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; { ha = vha->hw; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); req->length_fx00 = req->length; req->ring_fx00 = req->ring; req->dma_fx00 = req->dma; rsp->length_fx00 = rsp->length; rsp->ring_fx00 = rsp->ring; rsp->dma_fx00 = rsp->dma; ql_dbg(1073741824U, vha, 301, "req: %p, ring_fx00: %p, length_fx00: 0x%x,req->dma_fx00: 0x%llx\n", req, req->ring_fx00, (int )req->length_fx00, req->dma_fx00); ql_dbg(1073741824U, vha, 302, "rsp: %p, ring_fx00: %p, length_fx00: 0x%x,rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00, (int )rsp->length_fx00, rsp->dma_fx00); return; } } static int qlafx00_config_queues(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct req_que *req ; struct rsp_que *rsp ; dma_addr_t bar2_hdl ; { ha = vha->hw; req = *(ha->req_q_map); rsp = *(ha->rsp_q_map); bar2_hdl = (ha->pdev)->resource[2].start; req->length = (uint16_t )ha->req_que_len; req->ring = (request_t *)ha->iobase + (unsigned long )ha->req_que_off; req->dma = (dma_addr_t )ha->req_que_off + bar2_hdl; if ((unsigned long )req->ring == (unsigned long )((request_t *)0) || (unsigned int )req->length == 0U) { ql_log_pci(2U, ha->pdev, 303, "Unable to allocate memory for req_ring\n"); return (258); } else { } ql_dbg(1073741824U, vha, 304, "req: %p req_ring pointer %p req len 0x%x req off 0x%x\n, req->dma: 0x%llx", req, req->ring, (int )req->length, ha->req_que_off, req->dma); rsp->length = (uint16_t )ha->rsp_que_len; rsp->ring = (response_t *)ha->iobase + (unsigned long )ha->rsp_que_off; rsp->dma = (dma_addr_t )ha->rsp_que_off + bar2_hdl; if ((unsigned long )rsp->ring == (unsigned long )((response_t *)0) || (unsigned int )rsp->length == 0U) { ql_log_pci(2U, ha->pdev, 305, "Unable to allocate memory for rsp_ring\n"); return (258); } else { } ql_dbg(1073741824U, vha, 306, "rsp: %p rsp_ring pointer %p rsp len 0x%x rsp off 0x%x, rsp->dma: 0x%llx\n", rsp, rsp->ring, (int )rsp->length, ha->rsp_que_off, rsp->dma); return (0); } } static int qlafx00_init_fw_ready(scsi_qla_host_t *vha ) { int rval ; unsigned long wtime ; uint16_t wait_time ; struct qla_hw_data *ha ; struct device_reg_fx00 *reg ; 
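/* qlafx00_init_fw_ready(): polls aenmailbox0 within a wait_time window; on
   fault or reset codes it soft-resets the SOC, and on the ready code it
   captures the mailbox/request-queue interrupt codes and the request and
   response queue offsets and lengths from the remaining AEN mailboxes. */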
uint32_t aenmbx ; uint32_t aenmbx7 ; uint32_t pseudo_aen ; uint32_t state[5U] ; bool done ; unsigned int tmp ; { rval = 0; ha = vha->hw; reg = & (ha->iobase)->ispfx00; aenmbx7 = 0U; done = 0; wait_time = 30U; pseudo_aen = readl((void const volatile *)(& reg->pseudoaen)); if (pseudo_aen == 1U) { aenmbx7 = readl((void const volatile *)(& reg->initval7)); ha->mbx_intr_code = (uint32_t )((unsigned short )(aenmbx7 >> 16)); ha->rqstq_intr_code = (uint32_t )((unsigned short )aenmbx7); rval = qlafx00_driver_shutdown(vha, 10); if (rval != 0) { qlafx00_soft_reset(vha); } else { } } else { } wtime = (unsigned long )((int )wait_time * 250) + (unsigned long )jiffies; ldv_43788: aenmbx = readl((void const volatile *)(& reg->aenmailbox0)); __asm__ volatile ("": : : "memory"); ql_dbg(536870912U, vha, 307, "aenmbx: 0x%x\n", aenmbx); switch (aenmbx) { case 32848U: ; case 32849U: ; goto ldv_43774; case 32770U: ; case 32771U: ; case 32772U: ; case 33793U: qlafx00_soft_reset(vha); goto ldv_43774; case 32864U: aenmbx7 = readl((void const volatile *)(& reg->aenmailbox7)); ha->mbx_intr_code = (uint32_t )((unsigned short )(aenmbx7 >> 16)); ha->rqstq_intr_code = (uint32_t )((unsigned short )aenmbx7); ha->req_que_off = readl((void const volatile *)(& reg->aenmailbox1)); ha->rsp_que_off = readl((void const volatile *)(& reg->aenmailbox3)); ha->req_que_len = readl((void const volatile *)(& reg->aenmailbox5)); ha->rsp_que_len = readl((void const volatile *)(& reg->aenmailbox6)); writel(0U, (void volatile *)(& reg->aenmailbox0)); __readl((void const volatile *)(& reg->aenmailbox0)); ql_dbg(1073741824U, vha, 308, "f/w returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x\n", ha->mbx_intr_code, ha->rqstq_intr_code); writel(4294967288U, (void volatile *)ha->cregbase + 138096U); rval = 0; done = 1; goto ldv_43774; default: aenmbx7 = readl((void const volatile *)(& reg->initval7)); ha->mbx_intr_code = (uint32_t )((unsigned short )(aenmbx7 >> 16)); ha->rqstq_intr_code = (uint32_t )((unsigned short )aenmbx7); ha->req_que_off = readl((void const volatile *)(& reg->initval1)); ha->rsp_que_off = readl((void const volatile *)(& reg->initval3)); ha->req_que_len = readl((void const volatile *)(& reg->initval5)); ha->rsp_que_len = readl((void const volatile *)(& reg->initval6)); ql_dbg(1073741824U, vha, 309, "f/w returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x\n", ha->mbx_intr_code, ha->rqstq_intr_code); writel(4294967288U, (void volatile *)ha->cregbase + 138096U); rval = qlafx00_get_firmware_state(vha, (uint32_t *)(& state)); if (rval != 0) { goto ldv_43774; } else { } if (state[0] == 0U) { rval = 0; done = 1; goto ldv_43774; } else { } ql_dbg(1073741824U, vha, 310, "Sending Driver shutdown fw_state 0x%x\n", state[0]); rval = qlafx00_driver_shutdown(vha, 10); if (rval != 0) { rval = 258; goto ldv_43774; } else { } msleep(500U); wtime = (unsigned long )((int )wait_time * 250) + (unsigned long )jiffies; goto ldv_43774; } ldv_43774: ; if (! done) { if ((long )((unsigned long )jiffies - wtime) >= 0L) { tmp = readl((void const volatile *)(& reg->aenmailbox7)); ql_dbg(1073741824U, vha, 311, "Init f/w failed: aen[7]: 0x%x\n", tmp); rval = 258; done = 1; goto ldv_43787; } else { } msleep(500U); } else { } if (! 
done) { goto ldv_43788; } else { } ldv_43787: ; if (rval != 0) { ql_dbg(1073741824U, vha, 312, "%s **** FAILED ****.\n", "qlafx00_init_fw_ready"); } else { ql_dbg(1073741824U, vha, 313, "%s **** SUCCESS ****.\n", "qlafx00_init_fw_ready"); } return (rval); } } int qlafx00_fw_ready(scsi_qla_host_t *vha ) { int rval ; unsigned long wtime ; uint16_t wait_time ; uint32_t state[5U] ; { rval = 0; wait_time = 10U; wtime = (unsigned long )((int )wait_time * 250) + (unsigned long )jiffies; if (*((unsigned long *)vha + 19UL) == 0UL) { ql_dbg(1073741824U, vha, 314, "Waiting for init to complete...\n"); } else { } ldv_43804: rval = qlafx00_get_firmware_state(vha, (uint32_t *)(& state)); if (rval == 0) { if (state[0] == 4096U) { ql_dbg(1073741824U, vha, 315, "fw_state=%x\n", state[0]); rval = 0; goto ldv_43797; } else { } } else { } rval = 258; if ((long )((unsigned long )jiffies - wtime) >= 0L) { goto ldv_43797; } else { } msleep(500U); ql_dbg(1073741824U, vha, 316, "fw_state=%x curr time=%lx.\n", state[0], jiffies); goto ldv_43804; ldv_43797: ; if (rval != 0) { ql_dbg(1073741824U, vha, 317, "Firmware ready **** FAILED ****.\n"); } else { ql_dbg(1073741824U, vha, 318, "Firmware ready **** SUCCESS ****.\n"); } return (rval); } } static int qlafx00_find_all_targets(scsi_qla_host_t *vha , struct list_head *new_fcports ) { int rval ; uint16_t tgt_id ; fc_port_t *fcport ; fc_port_t *new_fcport ; int found ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; unsigned long tmp___3 ; struct list_head const *__mptr ; int tmp___4 ; int tmp___5 ; u64 tmp___6 ; u64 tmp___7 ; int tmp___8 ; u64 tmp___9 ; u64 tmp___10 ; int tmp___11 ; struct list_head const *__mptr___0 ; unsigned long tmp___12 ; { ha = vha->hw; rval = 0; tmp = constant_test_bit(5L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp == 0) { return (258); } else { } tmp___0 = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp___0 != 0) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); return (258); } else { tmp___1 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); return (258); } else { tmp___2 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___2 != 0) { atomic_set(& vha->loop_down_timer, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); return (258); } else { } } } ql_dbg(1342177280U, vha, 8328, "Listing Target bit map...\n"); ql_dump_buffer(1342177280U, vha, 8329, (uint8_t *)ha->gid_list, 32U); new_fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )new_fcport == (unsigned long )((fc_port_t *)0)) { return (259); } else { } tmp___3 = find_first_bit((unsigned long const *)ha->gid_list, 128UL); tgt_id = (uint16_t )tmp___3; goto ldv_43826; ldv_43825: new_fcport->tgt_id = tgt_id; rval = qlafx00_fx_disc(vha, new_fcport, 128); if (rval != 0) { ql_log(1U, vha, 8330, "Target info scan failed -- assuming zero-entry result...\n"); goto ldv_43815; } else { } found = 0; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_43824; ldv_43823: tmp___4 = memcmp((void const *)(& new_fcport->port_name), (void const *)(& fcport->port_name), 8UL); if (tmp___4 != 0) { goto ldv_43820; } else { } found = found + 1; if ((int )fcport->tgt_id == (int )new_fcport->tgt_id) { tmp___5 = atomic_read((atomic_t const *)(& fcport->state)); if 
(tmp___5 == 4) { goto ldv_43821; } else { } } else { } tmp___6 = wwn_to_u64((u8 *)(& fcport->port_name)); tmp___7 = wwn_to_u64((u8 *)(& fcport->node_name)); tmp___8 = atomic_read((atomic_t const *)(& fcport->state)); ql_dbg(1342177280U, vha, 8331, "TGT-ID Change(%s): Present tgt id: 0x%x state: 0x%x wwnn = %llx wwpn = %llx.\n", "qlafx00_find_all_targets", (int )fcport->tgt_id, tmp___8, tmp___7, tmp___6); tmp___9 = wwn_to_u64((u8 *)(& new_fcport->port_name)); tmp___10 = wwn_to_u64((u8 *)(& new_fcport->node_name)); ql_log(2U, vha, 8332, "TGT-ID Announce(%s): Discovered tgt id 0x%x wwnn = %llx wwpn = %llx.\n", "qlafx00_find_all_targets", (int )new_fcport->tgt_id, tmp___10, tmp___9); tmp___11 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___11 != 4) { fcport->old_tgt_id = fcport->tgt_id; fcport->tgt_id = new_fcport->tgt_id; ql_log(2U, vha, 8333, "TGT-ID: New fcport Added: %p\n", fcport); qla2x00_update_fcport(vha, fcport); } else { ql_log(2U, vha, 8334, " Existing TGT-ID %x did not get offline event from firmware.\n", (int )fcport->old_tgt_id); qla2x00_mark_device_lost(vha, fcport, 0, 0); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); kfree((void const *)new_fcport); return (rval); } goto ldv_43821; ldv_43820: __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_43824: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_43823; } else { } ldv_43821: ; if (found != 0) { goto ldv_43815; } else { } list_add_tail(& new_fcport->list, new_fcports); new_fcport = qla2x00_alloc_fcport(vha, 208U); if ((unsigned long )new_fcport == (unsigned long )((fc_port_t *)0)) { return (259); } else { } ldv_43815: tmp___12 = find_next_bit((unsigned long const *)ha->gid_list, 128UL, (unsigned long )((int )tgt_id + 1)); tgt_id = (uint16_t )tmp___12; ldv_43826: ; if ((unsigned int )tgt_id <= 127U) { goto ldv_43825; } else { } kfree((void const *)new_fcport); return (rval); } } static int qlafx00_configure_all_targets(scsi_qla_host_t *vha ) { int rval ; fc_port_t *fcport ; fc_port_t *rmptemp ; struct list_head new_fcports ; struct list_head const *__mptr ; int tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; int tmp___1 ; u64 tmp___2 ; u64 tmp___3 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; struct list_head const *__mptr___5 ; struct list_head const *__mptr___6 ; { new_fcports.next = & new_fcports; new_fcports.prev = & new_fcports; rval = qlafx00_fx_disc(vha, & (vha->hw)->mr.fcport, 129); if (rval != 0) { set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); return (rval); } else { } rval = qlafx00_find_all_targets(vha, & new_fcports); if (rval != 0) { set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); return (rval); } else { } __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_43841; ldv_43840: tmp = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp != 0) { goto ldv_43839; } else { } tmp___0 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___0 == 3) { if ((unsigned int )fcport->port_type != 4U) { qla2x00_mark_device_lost(vha, fcport, 0, 0); } else { } } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_43841: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_43840; } else { } ldv_43839: __mptr___1 = (struct 
list_head const *)new_fcports.next; fcport = (fc_port_t *)__mptr___1; __mptr___2 = (struct list_head const *)fcport->list.next; rmptemp = (fc_port_t *)__mptr___2; goto ldv_43850; ldv_43849: tmp___1 = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { goto ldv_43848; } else { } qla2x00_update_fcport(vha, fcport); list_move_tail(& fcport->list, & vha->vp_fcports); tmp___2 = wwn_to_u64((u8 *)(& fcport->port_name)); tmp___3 = wwn_to_u64((u8 *)(& fcport->node_name)); ql_log(2U, vha, 8335, "Attach new target id 0x%x wwnn = %llx wwpn = %llx.\n", (int )fcport->tgt_id, tmp___3, tmp___2); fcport = rmptemp; __mptr___3 = (struct list_head const *)rmptemp->list.next; rmptemp = (fc_port_t *)__mptr___3; ldv_43850: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& new_fcports)) { goto ldv_43849; } else { } ldv_43848: __mptr___4 = (struct list_head const *)new_fcports.next; fcport = (fc_port_t *)__mptr___4; __mptr___5 = (struct list_head const *)fcport->list.next; rmptemp = (fc_port_t *)__mptr___5; goto ldv_43858; ldv_43857: list_del(& fcport->list); kfree((void const *)fcport); fcport = rmptemp; __mptr___6 = (struct list_head const *)rmptemp->list.next; rmptemp = (fc_port_t *)__mptr___6; ldv_43858: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& new_fcports)) { goto ldv_43857; } else { } return (rval); } } int qlafx00_configure_devices(scsi_qla_host_t *vha ) { int rval ; unsigned long flags ; unsigned long save_flags ; int tmp ; { rval = 0; flags = vha->dpc_flags; save_flags = flags; ql_dbg(268435456U, vha, 8336, "Configure devices -- dpc flags =0x%lx\n", flags); rval = qlafx00_configure_all_targets(vha); if (rval == 0) { tmp = constant_test_bit(4L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp != 0) { rval = 258; } else { atomic_set(& vha->loop_state, 5); ql_log(2U, vha, 8337, "Device Ready\n"); } } else { } if (rval != 0) { ql_dbg(268435456U, vha, 8338, "%s *** FAILED ***.\n", "qlafx00_configure_devices"); } else { ql_dbg(268435456U, vha, 8339, "%s: exiting normally.\n", "qlafx00_configure_devices"); } return (rval); } } static void qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha , bool critemp ) { struct qla_hw_data *ha ; fc_port_t *fcport ; int tmp ; int tmp___0 ; struct list_head const *__mptr ; int tmp___1 ; struct list_head const *__mptr___0 ; { ha = vha->hw; vha->flags.online = 0U; ha->mr.fw_hbt_en = 0U; if (! 
critemp) { ha->flags.chip_reset_done = 0U; clear_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); vha->qla_stats.total_isp_aborts = vha->qla_stats.total_isp_aborts + 1U; ql_log(2U, vha, 319, "Performing ISP error recovery - ha = %p.\n", ha); (*((ha->isp_ops)->reset_chip))(vha); } else { } tmp___0 = atomic_read((atomic_t const *)(& vha->loop_state)); if (tmp___0 != 2) { atomic_set(& vha->loop_state, 2); atomic_set(& vha->loop_down_timer, 615); } else { tmp = atomic_read((atomic_t const *)(& vha->loop_down_timer)); if (tmp == 0) { atomic_set(& vha->loop_down_timer, 615); } else { } } __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_43878; ldv_43877: fcport->flags = 0U; tmp___1 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___1 == 4) { qla2x00_set_fcport_state___3(fcport, 3); } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_43878: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_43877; } else { } if (*((unsigned long *)ha + 2UL) == 0UL) { if ((int )critemp) { qla2x00_abort_all_cmds(vha, 65536); } else { qla2x00_abort_all_cmds(vha, 524288); } } else { } qla2x00_free_irqs(vha); if ((int )critemp) { set_bit(27L, (unsigned long volatile *)(& vha->dpc_flags)); } else { set_bit(25L, (unsigned long volatile *)(& vha->dpc_flags)); } writel(4294967288U, (void volatile *)ha->cregbase + 138096U); ql_log(2U, vha, 320, "%s Done done - ha=%p.\n", "qlafx00_abort_isp_cleanup", ha); return; } } void qlafx00_init_response_q_entries(struct rsp_que *rsp ) { uint16_t cnt ; response_t *pkt ; { rsp->ring_ptr = rsp->ring; rsp->ring_index = 0U; rsp->status_srb = (srb_t *)0; pkt = rsp->ring_ptr; cnt = 0U; goto ldv_43887; ldv_43886: pkt->signature = 3735936685U; writel(3735936685U, (void volatile *)(& pkt->signature)); pkt = pkt + 1; cnt = (uint16_t )((int )cnt + 1); ldv_43887: ; if ((int )rsp->length > (int )cnt) { goto ldv_43886; } else { } return; } } int qlafx00_rescan_isp(scsi_qla_host_t *vha ) { uint32_t status ; struct qla_hw_data *ha ; struct device_reg_fx00 *reg ; uint32_t aenmbx7 ; int tmp ; int tmp___0 ; { status = 258U; ha = vha->hw; reg = & (ha->iobase)->ispfx00; qla2x00_request_irqs(ha, *(ha->rsp_q_map)); aenmbx7 = readl((void const volatile *)(& reg->aenmailbox7)); ha->mbx_intr_code = (uint32_t )((unsigned short )(aenmbx7 >> 16)); ha->rqstq_intr_code = (uint32_t )((unsigned short )aenmbx7); ha->req_que_off = readl((void const volatile *)(& reg->aenmailbox1)); ha->rsp_que_off = readl((void const volatile *)(& reg->aenmailbox3)); ha->req_que_len = readl((void const volatile *)(& reg->aenmailbox5)); ha->rsp_que_len = readl((void const volatile *)(& reg->aenmailbox6)); ql_dbg(268435456U, vha, 8340, "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x Req que offset 0x%x Rsp que offset 0x%x\n", ha->mbx_intr_code, ha->rqstq_intr_code, ha->req_que_off, ha->rsp_que_len); writel(4294967288U, (void volatile *)ha->cregbase + 138096U); tmp = qla2x00_init_rings(vha); status = (uint32_t )tmp; if (status == 0U) { vha->flags.online = 1U; if ((vha->device_flags & 2U) != 0U) { status = 0U; } else { } tmp___0 = qlafx00_fx_disc(vha, & (vha->hw)->mr.fcport, 153); if (tmp___0 != 0) { ql_dbg(268435456U, vha, 8341, "failed to register host info\n"); } else { } } else { } scsi_unblock_requests(vha->host); return ((int )status); } } void qlafx00_timer_routine(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; uint32_t fw_heart_beat ; uint32_t aenmbx0 ; struct 
device_reg_fx00 *reg ; uint32_t tempc ; int tmp ; int tmp___0 ; uint32_t data0 ; uint32_t data1 ; int tmp___1 ; unsigned int tmp___2 ; int tmp___3 ; { ha = vha->hw; reg = & (ha->iobase)->ispfx00; if ((unsigned int )ha->mr.fw_hbt_cnt != 0U) { ha->mr.fw_hbt_cnt = (uint8_t )((int )ha->mr.fw_hbt_cnt - 1); } else { if (*((unsigned long *)ha + 2UL) == 0UL) { tmp = constant_test_bit(15L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp == 0) { tmp___0 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___0 == 0) { if ((unsigned int )ha->mr.fw_hbt_en != 0U) { fw_heart_beat = readl((void const volatile *)(& reg->fwheartbeat)); if (ha->mr.old_fw_hbt_cnt != fw_heart_beat) { ha->mr.old_fw_hbt_cnt = fw_heart_beat; ha->mr.fw_hbt_miss_cnt = 0U; } else { ha->mr.fw_hbt_miss_cnt = (uint8_t )((int )ha->mr.fw_hbt_miss_cnt + 1); if ((unsigned int )ha->mr.fw_hbt_miss_cnt == 3U) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); ha->mr.fw_hbt_miss_cnt = 0U; } else { } } } else { } } else { } } else { } } else { } ha->mr.fw_hbt_cnt = 6U; } tmp___1 = constant_test_bit(25L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___1 != 0) { aenmbx0 = readl((void const volatile *)(& reg->aenmailbox0)); if ((unsigned int )ha->mr.fw_reset_timer_exp != 0U) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); ha->mr.fw_reset_timer_exp = 0U; } else if (aenmbx0 == 32864U) { set_bit(26L, (unsigned long volatile *)(& vha->dpc_flags)); clear_bit(25L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); ha->mr.fw_reset_timer_tick = 120U; } else if (aenmbx0 == 32849U && (unsigned int )ha->mr.fw_hbt_en == 0U) { ha->mr.fw_hbt_en = 1U; } else if ((unsigned int )ha->mr.fw_reset_timer_tick == 0U) { if (ha->mr.old_aenmbx0_state == aenmbx0) { ha->mr.fw_reset_timer_exp = 1U; } else { } ha->mr.fw_reset_timer_tick = 120U; } else if (aenmbx0 == 4294967295U) { data0 = readl((void const volatile *)ha->cregbase + 262168U); data1 = readl((void const volatile *)ha->cregbase + 268324U); data0 = data0 & 4294901760U; data1 = data1 & 65535U; writel(data0 | data1, (void volatile *)ha->cregbase + 268324U); } else if ((aenmbx0 & 65280U) == 34304U) { ha->mr.fw_reset_timer_tick = 600U; } else if (aenmbx0 == 34050U) { ha->mr.fw_reset_timer_tick = 600U; } else { } ha->mr.old_aenmbx0_state = aenmbx0; ha->mr.fw_reset_timer_tick = (uint16_t )((int )ha->mr.fw_reset_timer_tick - 1); } else { } tmp___3 = constant_test_bit(27L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___3 != 0) { if ((unsigned int )ha->mr.fw_critemp_timer_tick == 0U) { tmp___2 = readl((void const volatile *)ha->cregbase + 99524U); tempc = (3153000U - ((tmp___2 & 1022U) >> 1) * 10000U) / 13825U; ql_dbg(16777216U, vha, 24594, "ISPFx00(%s): Critical temp timer, current SOC temperature: %d\n", "qlafx00_timer_routine", tempc); if (ha->mr.critical_temperature > tempc) { set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); clear_bit(27L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else { } ha->mr.fw_critemp_timer_tick = 60U; } else { ha->mr.fw_critemp_timer_tick = (uint16_t )((int )ha->mr.fw_critemp_timer_tick - 1); } } else { } return; } } int qlafx00_reset_initialize(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((vha->device_flags & 32U) != 0U) { ql_dbg(1073741824U, vha, 322, "Device in failed state\n"); return (0); } else { } ha->flags.mr_reset_hdlr_active = 1U; if (*((unsigned long *)vha + 
19UL) != 0UL) { scsi_block_requests(vha->host); qlafx00_abort_isp_cleanup(vha, 0); } else { } ql_log(2U, vha, 323, "(%s): succeeded.\n", "qlafx00_reset_initialize"); ha->flags.mr_reset_hdlr_active = 0U; return (0); } } int qlafx00_abort_isp(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; int tmp ; long tmp___0 ; long tmp___1 ; { ha = vha->hw; if (*((unsigned long *)vha + 19UL) != 0UL) { tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { tmp___1 = ldv__builtin_expect(*((unsigned long *)ha + 2UL) != 0UL, 0L); if (tmp___1 != 0L) { clear_bit(10L, (unsigned long volatile *)(& vha->dpc_flags)); return (0); } else { } } else { } scsi_block_requests(vha->host); qlafx00_abort_isp_cleanup(vha, 0); } else { scsi_block_requests(vha->host); clear_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); vha->qla_stats.total_isp_aborts = vha->qla_stats.total_isp_aborts + 1U; (*((ha->isp_ops)->reset_chip))(vha); set_bit(25L, (unsigned long volatile *)(& vha->dpc_flags)); writel(4294967288U, (void volatile *)ha->cregbase + 138096U); } ql_log(2U, vha, 325, "(%s): succeeded.\n", "qlafx00_abort_isp"); return (0); } } __inline static fc_port_t *qlafx00_get_fcport(struct scsi_qla_host *vha , int tgt_id ) { fc_port_t *fcport ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { fcport = (fc_port_t *)0; __mptr = (struct list_head const *)vha->vp_fcports.next; fcport = (fc_port_t *)__mptr; goto ldv_43928; ldv_43927: ; if ((int )fcport->tgt_id == tgt_id) { ql_dbg(33554432U, vha, 20594, "Matching fcport(%p) found with TGT-ID: 0x%x and Remote TGT_ID: 0x%x\n", fcport, (int )fcport->tgt_id, tgt_id); goto ldv_43926; } else { } __mptr___0 = (struct list_head const *)fcport->list.next; fcport = (fc_port_t *)__mptr___0; ldv_43928: ; if ((unsigned long )(& fcport->list) != (unsigned long )(& vha->vp_fcports)) { goto ldv_43927; } else { } ldv_43926: ; return (fcport); } } static void qlafx00_tgt_detach(struct scsi_qla_host *vha , int tgt_id ) { fc_port_t *fcport ; { ql_log(2U, vha, 20595, "Detach TGT-ID: 0x%x\n", tgt_id); fcport = qlafx00_get_fcport(vha, tgt_id); if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { return; } else { } qla2x00_mark_device_lost(vha, fcport, 0, 0); return; } } int qlafx00_process_aen(struct scsi_qla_host *vha , struct qla_work_evt *evt ) { int rval ; uint32_t aen_code ; uint32_t aen_data ; u32 tmp ; { rval = 0; aen_code = 65535U; aen_data = evt->u.aenfx.evtcode; switch (evt->u.aenfx.evtcode) { case 32788U: ; if (evt->u.aenfx.mbx[1] == 0U) { if (evt->u.aenfx.mbx[2] == 1U) { if (*((unsigned long *)vha + 19UL) == 0UL) { vha->flags.fw_tgt_reported = 1U; } else { } atomic_set(& vha->loop_down_timer, 0); atomic_set(& vha->loop_state, 3); set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else if (evt->u.aenfx.mbx[2] == 2U) { qlafx00_tgt_detach(vha, (int )evt->u.aenfx.mbx[3]); } else { } } else if (evt->u.aenfx.mbx[1] == 65535U) { if (evt->u.aenfx.mbx[2] == 1U) { if (*((unsigned long *)vha + 19UL) == 0UL) { vha->flags.fw_tgt_reported = 1U; } else { } set_bit(4L, (unsigned long volatile *)(& vha->dpc_flags)); } else if (evt->u.aenfx.mbx[2] == 2U) { vha->device_flags = vha->device_flags | 2U; qla2x00_mark_all_devices_lost(vha, 1); } else { } } else { } goto ldv_43942; case 32785U: aen_code = 2U; aen_data = 0U; goto ldv_43942; case 32786U: aen_code = 3U; aen_data = 0U; goto ldv_43942; case 32775U: ql_log(2U, vha, 20610, "Process critical temperature event aenmb[0]: %x\n", 
evt->u.aenfx.evtcode); scsi_block_requests(vha->host); qlafx00_abort_isp_cleanup(vha, 1); scsi_unblock_requests(vha->host); goto ldv_43942; } ldv_43942: tmp = fc_get_event_number(); fc_host_post_event(vha->host, tmp, (enum fc_host_event_code )aen_code, aen_data); return (rval); } } static void qlafx00_update_host_attr(scsi_qla_host_t *vha , struct port_info_data *pinfo ) { u64 port_name ; u64 node_name ; { port_name = 0ULL; node_name = 0ULL; port_name = wwn_to_u64((u8 *)(& pinfo->port_name)); node_name = wwn_to_u64((u8 *)(& pinfo->node_name)); ((struct fc_host_attrs *)(vha->host)->shost_data)->node_name = node_name; ((struct fc_host_attrs *)(vha->host)->shost_data)->port_name = port_name; if ((unsigned int )pinfo->port_type == 0U) { (vha->hw)->current_topology = 8U; } else { } if ((unsigned int )pinfo->link_status == 17U) { atomic_set(& vha->loop_state, 5); } else if ((unsigned int )pinfo->link_status == 16U) { atomic_set(& vha->loop_state, 2); } else { } (vha->hw)->link_data_rate = (unsigned short )pinfo->link_config; return; } } static void qla2x00_fxdisc_iocb_timeout(void *data ) { srb_t *sp ; struct srb_iocb *lio ; { sp = (srb_t *)data; lio = & sp->u.iocb_cmd; complete(& lio->u.fxiocb.fxiocb_comp); return; } } static void qla2x00_fxdisc_sp_done(void *data , void *ptr , int res ) { srb_t *sp ; struct srb_iocb *lio ; { sp = (srb_t *)ptr; lio = & sp->u.iocb_cmd; complete(& lio->u.fxiocb.fxiocb_comp); return; } } int qlafx00_fx_disc(scsi_qla_host_t *vha , fc_port_t *fcport , uint16_t fx_type ) { srb_t *sp ; struct srb_iocb *fdisc ; int rval ; struct qla_hw_data *ha ; struct host_system_info *phost_info ; struct register_host_info *preg_hsi ; struct new_utsname *p_sysid ; struct timeval tv ; struct config_info_data *pinfo ; size_t __len ; void *__ret ; size_t __len___0 ; void *__ret___0 ; size_t __len___1 ; void *__ret___1 ; size_t __len___2 ; void *__ret___2 ; size_t __len___3 ; void *__ret___3 ; size_t __len___4 ; void *__ret___4 ; size_t __len___5 ; void *__ret___5 ; struct port_info_data *pinfo___0 ; size_t __len___6 ; void *__ret___6 ; size_t __len___7 ; void *__ret___7 ; struct qlafx00_tgt_node_info *pinfo___1 ; size_t __len___8 ; void *__ret___8 ; size_t __len___9 ; void *__ret___9 ; struct qlafx00_tgt_node_info *pinfo___2 ; size_t __len___10 ; void *__ret___10 ; { rval = 258; ha = vha->hw; p_sysid = (struct new_utsname *)0; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } fdisc = & sp->u.iocb_cmd; switch ((int )fx_type) { case 1: fdisc->u.fxiocb.flags = 2U; fdisc->u.fxiocb.rsp_len = 500U; goto ldv_43979; case 2: fdisc->u.fxiocb.flags = 6U; fdisc->u.fxiocb.rsp_len = 304U; fdisc->u.fxiocb.req_data = (unsigned int )fcport->port_id; goto ldv_43979; case 128: fdisc->u.fxiocb.flags = 6U; fdisc->u.fxiocb.rsp_len = 212U; fdisc->u.fxiocb.req_data = (unsigned int )fcport->tgt_id; goto ldv_43979; case 129: fdisc->u.fxiocb.flags = 6U; fdisc->u.fxiocb.rsp_len = 128U; goto ldv_43979; case 153: fdisc->u.fxiocb.flags = 1U; fdisc->u.fxiocb.req_len = 1036U; p_sysid = utsname(); if ((unsigned long )p_sysid == (unsigned long )((struct new_utsname *)0)) { ql_log(1U, vha, 12348, "Not able to get the system information\n"); goto done_free_sp; } else { } goto ldv_43979; default: ; goto ldv_43979; } ldv_43979: ; if ((int )fdisc->u.fxiocb.flags & 1) { fdisc->u.fxiocb.req_addr = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )fdisc->u.fxiocb.req_len, & fdisc->u.fxiocb.req_dma_handle, 208U, (struct dma_attrs *)0); if ((unsigned long 
)fdisc->u.fxiocb.req_addr == (unsigned long )((void *)0)) { goto done_free_sp; } else { } if ((unsigned int )fx_type == 153U) { preg_hsi = (struct register_host_info *)fdisc->u.fxiocb.req_addr; phost_info = & preg_hsi->hsi; memset((void *)preg_hsi, 0, 1036UL); phost_info->os_type = 2U; strncpy((char *)(& phost_info->sysname), (char const *)(& p_sysid->sysname), 128UL); strncpy((char *)(& phost_info->nodename), (char const *)(& p_sysid->nodename), 64UL); strncpy((char *)(& phost_info->release), (char const *)(& p_sysid->release), 64UL); strncpy((char *)(& phost_info->version), (char const *)(& p_sysid->version), 64UL); strncpy((char *)(& phost_info->machine), (char const *)(& p_sysid->machine), 64UL); strncpy((char *)(& phost_info->domainname), (char const *)(& p_sysid->domainname), 64UL); strncpy((char *)(& phost_info->hostdriver), "8.06.00.08-k", 64UL); do_gettimeofday(& tv); preg_hsi->utc = (unsigned long long )tv.tv_sec; ql_dbg(1073741824U, vha, 329, "ISP%04X: Host registration with firmware\n", (int )(ha->pdev)->device); ql_dbg(1073741824U, vha, 330, "os_type = \'%d\', sysname = \'%s\', nodname = \'%s\'\n", phost_info->os_type, (char *)(& phost_info->sysname), (char *)(& phost_info->nodename)); ql_dbg(1073741824U, vha, 331, "release = \'%s\', version = \'%s\'\n", (char *)(& phost_info->release), (char *)(& phost_info->version)); ql_dbg(1073741824U, vha, 332, "machine = \'%s\' domainname = \'%s\', hostdriver = \'%s\'\n", (char *)(& phost_info->machine), (char *)(& phost_info->domainname), (char *)(& phost_info->hostdriver)); ql_dump_buffer(1342177280U, vha, 333, (uint8_t *)phost_info, 772U); } else { } } else { } if (((int )fdisc->u.fxiocb.flags & 2) != 0) { fdisc->u.fxiocb.rsp_addr = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )fdisc->u.fxiocb.rsp_len, & fdisc->u.fxiocb.rsp_dma_handle, 208U, (struct dma_attrs *)0); if ((unsigned long )fdisc->u.fxiocb.rsp_addr == (unsigned long )((void *)0)) { goto done_unmap_req; } else { } } else { } sp->type = 10U; sp->name = (char *)"fxdisc"; qla2x00_init_timer(sp, 20UL); fdisc->timeout = & qla2x00_fxdisc_iocb_timeout; fdisc->u.fxiocb.req_func_type = fx_type; sp->done = & qla2x00_fxdisc_sp_done; rval = qla2x00_start_sp(sp); if (rval != 0) { goto done_unmap_dma; } else { } wait_for_completion(& fdisc->u.fxiocb.fxiocb_comp); if ((unsigned int )fx_type == 1U) { pinfo = (struct config_info_data *)fdisc->u.fxiocb.rsp_addr; __len = 256UL; if (__len > 63UL) { __ret = __memcpy((void *)(& (vha->hw)->mr.product_name), (void const *)(& pinfo->product_name), __len); } else { __ret = __builtin_memcpy((void *)(& (vha->hw)->mr.product_name), (void const *)(& pinfo->product_name), __len); } __len___0 = 64UL; if (__len___0 > 63UL) { __ret___0 = __memcpy((void *)(& (vha->hw)->mr.symbolic_name), (void const *)(& pinfo->symbolic_name), __len___0); } else { __ret___0 = __builtin_memcpy((void *)(& (vha->hw)->mr.symbolic_name), (void const *)(& pinfo->symbolic_name), __len___0); } __len___1 = 32UL; if (__len___1 > 63UL) { __ret___1 = __memcpy((void *)(& (vha->hw)->mr.serial_num), (void const *)(& pinfo->serial_num), __len___1); } else { __ret___1 = __builtin_memcpy((void *)(& (vha->hw)->mr.serial_num), (void const *)(& pinfo->serial_num), __len___1); } __len___2 = 16UL; if (__len___2 > 63UL) { __ret___2 = __memcpy((void *)(& (vha->hw)->mr.hw_version), (void const *)(& pinfo->hw_version), __len___2); } else { __ret___2 = __builtin_memcpy((void *)(& (vha->hw)->mr.hw_version), (void const *)(& pinfo->hw_version), __len___2); } __len___3 = 16UL; if (__len___3 > 63UL) { 
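/* Each __len/__ret pair in this stretch looks like CIL's expansion of the kernel's
 * memcpy() macro: constant-size copies larger than 63 bytes dispatch to __memcpy(),
 * smaller ones to __builtin_memcpy(). For this fixed 16-byte fw_version copy the
 * __memcpy() call that follows is never reached. (The macro attribution is an
 * assumption; the size dispatch itself is visible in the code.) */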
__ret___3 = __memcpy((void *)(& (vha->hw)->mr.fw_version), (void const *)(& pinfo->fw_version), __len___3); } else { __ret___3 = __builtin_memcpy((void *)(& (vha->hw)->mr.fw_version), (void const *)(& pinfo->fw_version), __len___3); } strim((char *)(& (vha->hw)->mr.fw_version)); __len___4 = 16UL; if (__len___4 > 63UL) { __ret___4 = __memcpy((void *)(& (vha->hw)->mr.uboot_version), (void const *)(& pinfo->uboot_version), __len___4); } else { __ret___4 = __builtin_memcpy((void *)(& (vha->hw)->mr.uboot_version), (void const *)(& pinfo->uboot_version), __len___4); } __len___5 = 32UL; if (__len___5 > 63UL) { __ret___5 = __memcpy((void *)(& (vha->hw)->mr.fru_serial_num), (void const *)(& pinfo->fru_serial_num), __len___5); } else { __ret___5 = __builtin_memcpy((void *)(& (vha->hw)->mr.fru_serial_num), (void const *)(& pinfo->fru_serial_num), __len___5); } (vha->hw)->mr.critical_temperature = pinfo->nominal_temp_value != 0U ? pinfo->nominal_temp_value : 80U; ha->mr.extended_io_enabled = (pinfo->enabled_capabilities & 32U) != 0U; } else if ((unsigned int )fx_type == 2U) { pinfo___0 = (struct port_info_data *)fdisc->u.fxiocb.rsp_addr; __len___6 = 8UL; if (__len___6 > 63UL) { __ret___6 = __memcpy((void *)(& vha->node_name), (void const *)(& pinfo___0->node_name), __len___6); } else { __ret___6 = __builtin_memcpy((void *)(& vha->node_name), (void const *)(& pinfo___0->node_name), __len___6); } __len___7 = 8UL; if (__len___7 > 63UL) { __ret___7 = __memcpy((void *)(& vha->port_name), (void const *)(& pinfo___0->port_name), __len___7); } else { __ret___7 = __builtin_memcpy((void *)(& vha->port_name), (void const *)(& pinfo___0->port_name), __len___7); } vha->d_id.b.domain = pinfo___0->port_id[0]; vha->d_id.b.area = pinfo___0->port_id[1]; vha->d_id.b.al_pa = pinfo___0->port_id[2]; qlafx00_update_host_attr(vha, pinfo___0); ql_dump_buffer(1073872896U, vha, 321, (uint8_t *)pinfo___0, 16U); } else if ((unsigned int )fx_type == 128U) { pinfo___1 = (struct qlafx00_tgt_node_info *)fdisc->u.fxiocb.rsp_addr; __len___8 = 8UL; if (__len___8 > 63UL) { __ret___8 = __memcpy((void *)(& fcport->node_name), (void const *)(& pinfo___1->tgt_node_wwnn), __len___8); } else { __ret___8 = __builtin_memcpy((void *)(& fcport->node_name), (void const *)(& pinfo___1->tgt_node_wwnn), __len___8); } __len___9 = 8UL; if (__len___9 > 63UL) { __ret___9 = __memcpy((void *)(& fcport->port_name), (void const *)(& pinfo___1->tgt_node_wwpn), __len___9); } else { __ret___9 = __builtin_memcpy((void *)(& fcport->port_name), (void const *)(& pinfo___1->tgt_node_wwpn), __len___9); } fcport->port_type = 5; ql_dump_buffer(1073872896U, vha, 324, (uint8_t *)pinfo___1, 16U); } else if ((unsigned int )fx_type == 129U) { pinfo___2 = (struct qlafx00_tgt_node_info *)fdisc->u.fxiocb.rsp_addr; ql_dump_buffer(1073872896U, vha, 326, (uint8_t *)pinfo___2, 16U); __len___10 = 128UL; if (__len___10 > 63UL) { __ret___10 = __memcpy((void *)(vha->hw)->gid_list, (void const *)pinfo___2, __len___10); } else { __ret___10 = __builtin_memcpy((void *)(vha->hw)->gid_list, (void const *)pinfo___2, __len___10); } } else { } rval = (int )fdisc->u.fxiocb.result; done_unmap_dma: ; if ((unsigned long )fdisc->u.fxiocb.rsp_addr != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )fdisc->u.fxiocb.rsp_len, fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle, (struct dma_attrs *)0); } else { } done_unmap_req: ; if ((unsigned long )fdisc->u.fxiocb.req_addr != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )fdisc->u.fxiocb.req_len, 
fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle, (struct dma_attrs *)0); } else { } done_free_sp: (*(sp->free))((void *)vha, (void *)sp); done: ; return (rval); } } static void qlafx00_abort_iocb_timeout(void *data ) { srb_t *sp ; struct srb_iocb *abt ; { sp = (srb_t *)data; abt = & sp->u.iocb_cmd; abt->u.abt.comp_status = 6U; complete(& abt->u.abt.comp); return; } } static void qlafx00_abort_sp_done(void *data , void *ptr , int res ) { srb_t *sp ; struct srb_iocb *abt ; { sp = (srb_t *)ptr; abt = & sp->u.iocb_cmd; complete(& abt->u.abt.comp); return; } } static int qlafx00_async_abt_cmd(srb_t *cmd_sp ) { scsi_qla_host_t *vha ; fc_port_t *fcport ; struct srb_iocb *abt_iocb ; srb_t *sp ; int rval ; { vha = (cmd_sp->fcport)->vha; fcport = cmd_sp->fcport; rval = 258; sp = qla2x00_get_sp(vha, fcport, 208U); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { goto done; } else { } abt_iocb = & sp->u.iocb_cmd; sp->type = 12U; sp->name = (char *)"abort"; qla2x00_init_timer(sp, 20UL); abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; sp->done = & qlafx00_abort_sp_done; abt_iocb->timeout = & qlafx00_abort_iocb_timeout; init_completion(& abt_iocb->u.abt.comp); rval = qla2x00_start_sp(sp); if (rval != 0) { goto done_free_sp; } else { } ql_dbg(33554432U, vha, 20604, "Abort command issued - hdl=%x, target_id=%x\n", cmd_sp->handle, (int )fcport->tgt_id); wait_for_completion(& abt_iocb->u.abt.comp); rval = (unsigned int )abt_iocb->u.abt.comp_status == 0U ? 0 : 258; done_free_sp: (*(sp->free))((void *)vha, (void *)sp); done: ; return (rval); } } int qlafx00_abort_command(srb_t *sp ) { unsigned long flags ; uint32_t handle ; fc_port_t *fcport ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct req_que *req ; raw_spinlock_t *tmp ; int tmp___0 ; { flags = 0UL; fcport = sp->fcport; vha = fcport->vha; ha = vha->hw; req = vha->req; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); handle = 1U; goto ldv_44064; ldv_44063: ; if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )sp) { goto ldv_44062; } else { } handle = handle + 1U; ldv_44064: ; if (handle <= 1023U) { goto ldv_44063; } else { } ldv_44062: spin_unlock_irqrestore(& ha->hardware_lock, flags); if (handle == 1024U) { return (258); } else { } tmp___0 = qlafx00_async_abt_cmd(sp); return (tmp___0); } } int qlafx00_initialize_adapter(struct scsi_qla_host *vha ) { int rval ; struct qla_hw_data *ha ; uint32_t tempc ; unsigned int tmp ; { ha = vha->hw; vha->flags.online = 0U; ha->flags.chip_reset_done = 0U; vha->flags.reset_active = 0U; ha->flags.pci_channel_io_perm_failure = 0U; ha->flags.eeh_busy = 0U; atomic_set(& vha->loop_down_timer, 255); atomic_set(& vha->loop_state, 2); vha->device_flags = 2U; vha->dpc_flags = 0UL; vha->flags.management_server_logged_in = 0U; vha->marker_needed = 0U; ha->isp_abort_cnt = 0U; ha->beacon_blink_led = 0U; set_bit(0L, (unsigned long volatile *)(& ha->req_qid_map)); set_bit(0L, (unsigned long volatile *)(& ha->rsp_qid_map)); ql_dbg(1073741824U, vha, 327, "Configuring PCI space...\n"); rval = (*((ha->isp_ops)->pci_config))(vha); if (rval != 0) { ql_log(1U, vha, 328, "Unable to configure PCI space.\n"); return (rval); } else { } rval = qlafx00_init_fw_ready(vha); if (rval != 0) { return (rval); } else { } qlafx00_save_queue_ptrs(vha); rval = qlafx00_config_queues(vha); if (rval != 0) { return (rval); } else { } rval = qla2x00_alloc_outstanding_cmds(ha, vha->req); if (rval != 0) { return (rval); } else { } rval = qla2x00_init_rings(vha); 
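/* The readl()/tempc arithmetic that follows appears to be the same SOC temperature
 * conversion used in qlafx00_timer_routine: bits [9:1] of the register at
 * cregbase + 99524U are extracted with ((tmp & 1022U) >> 1) and mapped linearly via
 * (3153000U - raw * 10000U) / 13825U. The constants are taken verbatim from the code;
 * their hardware meaning is not documented in this file. */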
ha->flags.chip_reset_done = 1U; tmp = readl((void const volatile *)ha->cregbase + 99524U); tempc = (3153000U - ((tmp & 1022U) >> 1) * 10000U) / 13825U; ql_dbg(1073741824U, vha, 338, "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n", "qlafx00_initialize_adapter", tempc); return (rval); } } uint32_t qlafx00_fw_state_show(struct device *dev , struct device_attribute *attr , char *buf ) { scsi_qla_host_t *vha ; struct device const *__mptr ; void *tmp ; int rval ; uint32_t state[1U] ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = shost_priv((struct Scsi_Host *)__mptr + 0xfffffffffffff7d8UL); vha = (scsi_qla_host_t *)tmp; rval = 258; tmp___0 = qla2x00_reset_active(vha); if (tmp___0 != 0) { ql_log(1U, vha, 28878, "ISP reset active.\n"); } else if (*((unsigned long *)vha->hw + 2UL) == 0UL) { rval = qlafx00_get_firmware_state(vha, (uint32_t *)(& state)); } else { } if (rval != 0) { memset((void *)(& state), -1, 4UL); } else { } return (state[0]); } } void qlafx00_get_host_speed(struct Scsi_Host *shost ) { struct qla_hw_data *ha ; void *tmp ; u32 speed ; { tmp = shost_priv(shost); ha = ((struct scsi_qla_host *)tmp)->hw; speed = 0U; switch ((int )ha->link_data_rate) { case 2: speed = 2U; goto ldv_44088; case 4: speed = 8U; goto ldv_44088; case 8: speed = 16U; goto ldv_44088; case 10: speed = 4U; goto ldv_44088; } ldv_44088: ((struct fc_host_attrs *)shost->shost_data)->speed = speed; return; } } __inline static void qlafx00_handle_sense(srb_t *sp , uint8_t *sense_data , uint32_t par_sense_len , uint32_t sense_len , struct rsp_que *rsp , int res ) { struct scsi_qla_host *vha ; struct scsi_cmnd *cp ; uint32_t track_sense_len ; size_t __len ; void *__ret ; { vha = (sp->fcport)->vha; cp = sp->u.scmd.cmd; sp->u.scmd.fw_sense_length = sense_len; if (sense_len > 95U) { sense_len = 96U; } else { } sp->u.scmd.request_sense_length = sense_len; sp->u.scmd.request_sense_ptr = cp->sense_buffer; track_sense_len = sense_len; if (sense_len > par_sense_len) { sense_len = par_sense_len; } else { } __len = (size_t )sense_len; __ret = __builtin_memcpy((void *)cp->sense_buffer, (void const *)sense_data, __len); sp->u.scmd.fw_sense_length = sp->u.scmd.fw_sense_length - sense_len; sp->u.scmd.request_sense_ptr = cp->sense_buffer + (unsigned long )sense_len; track_sense_len = track_sense_len - sense_len; sp->u.scmd.request_sense_length = track_sense_len; ql_dbg(134217728U, vha, 12365, "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n", sense_len, par_sense_len, track_sense_len); if (sp->u.scmd.fw_sense_length != 0U) { rsp->status_srb = sp; cp->result = res; } else { } if (sense_len != 0U) { ql_dbg(134348800U, vha, 12345, "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n", ((sp->fcport)->vha)->host_no, (cp->device)->id, (cp->device)->lun, cp); ql_dump_buffer(134348800U, vha, 12361, cp->sense_buffer, sense_len); } else { } return; } } static void qlafx00_tm_iocb_entry(scsi_qla_host_t *vha , struct req_que *req , struct tsk_mgmt_entry_fx00 *pkt , srb_t *sp , __le16 sstatus , __le16 cpstatus ) { struct srb_iocb *tmf ; { tmf = & sp->u.iocb_cmd; if ((unsigned int )cpstatus != 0U || ((int )sstatus & 256) != 0) { cpstatus = 1U; } else { } tmf->u.tmf.comp_status = cpstatus; (*(sp->done))((void *)vha, (void *)sp, 0); return; } } static void qlafx00_abort_iocb_entry(scsi_qla_host_t *vha , struct req_que *req , struct abort_iocb_entry_fx00 *pkt ) { char func[9U] ; srb_t *sp ; struct srb_iocb *abt ; { func[0] = 'A'; func[1] = 'B'; func[2] = 'T'; func[3] = '_'; func[4] = 'I'; func[5] = 'O'; 
func[6] = 'C'; func[7] = 'B'; func[8] = '\000'; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)pkt); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } abt = & sp->u.iocb_cmd; abt->u.abt.comp_status = pkt->tgt_id_sts; (*(sp->done))((void *)vha, (void *)sp, 0); return; } } static void qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha , struct req_que *req , struct ioctl_iocb_entry_fx00 *pkt ) { char func[10U] ; srb_t *sp ; struct fc_bsg_job *bsg_job ; struct srb_iocb *iocb_job ; int res ; struct qla_mt_iocb_rsp_fx00 fstatus ; uint8_t *fw_sts_ptr ; size_t __len ; void *__ret ; size_t __len___0 ; void *__ret___0 ; { func[0] = 'I'; func[1] = 'O'; func[2] = 'S'; func[3] = 'B'; func[4] = '_'; func[5] = 'I'; func[6] = 'O'; func[7] = 'C'; func[8] = 'B'; func[9] = '\000'; sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)pkt); if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { return; } else { } if ((unsigned int )sp->type == 10U) { iocb_job = & sp->u.iocb_cmd; iocb_job->u.fxiocb.seq_number = pkt->seq_no; iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags; iocb_job->u.fxiocb.result = pkt->status; if (((int )iocb_job->u.fxiocb.flags & 8) != 0) { iocb_job->u.fxiocb.req_data = pkt->dataword_r; } else { } } else { bsg_job = sp->u.bsg_job; memset((void *)(& fstatus), 0, 56UL); fstatus.reserved_1 = pkt->reserved_0; fstatus.func_type = pkt->comp_func_num; fstatus.ioctl_flags = pkt->fw_iotcl_flags; fstatus.ioctl_data = pkt->dataword_r; fstatus.adapid = pkt->adapid; fstatus.adapid_hi = pkt->adapid_hi; fstatus.reserved_2 = pkt->reserved_1; fstatus.res_count = (int32_t )pkt->residuallen; fstatus.status = pkt->status; fstatus.seq_number = pkt->seq_no; __len = 20UL; if (__len > 63UL) { __ret = __memcpy((void *)(& fstatus.reserved_3), (void const *)(& pkt->reserved_2), __len); } else { __ret = __builtin_memcpy((void *)(& fstatus.reserved_3), (void const *)(& pkt->reserved_2), __len); } fw_sts_ptr = (uint8_t *)(bsg_job->req)->sense + 16UL; __len___0 = 56UL; if (__len___0 > 63UL) { __ret___0 = __memcpy((void *)fw_sts_ptr, (void const *)(& fstatus), __len___0); } else { __ret___0 = __builtin_memcpy((void *)fw_sts_ptr, (void const *)(& fstatus), __len___0); } bsg_job->reply_len = 73U; ql_dump_buffer(8421376U, (sp->fcport)->vha, 20608, (uint8_t *)pkt, 64U); ql_dump_buffer(8421376U, (sp->fcport)->vha, 20596, fw_sts_ptr, 56U); (bsg_job->reply)->result = 0U; res = 0; (bsg_job->reply)->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; } (*(sp->done))((void *)vha, (void *)sp, res); return; } } static void qlafx00_status_entry(scsi_qla_host_t *vha , struct rsp_que *rsp , void *pkt ) { srb_t *sp ; fc_port_t *fcport ; struct scsi_cmnd *cp ; struct sts_entry_fx00 *sts ; __le16 comp_status ; __le16 scsi_status ; uint16_t ox_id ; __le16 lscsi_status ; int32_t resid ; uint32_t sense_len ; uint32_t par_sense_len ; uint32_t rsp_info_len ; uint32_t resid_len ; uint32_t fw_resid_len ; uint8_t *rsp_info ; uint8_t *sense_data ; struct qla_hw_data *ha ; uint32_t hindex ; uint32_t handle ; uint16_t que ; struct req_que *req ; int logit ; int res ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; unsigned int tmp___2 ; unsigned int tmp___3 ; unsigned int tmp___4 ; int tmp___5 ; int tmp___6 ; unsigned int tmp___7 ; { rsp_info = (uint8_t *)0U; sense_data = (uint8_t *)0U; ha = vha->hw; logit = 1; res = 0; sts = (struct sts_entry_fx00 *)pkt; comp_status = sts->comp_status; scsi_status = (unsigned int )sts->scsi_status & 4095U; hindex = 
sts->handle; handle = (uint32_t )((unsigned short )hindex); que = (unsigned short )(hindex >> 16); req = *(ha->req_q_map + (unsigned long )que); if ((uint32_t )req->num_outstanding_cmds > handle) { sp = *(req->outstanding_cmds + (unsigned long )handle); } else { sp = (srb_t *)0; } if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_dbg(134217728U, vha, 12340, "Invalid status handle (0x%x).\n", handle); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); return; } else { } if ((unsigned int )sp->type == 7U) { *(req->outstanding_cmds + (unsigned long )handle) = (srb_t *)0; qlafx00_tm_iocb_entry(vha, req, (struct tsk_mgmt_entry_fx00 *)pkt, sp, (int )scsi_status, (int )comp_status); return; } else { } if ((unsigned int )comp_status == 0U && (unsigned int )scsi_status == 0U) { qla2x00_do_host_ramp_up(vha); qla2x00_process_completed_request(vha, req, handle); return; } else { } *(req->outstanding_cmds + (unsigned long )handle) = (srb_t *)0; cp = sp->u.scmd.cmd; if ((unsigned long )cp == (unsigned long )((struct scsi_cmnd *)0)) { ql_dbg(134217728U, vha, 12360, "Command already returned (0x%x/%p).\n", handle, sp); return; } else { } lscsi_status = (unsigned int )scsi_status & 254U; fcport = sp->fcport; ox_id = 0U; fw_resid_len = 0U; resid_len = fw_resid_len; rsp_info_len = resid_len; par_sense_len = rsp_info_len; sense_len = par_sense_len; if (((int )scsi_status & 512) != 0) { sense_len = sts->sense_len; } else { } if (((int )scsi_status & 3072) != 0) { resid_len = sts->residual_len; } else { } if ((unsigned int )comp_status == 21U) { fw_resid_len = sts->residual_len; } else { } sense_data = (uint8_t *)(& sts->data); rsp_info = sense_data; par_sense_len = 32U; if ((unsigned int )comp_status == 0U && ((int )scsi_status & 1024) != 0) { comp_status = 7U; } else { } switch ((int )comp_status) { case 0: ; case 28: ; if ((unsigned int )scsi_status == 0U) { res = 0; goto ldv_44171; } else { } if (((int )scsi_status & 3072) != 0) { resid = (int32_t )resid_len; scsi_set_resid(cp, resid); if ((unsigned int )lscsi_status == 0U) { tmp___0 = scsi_bufflen(cp); if (tmp___0 - (unsigned int )resid < cp->underflow) { tmp = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12368, "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", resid, tmp); res = 458752; goto ldv_44171; } else { } } else { } } else { } res = (int )lscsi_status; if ((unsigned int )lscsi_status == 40U) { ql_dbg(134217728U, fcport->vha, 12369, "QUEUE FULL detected.\n"); goto ldv_44171; } else { } logit = 0; if ((unsigned int )lscsi_status != 2U) { goto ldv_44171; } else { } memset((void *)cp->sense_buffer, 0, 96UL); if (((int )scsi_status & 512) == 0) { goto ldv_44171; } else { } qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len, rsp, res); goto ldv_44171; case 21: ; if ((ha->device_type & 134217728U) != 0U || (ha->device_type & 131072U) != 0U) { resid = (int32_t )fw_resid_len; } else { resid = (int32_t )resid_len; } scsi_set_resid(cp, resid); if (((int )scsi_status & 2048) != 0) { if (((ha->device_type & 134217728U) != 0U || (ha->device_type & 131072U) != 0U) && fw_resid_len != resid_len) { tmp___1 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12370, "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, tmp___1); res = (int )lscsi_status | 458752; goto check_scsi_status; } else { } if ((unsigned int )lscsi_status == 0U) { tmp___3 = scsi_bufflen(cp); if (tmp___3 - (unsigned int )resid < cp->underflow) { tmp___2 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12371, "Mid-layer 
underflow detected (0x%x of 0x%x bytes, cp->underflow: 0x%x).\n", resid, tmp___2, cp->underflow); res = 458752; goto ldv_44171; } else { } } else { } } else if ((unsigned int )lscsi_status != 40U && (unsigned int )lscsi_status != 8U) { tmp___4 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12372, "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, tmp___4); res = (int )lscsi_status | 458752; goto check_scsi_status; } else { ql_dbg(134217728U, fcport->vha, 12373, "scsi_status: 0x%x, lscsi_status: 0x%x\n", (int )scsi_status, (int )lscsi_status); } res = (int )lscsi_status; logit = 0; check_scsi_status: ; if ((unsigned int )lscsi_status != 0U) { if ((unsigned int )lscsi_status == 40U) { ql_dbg(134217728U, fcport->vha, 12374, "QUEUE FULL detected.\n"); logit = 1; goto ldv_44171; } else { } if ((unsigned int )lscsi_status != 2U) { goto ldv_44171; } else { } memset((void *)cp->sense_buffer, 0, 96UL); if (((int )scsi_status & 512) == 0) { goto ldv_44171; } else { } qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len, rsp, res); } else { } goto ldv_44171; case 41: ; case 42: ; case 43: ; case 1: ; case 40: ; case 6: ; case 4: res = 917504; tmp___5 = atomic_read((atomic_t const *)(& fcport->state)); ql_dbg(134217728U, fcport->vha, 12375, "Port down status: port-state=0x%x.\n", tmp___5); tmp___6 = atomic_read((atomic_t const *)(& fcport->state)); if (tmp___6 == 4) { qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); } else { } goto ldv_44171; case 5: res = 524288; goto ldv_44171; default: res = 458752; goto ldv_44171; } ldv_44171: ; if (logit != 0) { tmp___7 = scsi_bufflen(cp); ql_dbg(134217728U, fcport->vha, 12376, "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n", (int )comp_status, (int )scsi_status, res, vha->host_no, (cp->device)->id, (cp->device)->lun, (int )fcport->tgt_id, (int )lscsi_status, cp->cmnd, tmp___7, rsp_info_len, resid_len, fw_resid_len, sense_len, par_sense_len, rsp_info_len); } else { } if (res == 0) { qla2x00_do_host_ramp_up(vha); } else { } if ((unsigned long )rsp->status_srb == (unsigned long )((srb_t *)0)) { (*(sp->done))((void *)ha, (void *)sp, res); } else { } return; } } static void qlafx00_status_cont_entry(struct rsp_que *rsp , sts_cont_entry_t *pkt ) { uint8_t sense_sz ; struct qla_hw_data *ha ; struct scsi_qla_host *vha ; void *tmp ; srb_t *sp ; struct scsi_cmnd *cp ; uint32_t sense_len ; uint8_t *sense_ptr ; size_t __len ; void *__ret ; { sense_sz = 0U; ha = rsp->hw; tmp = pci_get_drvdata(ha->pdev); vha = (struct scsi_qla_host *)tmp; sp = rsp->status_srb; if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_dbg(134217728U, vha, 12343, "no SP, sp = %p\n", sp); return; } else { } if (sp->u.scmd.fw_sense_length == 0U) { ql_dbg(134217728U, vha, 12363, "no fw sense data, sp = %p\n", sp); return; } else { } cp = sp->u.scmd.cmd; if ((unsigned long )cp == (unsigned long )((struct scsi_cmnd *)0)) { ql_log(1U, vha, 12347, "cmd is NULL: already returned to OS (sp=%p).\n", sp); rsp->status_srb = (srb_t *)0; return; } else { } if (sp->u.scmd.request_sense_length == 0U) { ql_dbg(134217728U, vha, 12364, "no sense data, sp = %p\n", sp); } else { sense_len = sp->u.scmd.request_sense_length; sense_ptr = sp->u.scmd.request_sense_ptr; ql_dbg(134217728U, vha, 12367, "sp=%p sense_len=0x%x sense_ptr=%p.\n", sp, sense_len, sense_ptr); if (sense_len > 60U) { sense_sz = 60U; } else { sense_sz = (uint8_t 
)sense_len; } ql_dump_buffer(134348800U, vha, 12366, (uint8_t *)pkt, 64U); __len = (size_t )sense_sz; __ret = __builtin_memcpy((void *)sense_ptr, (void const *)(& pkt->data), __len); ql_dump_buffer(134348800U, vha, 12362, sense_ptr, (uint32_t )sense_sz); sense_len = sense_len - (uint32_t )sense_sz; sense_ptr = sense_ptr + (unsigned long )sense_sz; sp->u.scmd.request_sense_ptr = sense_ptr; sp->u.scmd.request_sense_length = sense_len; } sense_len = sp->u.scmd.fw_sense_length; sense_len = sense_len > 60U ? sense_len - 60U : 0U; sp->u.scmd.fw_sense_length = sense_len; if (sense_len == 0U) { rsp->status_srb = (srb_t *)0; (*(sp->done))((void *)ha, (void *)sp, cp->result); } else { } return; } } static void qlafx00_multistatus_entry(struct scsi_qla_host *vha , struct rsp_que *rsp , void *pkt ) { srb_t *sp ; struct multi_sts_entry_fx00 *stsmfx ; struct qla_hw_data *ha ; uint32_t handle ; uint32_t hindex ; uint32_t handle_count ; uint32_t i ; uint16_t que ; struct req_que *req ; __le32 *handle_ptr ; { ha = vha->hw; stsmfx = (struct multi_sts_entry_fx00 *)pkt; handle_count = (uint32_t )stsmfx->handle_count; if (handle_count > 15U) { ql_dbg(134217728U, vha, 12341, "Invalid handle count (0x%x).\n", handle_count); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); return; } else { } handle_ptr = (__le32 *)(& stsmfx->handles); i = 0U; goto ldv_44213; ldv_44212: hindex = *handle_ptr; handle = (uint32_t )((unsigned short )hindex); que = (unsigned short )(hindex >> 16); req = *(ha->req_q_map + (unsigned long )que); if ((uint32_t )req->num_outstanding_cmds > handle) { sp = *(req->outstanding_cmds + (unsigned long )handle); } else { sp = (srb_t *)0; } if ((unsigned long )sp == (unsigned long )((srb_t *)0)) { ql_dbg(134217728U, vha, 12356, "Invalid status handle (0x%x).\n", handle); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); return; } else { } qla2x00_process_completed_request(vha, req, handle); handle_ptr = handle_ptr + 1; i = i + 1U; ldv_44213: ; if (i < handle_count) { goto ldv_44212; } else { } return; } } static void qlafx00_error_entry(scsi_qla_host_t *vha , struct rsp_que *rsp , struct sts_entry_fx00 *pkt , uint8_t estatus , uint8_t etype ) { srb_t *sp ; struct qla_hw_data *ha ; char func[11U] ; uint16_t que ; struct req_que *req ; int res ; { ha = vha->hw; func[0] = 'E'; func[1] = 'R'; func[2] = 'R'; func[3] = 'O'; func[4] = 'R'; func[5] = '-'; func[6] = 'I'; func[7] = 'O'; func[8] = 'C'; func[9] = 'B'; func[10] = '\000'; que = (unsigned short )(pkt->handle >> 16); req = (struct req_que *)0; res = 458752; ql_dbg(33554432U, vha, 20607, "type of error status in response: 0x%x\n", (int )estatus); req = *(ha->req_q_map + (unsigned long )que); sp = qla2x00_get_sp_from_handle(vha, (char const *)(& func), req, (void *)pkt); if ((unsigned long )sp != (unsigned long )((srb_t *)0)) { (*(sp->done))((void *)ha, (void *)sp, res); return; } else { } set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); return; } } static void qlafx00_process_response_queue(struct scsi_qla_host *vha , struct rsp_que *rsp ) { struct sts_entry_fx00 *pkt ; response_t *lptr ; unsigned int tmp ; { goto ldv_44243; ldv_44242: lptr = rsp->ring_ptr; memcpy_fromio((void *)(& rsp->rsp_pkt), (void const volatile *)lptr, 64UL); pkt = (struct sts_entry_fx00 *)(& rsp->rsp_pkt); rsp->ring_index = (uint16_t )((int )rsp->ring_index + 1); if ((int )rsp->ring_index == (int )rsp->length) { rsp->ring_index = 0U; rsp->ring_ptr = rsp->ring; } else { 
rsp->ring_ptr = rsp->ring_ptr + 1; } if ((unsigned int )pkt->entry_status != 0U && (unsigned int )pkt->entry_type != 12U) { qlafx00_error_entry(vha, rsp, pkt, (int )pkt->entry_status, (int )pkt->entry_type); goto next_iter; } else { } switch ((int )pkt->entry_type) { case 1: qlafx00_status_entry(vha, rsp, (void *)pkt); goto ldv_44236; case 4: qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); goto ldv_44236; case 13: qlafx00_multistatus_entry(vha, rsp, (void *)pkt); goto ldv_44236; case 8: qlafx00_abort_iocb_entry(vha, rsp->req, (struct abort_iocb_entry_fx00 *)pkt); goto ldv_44236; case 12: qlafx00_ioctl_iosb_entry(vha, rsp->req, (struct ioctl_iocb_entry_fx00 *)pkt); goto ldv_44236; default: ql_dbg(33554432U, vha, 20609, "Received unknown response pkt type %x entry status=%x.\n", (int )pkt->entry_type, (int )pkt->entry_status); goto ldv_44236; } ldv_44236: ; next_iter: writel(3735936685U, (void volatile *)(& lptr->signature)); __asm__ volatile ("sfence": : : "memory"); ldv_44243: tmp = readl((void const volatile *)(& (rsp->ring_ptr)->signature)); if (tmp != 3735936685U) { goto ldv_44242; } else { } writel((unsigned int )rsp->ring_index, (void volatile *)rsp->rsp_q_out); return; } } static void qlafx00_async_event(scsi_qla_host_t *vha ) { struct qla_hw_data *ha ; struct device_reg_fx00 *reg ; int data_size ; unsigned short tmp ; unsigned short tmp___0 ; unsigned short tmp___1 ; unsigned short tmp___2 ; unsigned short tmp___3 ; unsigned short tmp___4 ; unsigned short tmp___5 ; unsigned short tmp___6 ; unsigned short tmp___7 ; unsigned short tmp___8 ; { ha = vha->hw; data_size = 1; reg = & (ha->iobase)->ispfx00; switch (ha->aenmb[0]) { case 32770U: ql_log(1U, vha, 20601, "ISP System Error - mbx1=%x\n", ha->aenmb[0]); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_44252; case 32866U: ql_dbg(33554432U, vha, 20598, "Asynchronous FW shutdown requested.\n"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); goto ldv_44252; case 32788U: tmp = readw((void const volatile *)(& reg->aenmailbox1)); ha->aenmb[1] = (uint32_t )tmp; tmp___0 = readw((void const volatile *)(& reg->aenmailbox2)); ha->aenmb[2] = (uint32_t )tmp___0; tmp___1 = readw((void const volatile *)(& reg->aenmailbox3)); ha->aenmb[3] = (uint32_t )tmp___1; ql_dbg(33554432U, vha, 20599, "Asynchronous port Update received aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n", ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]); data_size = 4; goto ldv_44252; case 32773U: ql_log(2U, vha, 20613, "Asynchronous over temperature event received aenmb[0]: %x\n", ha->aenmb[0]); goto ldv_44252; case 32774U: ql_log(2U, vha, 20614, "Asynchronous normal temperature event received aenmb[0]: %x\n", ha->aenmb[0]); goto ldv_44252; case 32775U: ql_log(2U, vha, 20611, "Asynchronous critical temperature event received aenmb[0]: %x\n", ha->aenmb[0]); goto ldv_44252; default: tmp___2 = readw((void const volatile *)(& reg->aenmailbox1)); ha->aenmb[1] = (uint32_t )tmp___2; tmp___3 = readw((void const volatile *)(& reg->aenmailbox2)); ha->aenmb[2] = (uint32_t )tmp___3; tmp___4 = readw((void const volatile *)(& reg->aenmailbox3)); ha->aenmb[3] = (uint32_t )tmp___4; tmp___5 = readw((void const volatile *)(& reg->aenmailbox4)); ha->aenmb[4] = (uint32_t )tmp___5; tmp___6 = readw((void const volatile *)(& reg->aenmailbox5)); ha->aenmb[5] = (uint32_t )tmp___6; tmp___7 = readw((void const volatile *)(& reg->aenmailbox6)); ha->aenmb[6] = (uint32_t )tmp___7; tmp___8 = readw((void const volatile *)(& 
reg->aenmailbox7)); ha->aenmb[7] = (uint32_t )tmp___8; ql_dbg(33554432U, vha, 20600, "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n", ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3], ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]); goto ldv_44252; } ldv_44252: qlafx00_post_aenfx_work(vha, ha->aenmb[0], (uint32_t *)(& ha->aenmb), data_size); return; } } static void qlafx00_mbx_completion(scsi_qla_host_t *vha , uint32_t mb0 ) { uint16_t cnt ; uint16_t *wptr ; struct qla_hw_data *ha ; struct device_reg_fx00 *reg ; unsigned short tmp ; { ha = vha->hw; reg = & (ha->iobase)->ispfx00; if ((unsigned long )ha->mcp32 == (unsigned long )((struct mbx_cmd_32 *)0)) { ql_dbg(33554432U, vha, 20606, "MBX pointer OLD_ERROR.\n"); } else { } ha->flags.mbox_int = 1U; ha->mailbox_out32[0] = mb0; wptr = (uint16_t *)(& reg->mailbox17); cnt = 1U; goto ldv_44268; ldv_44267: tmp = readw((void const volatile *)wptr); ha->mailbox_out32[(int )cnt] = (uint32_t )tmp; wptr = wptr + 1; cnt = (uint16_t )((int )cnt + 1); ldv_44268: ; if ((int )((unsigned short )ha->mbx_count) > (int )cnt) { goto ldv_44267; } else { } return; } } irqreturn_t qlafx00_intr_handler(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct device_reg_fx00 *reg ; int status ; unsigned long iter ; uint32_t stat ; uint32_t mb[8U] ; struct rsp_que *rsp ; unsigned long flags ; uint32_t clr_intr ; int tmp ; long tmp___0 ; raw_spinlock_t *tmp___1 ; void *tmp___2 ; unsigned short tmp___3 ; unsigned short tmp___4 ; unsigned long tmp___5 ; { clr_intr = 0U; rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 20605, "%s: NULL response queue pointer.\n", "qlafx00_intr_handler"); return (0); } else { } ha = rsp->hw; reg = & (ha->iobase)->ispfx00; status = 0; tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (1); } else { } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); tmp___2 = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp___2; iter = 50UL; goto ldv_44299; ldv_44298: stat = readl((void const volatile *)ha->cregbase + 138096U); if ((stat & 7U) == 0U) { goto ldv_44288; } else { } switch (stat & 7U) { case 1U: ; case 3U: ; case 5U: ; case 7U: tmp___3 = readw((void const volatile *)(& reg->mailbox16)); mb[0] = (uint32_t )tmp___3; qlafx00_mbx_completion(vha, mb[0]); status = status | 1; clr_intr = clr_intr | 1U; goto ldv_44293; case 4U: ; case 6U: tmp___4 = readw((void const volatile *)(& reg->aenmailbox0)); ha->aenmb[0] = (uint32_t )tmp___4; qlafx00_async_event(vha); clr_intr = clr_intr | 4U; goto ldv_44293; case 2U: qlafx00_process_response_queue(vha, rsp); clr_intr = clr_intr | 2U; goto ldv_44293; default: ql_dbg(33554432U, vha, 20602, "Unrecognized interrupt type (%d).\n", stat); goto ldv_44293; } ldv_44293: writel(~ clr_intr, (void volatile *)ha->cregbase + 138096U); readl((void const volatile *)ha->cregbase + 138096U); clr_intr = 0U; ldv_44299: tmp___5 = iter; iter = iter - 1UL; if (tmp___5 != 0UL) { goto ldv_44298; } else { } ldv_44288: qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } __inline static cont_a64_entry_t *qlafx00_prep_cont_type1_iocb(struct req_que *req , cont_a64_entry_t *lcont_pkt ) { cont_a64_entry_t *cont_pkt ; { req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; 
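/* ring_index has reached req->length, so the request-queue index wraps back to 0 and
 * ring_ptr is reset to the base of the ring on the next line; in the else branch both
 * simply advance by one entry. */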
req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } cont_pkt = (cont_a64_entry_t *)req->ring_ptr; lcont_pkt->entry_type = 3U; return (cont_pkt); } } __inline static void qlafx00_build_scsi_iocbs(srb_t *sp , struct cmd_type_7_fx00 *cmd_pkt , uint16_t tot_dsds , struct cmd_type_7_fx00 *lcmd_pkt ) { uint16_t avail_dsds ; __le32 *cur_dsd ; scsi_qla_host_t *vha ; struct scsi_cmnd *cmd ; struct scatterlist *sg ; int i ; int cont ; struct req_que *req ; cont_a64_entry_t lcont_pkt ; cont_a64_entry_t *cont_pkt ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; dma_addr_t sle_dma ; __le32 *tmp___2 ; __le32 *tmp___3 ; __le32 *tmp___4 ; { vha = (sp->fcport)->vha; req = vha->req; cmd = sp->u.scmd.cmd; cont = 0; cont_pkt = (cont_a64_entry_t *)0; lcmd_pkt->entry_type = 7U; tmp = scsi_bufflen(cmd); if (tmp == 0U || (unsigned int )cmd->sc_data_direction == 3U) { lcmd_pkt->byte_count = 0U; return; } else { } if ((unsigned int )cmd->sc_data_direction == 1U) { lcmd_pkt->cntrl_flags = 1U; tmp___0 = scsi_bufflen(cmd); vha->qla_stats.output_bytes = vha->qla_stats.output_bytes + (uint64_t )tmp___0; } else if ((unsigned int )cmd->sc_data_direction == 2U) { lcmd_pkt->cntrl_flags = 2U; tmp___1 = scsi_bufflen(cmd); vha->qla_stats.input_bytes = vha->qla_stats.input_bytes + (uint64_t )tmp___1; } else { } avail_dsds = 1U; cur_dsd = (__le32 *)(& lcmd_pkt->dseg_0_address); i = 0; sg = scsi_sglist(cmd); goto ldv_44323; ldv_44322: ; if ((unsigned int )avail_dsds == 0U) { memset((void *)(& lcont_pkt), 0, 64UL); cont_pkt = qlafx00_prep_cont_type1_iocb(req, & lcont_pkt); cur_dsd = (__le32 *)(& lcont_pkt.dseg_0_address); avail_dsds = 5U; cont = 1; } else { } sle_dma = sg->dma_address; tmp___2 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___2 = (unsigned int )sle_dma; tmp___3 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___3 = (unsigned int )(sle_dma >> 32ULL); tmp___4 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___4 = sg->dma_length; avail_dsds = (uint16_t )((int )avail_dsds - 1); if ((unsigned int )avail_dsds == 0U && cont == 1) { cont = 0; memcpy_toio((void volatile *)cont_pkt, (void const *)(& lcont_pkt), 64UL); } else { } i = i + 1; sg = sg_next(sg); ldv_44323: ; if ((int )tot_dsds > i) { goto ldv_44322; } else { } if ((unsigned int )avail_dsds != 0U && cont == 1) { memcpy_toio((void volatile *)cont_pkt, (void const *)(& lcont_pkt), 64UL); } else { } return; } } int qlafx00_start_scsi(srb_t *sp ) { int ret ; int nseg ; unsigned long flags ; uint32_t index ; uint32_t handle ; uint16_t cnt ; uint16_t req_cnt ; uint16_t tot_dsds ; struct req_que *req ; struct rsp_que *rsp ; struct scsi_cmnd *cmd ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct cmd_type_7_fx00 *cmd_pkt ; struct cmd_type_7_fx00 lcmd_pkt ; struct scsi_lun llun ; char tag[2U] ; int tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; struct scatterlist *tmp___2 ; long tmp___3 ; unsigned int tmp___4 ; unsigned int tmp___5 ; int tmp___6 ; { req = (struct req_que *)0; rsp = (struct rsp_que *)0; cmd = sp->u.scmd.cmd; vha = (sp->fcport)->vha; ha = vha->hw; ret = 0; rsp = *(ha->rsp_q_map); req = vha->req; tot_dsds = 0U; vha->marker_needed = 0U; if ((unsigned int )vha->marker_needed != 0U) { tmp = qla2x00_marker(vha, req, rsp, 0, 0, 2); if (tmp != 0) { return (258); } else { } vha->marker_needed = 0U; } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); handle = req->current_outstanding_cmd; index = 1U; goto ldv_44350; ldv_44349: handle = handle + 1U; if ((uint32_t )req->num_outstanding_cmds == 
handle) { handle = 1U; } else { } if ((unsigned long )*(req->outstanding_cmds + (unsigned long )handle) == (unsigned long )((srb_t *)0)) { goto ldv_44348; } else { } index = index + 1U; ldv_44350: ; if ((uint32_t )req->num_outstanding_cmds > index) { goto ldv_44349; } else { } ldv_44348: ; if ((uint32_t )req->num_outstanding_cmds == index) { goto queuing_error; } else { } tmp___4 = scsi_sg_count(cmd); if (tmp___4 != 0U) { tmp___1 = scsi_sg_count(cmd); tmp___2 = scsi_sglist(cmd); nseg = dma_map_sg_attrs(& (ha->pdev)->dev, tmp___2, (int )tmp___1, cmd->sc_data_direction, (struct dma_attrs *)0); tmp___3 = ldv__builtin_expect(nseg == 0, 0L); if (tmp___3 != 0L) { goto queuing_error; } else { } } else { nseg = 0; } tot_dsds = (uint16_t )nseg; req_cnt = qla24xx_calc_iocbs(vha, (int )tot_dsds); if ((int )req->cnt < (int )req_cnt + 2) { tmp___5 = __readl((void const volatile *)req->req_q_out); cnt = (uint16_t )tmp___5; if ((int )req->ring_index < (int )cnt) { req->cnt = (int )cnt - (int )req->ring_index; } else { req->cnt = (int )req->length + ((int )cnt - (int )req->ring_index); } if ((int )req->cnt < (int )req_cnt + 2) { goto queuing_error; } else { } } else { } req->current_outstanding_cmd = handle; *(req->outstanding_cmds + (unsigned long )handle) = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)((unsigned long )handle); req->cnt = (int )req->cnt - (int )req_cnt; cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr; memset((void *)(& lcmd_pkt), 0, 64UL); lcmd_pkt.handle = ((unsigned int )req->id << 16) | sp->handle; lcmd_pkt.handle_hi = 0U; lcmd_pkt.dseg_count = tot_dsds; lcmd_pkt.tgt_idx = (sp->fcport)->tgt_id; int_to_scsilun((cmd->device)->lun, & llun); host_to_adap((uint8_t *)(& llun), (uint8_t *)(& lcmd_pkt.lun), 8U); tmp___6 = scsi_populate_tag_msg(cmd, (char *)(& tag)); if (tmp___6 != 0) { switch ((int )tag[0]) { case 33: lcmd_pkt.task = 1U; goto ldv_44353; case 34: lcmd_pkt.task = 2U; goto ldv_44353; } ldv_44353: ; } else { } host_to_adap(cmd->cmnd, (uint8_t *)(& lcmd_pkt.fcp_cdb), 16U); lcmd_pkt.byte_count = scsi_bufflen(cmd); qlafx00_build_scsi_iocbs(sp, cmd_pkt, (int )tot_dsds, & lcmd_pkt); lcmd_pkt.entry_count = (unsigned char )req_cnt; lcmd_pkt.entry_status = (unsigned char )rsp->id; ql_dump_buffer(134348800U, vha, 12334, cmd->cmnd, (uint32_t )cmd->cmd_len); ql_dump_buffer(134348800U, vha, 12338, (uint8_t *)(& lcmd_pkt), 64U); memcpy_toio((void volatile *)cmd_pkt, (void const *)(& lcmd_pkt), 64UL); __asm__ volatile ("sfence": : : "memory"); req->ring_index = (uint16_t )((int )req->ring_index + 1); if ((int )req->ring_index == (int )req->length) { req->ring_index = 0U; req->ring_ptr = req->ring; } else { req->ring_ptr = req->ring_ptr + 1; } sp->flags = (uint16_t )((unsigned int )sp->flags | 1U); writel((unsigned int )req->ring_index, (void volatile *)req->req_q_in); writel(ha->rqstq_intr_code, (void volatile *)ha->cregbase + 133636U); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); queuing_error: ; if ((unsigned int )tot_dsds != 0U) { scsi_dma_unmap(cmd); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (258); } } void qlafx00_tm_iocb(srb_t *sp , struct tsk_mgmt_entry_fx00 *ptm_iocb ) { struct srb_iocb *fxio ; scsi_qla_host_t *vha ; struct req_que *req ; struct tsk_mgmt_entry_fx00 tm_iocb ; struct scsi_lun llun ; unsigned long tmp ; size_t __len ; void *__ret ; { fxio = & sp->u.iocb_cmd; vha = (sp->fcport)->vha; req = vha->req; memset((void *)(& tm_iocb), 0, 64UL); tm_iocb.entry_type = 5U; tm_iocb.entry_count = 1U; tm_iocb.handle = 
((unsigned int )req->id << 16) | sp->handle; tm_iocb.handle_hi = 0U; tmp = qla2x00_get_async_timeout(vha); tm_iocb.timeout = (unsigned int )((unsigned short )tmp) + 2U; tm_iocb.tgt_id = (sp->fcport)->tgt_id; tm_iocb.control_flags = fxio->u.tmf.flags; if (tm_iocb.control_flags == 16U) { int_to_scsilun(fxio->u.tmf.lun, & llun); host_to_adap((uint8_t *)(& llun), (uint8_t *)(& tm_iocb.lun), 8U); } else { } __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)ptm_iocb, (void const *)(& tm_iocb), __len); } else { __ret = __builtin_memcpy((void *)ptm_iocb, (void const *)(& tm_iocb), __len); } __asm__ volatile ("sfence": : : "memory"); return; } } void qlafx00_abort_iocb(srb_t *sp , struct abort_iocb_entry_fx00 *pabt_iocb ) { struct srb_iocb *fxio ; scsi_qla_host_t *vha ; struct req_que *req ; struct abort_iocb_entry_fx00 abt_iocb ; size_t __len ; void *__ret ; { fxio = & sp->u.iocb_cmd; vha = (sp->fcport)->vha; req = vha->req; memset((void *)(& abt_iocb), 0, 64UL); abt_iocb.entry_type = 8U; abt_iocb.entry_count = 1U; abt_iocb.handle = ((unsigned int )req->id << 16) | sp->handle; abt_iocb.abort_handle = ((unsigned int )req->id << 16) | fxio->u.abt.cmd_hndl; abt_iocb.tgt_id_sts = (sp->fcport)->tgt_id; abt_iocb.req_que_no = req->id; __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)pabt_iocb, (void const *)(& abt_iocb), __len); } else { __ret = __builtin_memcpy((void *)pabt_iocb, (void const *)(& abt_iocb), __len); } __asm__ volatile ("sfence": : : "memory"); return; } } void qlafx00_fxdisc_iocb(srb_t *sp , struct fxdisc_entry_fx00 *pfxiocb ) { struct srb_iocb *fxio ; struct qla_mt_iocb_rqst_fx00 *piocb_rqst ; struct fc_bsg_job *bsg_job ; struct fxdisc_entry_fx00 fx_iocb ; uint8_t entry_cnt ; struct scatterlist *sg ; int avail_dsds ; int tot_dsds ; cont_a64_entry_t lcont_pkt ; cont_a64_entry_t *cont_pkt ; __le32 *cur_dsd ; int index ; int cont ; dma_addr_t sle_dma ; __le32 *tmp ; __le32 *tmp___0 ; __le32 *tmp___1 ; int avail_dsds___0 ; int tot_dsds___0 ; cont_a64_entry_t lcont_pkt___0 ; cont_a64_entry_t *cont_pkt___0 ; __le32 *cur_dsd___0 ; int index___0 ; int cont___0 ; dma_addr_t sle_dma___0 ; __le32 *tmp___2 ; __le32 *tmp___3 ; __le32 *tmp___4 ; size_t __len ; void *__ret ; { fxio = & sp->u.iocb_cmd; entry_cnt = 1U; memset((void *)(& fx_iocb), 0, 64UL); fx_iocb.entry_type = 11U; fx_iocb.handle = sp->handle; fx_iocb.entry_count = entry_cnt; if ((unsigned int )sp->type == 10U) { fx_iocb.func_num = sp->u.iocb_cmd.u.fxiocb.req_func_type; fx_iocb.adapid = fxio->u.fxiocb.adapter_id; fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi; fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0; fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1; fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra; if ((int )fxio->u.fxiocb.flags & 1) { fx_iocb.req_dsdcnt = 1U; fx_iocb.req_xfrcnt = (unsigned short )fxio->u.fxiocb.req_len; fx_iocb.dseg_rq_address[0] = (unsigned int )fxio->u.fxiocb.req_dma_handle; fx_iocb.dseg_rq_address[1] = (unsigned int )(fxio->u.fxiocb.req_dma_handle >> 32ULL); fx_iocb.dseg_rq_len = fxio->u.fxiocb.req_len; } else { } if (((int )fxio->u.fxiocb.flags & 2) != 0) { fx_iocb.rsp_dsdcnt = 1U; fx_iocb.rsp_xfrcnt = (unsigned short )fxio->u.fxiocb.rsp_len; fx_iocb.dseg_rsp_address[0] = (unsigned int )fxio->u.fxiocb.rsp_dma_handle; fx_iocb.dseg_rsp_address[1] = (unsigned int )(fxio->u.fxiocb.rsp_dma_handle >> 32ULL); fx_iocb.dseg_rsp_len = fxio->u.fxiocb.rsp_len; } else { } if (((int )fxio->u.fxiocb.flags & 4) != 0) { fx_iocb.dataword = fxio->u.fxiocb.req_data; } else { } fx_iocb.flags = 
fxio->u.fxiocb.flags; } else { bsg_job = sp->u.bsg_job; piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)(& (bsg_job->request)->rqst_data.h_vendor.vendor_cmd) + 1U; fx_iocb.func_num = piocb_rqst->func_type; fx_iocb.adapid = piocb_rqst->adapid; fx_iocb.adapid_hi = piocb_rqst->adapid_hi; fx_iocb.reserved_0 = piocb_rqst->reserved_0; fx_iocb.reserved_1 = piocb_rqst->reserved_1; fx_iocb.dataword_extra = piocb_rqst->dataword_extra; fx_iocb.dataword = piocb_rqst->dataword; fx_iocb.req_xfrcnt = piocb_rqst->req_len; fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len; if ((int )piocb_rqst->flags & 1) { cont_pkt = (cont_a64_entry_t *)0; index = 0; cont = 0; fx_iocb.req_dsdcnt = (unsigned short )bsg_job->request_payload.sg_cnt; tot_dsds = bsg_job->request_payload.sg_cnt; cur_dsd = (__le32 *)(& fx_iocb.dseg_rq_address); avail_dsds = 1; index = 0; sg = bsg_job->request_payload.sg_list; goto ldv_44397; ldv_44396: ; if (avail_dsds == 0) { memset((void *)(& lcont_pkt), 0, 64UL); cont_pkt = qlafx00_prep_cont_type1_iocb(((sp->fcport)->vha)->req, & lcont_pkt); cur_dsd = (__le32 *)(& lcont_pkt.dseg_0_address); avail_dsds = 5; cont = 1; entry_cnt = (uint8_t )((int )entry_cnt + 1); } else { } sle_dma = sg->dma_address; tmp = cur_dsd; cur_dsd = cur_dsd + 1; *tmp = (unsigned int )sle_dma; tmp___0 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___0 = (unsigned int )(sle_dma >> 32ULL); tmp___1 = cur_dsd; cur_dsd = cur_dsd + 1; *tmp___1 = sg->dma_length; avail_dsds = avail_dsds - 1; if (avail_dsds == 0 && cont == 1) { cont = 0; memcpy_toio((void volatile *)cont_pkt, (void const *)(& lcont_pkt), 64UL); ql_dump_buffer(8421376U, (sp->fcport)->vha, 12354, (uint8_t *)(& lcont_pkt), 64U); } else { } index = index + 1; sg = sg_next(sg); ldv_44397: ; if (index < tot_dsds) { goto ldv_44396; } else { } if (avail_dsds != 0 && cont == 1) { memcpy_toio((void volatile *)cont_pkt, (void const *)(& lcont_pkt), 64UL); ql_dump_buffer(8421376U, (sp->fcport)->vha, 12355, (uint8_t *)(& lcont_pkt), 64U); } else { } } else { } if (((int )piocb_rqst->flags & 2) != 0) { cont_pkt___0 = (cont_a64_entry_t *)0; index___0 = 0; cont___0 = 0; fx_iocb.rsp_dsdcnt = (unsigned short )bsg_job->reply_payload.sg_cnt; tot_dsds___0 = bsg_job->reply_payload.sg_cnt; cur_dsd___0 = (__le32 *)(& fx_iocb.dseg_rsp_address); avail_dsds___0 = 1; index___0 = 0; sg = bsg_job->reply_payload.sg_list; goto ldv_44408; ldv_44407: ; if (avail_dsds___0 == 0) { memset((void *)(& lcont_pkt___0), 0, 64UL); cont_pkt___0 = qlafx00_prep_cont_type1_iocb(((sp->fcport)->vha)->req, & lcont_pkt___0); cur_dsd___0 = (__le32 *)(& lcont_pkt___0.dseg_0_address); avail_dsds___0 = 5; cont___0 = 1; entry_cnt = (uint8_t )((int )entry_cnt + 1); } else { } sle_dma___0 = sg->dma_address; tmp___2 = cur_dsd___0; cur_dsd___0 = cur_dsd___0 + 1; *tmp___2 = (unsigned int )sle_dma___0; tmp___3 = cur_dsd___0; cur_dsd___0 = cur_dsd___0 + 1; *tmp___3 = (unsigned int )(sle_dma___0 >> 32ULL); tmp___4 = cur_dsd___0; cur_dsd___0 = cur_dsd___0 + 1; *tmp___4 = sg->dma_length; avail_dsds___0 = avail_dsds___0 - 1; if (avail_dsds___0 == 0 && cont___0 == 1) { cont___0 = 0; memcpy_toio((void volatile *)cont_pkt___0, (void const *)(& lcont_pkt___0), 64UL); ql_dump_buffer(8421376U, (sp->fcport)->vha, 12357, (uint8_t *)(& lcont_pkt___0), 64U); } else { } index___0 = index___0 + 1; sg = sg_next(sg); ldv_44408: ; if (index___0 < tot_dsds___0) { goto ldv_44407; } else { } if (avail_dsds___0 != 0 && cont___0 == 1) { memcpy_toio((void volatile *)cont_pkt___0, (void const *)(& lcont_pkt___0), 64UL); ql_dump_buffer(8421376U, (sp->fcport)->vha, 12358, 
(uint8_t *)(& lcont_pkt___0), 64U); } else { } } else { } if (((int )piocb_rqst->flags & 4) != 0) { fx_iocb.dataword = piocb_rqst->dataword; } else { } fx_iocb.flags = piocb_rqst->flags; fx_iocb.entry_count = entry_cnt; } ql_dump_buffer(8421376U, (sp->fcport)->vha, 12359, (uint8_t *)(& fx_iocb), 64U); __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)pfxiocb, (void const *)(& fx_iocb), __len); } else { __ret = __builtin_memcpy((void *)pfxiocb, (void const *)(& fx_iocb), __len); } __asm__ volatile ("sfence": : : "memory"); return; } } int reg_timer_15(struct timer_list *timer ) { { ldv_timer_list_15 = timer; ldv_timer_state_15 = 1; return (0); } } void activate_pending_timer_15(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_15 == (unsigned long )timer) { if (ldv_timer_state_15 == 2 || pending_flag != 0) { ldv_timer_list_15 = timer; ldv_timer_list_15->data = data; ldv_timer_state_15 = 1; } else { } return; } else { } reg_timer_15(timer); ldv_timer_list_15->data = data; return; } } void choose_timer_15(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_15 = 2; return; } } void disable_suitable_timer_15(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_15) { ldv_timer_state_15 = 0; return; } else { } return; } } int ldv_del_timer_73(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_74(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } int ldv_del_timer_77(struct timer_list *ldv_func_arg1 ) ; void disable_suitable_timer_16(struct timer_list *timer ) ; void activate_pending_timer_16(struct timer_list *timer , unsigned long data , int pending_flag ) ; int reg_timer_16(struct timer_list *timer ) ; void choose_timer_16(struct timer_list *timer ) ; int ldv_scsi_add_host_with_dma_78(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; static int const MD_MIU_TEST_AGT_RDDATA___0[4U] = { 1090519208, 1090519212, 1090519224, 1090519228}; static uint32_t const qla8044_reg_tbl[14U] = { 13480U, 13484U, 13488U, 14216U, 14212U, 14220U, 13640U, 14304U, 14208U, 13648U, 13652U, 13656U, 13904U, 14260U}; uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) ; __inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha ) ; static void qla8044_need_reset_handler(struct scsi_qla_host *vha ) ; void qla8044_get_minidump(struct scsi_qla_host *vha ) ; int qla8044_collect_md_data(struct scsi_qla_host *vha ) ; extern void __udelay(unsigned long ) ; uint32_t qla8044_rd_reg(struct qla_hw_data *ha , ulong addr ) { unsigned int tmp ; { tmp = readl((void const volatile *)(ha->nx_pcibase + addr)); return (tmp); } } void qla8044_wr_reg(struct qla_hw_data *ha , ulong addr , uint32_t val ) { { writel(val, (void volatile *)(ha->nx_pcibase + addr)); return; } } int qla8044_rd_direct(struct scsi_qla_host *vha , uint32_t const crb_reg ) { struct qla_hw_data *ha ; uint32_t tmp ; { ha = vha->hw; if ((unsigned int )crb_reg <= 
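/* 13 below is the last valid index into the 14-entry qla8044_reg_tbl defined above */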
13U) { tmp = qla8044_rd_reg(ha, (ulong )qla8044_reg_tbl[crb_reg]); return ((int )tmp); } else { return (258); } } } void qla8044_wr_direct(struct scsi_qla_host *vha , uint32_t const crb_reg , uint32_t const value ) { struct qla_hw_data *ha ; { ha = vha->hw; if ((unsigned int )crb_reg <= 13U) { qla8044_wr_reg(ha, (ulong )qla8044_reg_tbl[crb_reg], value); } else { } return; } } static int qla8044_set_win_base(scsi_qla_host_t *vha , uint32_t addr ) { uint32_t val ; int ret_val ; struct qla_hw_data *ha ; { ret_val = 0; ha = vha->hw; qla8044_wr_reg(ha, (ulong )(((int )ha->portnum + 3584) * 4), addr); val = qla8044_rd_reg(ha, (ulong )(((int )ha->portnum + 3584) * 4)); if (val != addr) { ql_log(1U, vha, 45191, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n", "qla8044_set_win_base", addr, val); ret_val = 258; } else { } return (ret_val); } } static int qla8044_rd_reg_indirect(scsi_qla_host_t *vha , uint32_t addr , uint32_t *data ) { int ret_val ; struct qla_hw_data *ha ; { ret_val = 0; ha = vha->hw; ret_val = qla8044_set_win_base(vha, addr); if (ret_val == 0) { *data = qla8044_rd_reg(ha, 14576UL); } else { ql_log(1U, vha, 45192, "%s: failed read of addr 0x%x!\n", "qla8044_rd_reg_indirect", addr); } return (ret_val); } } static int qla8044_wr_reg_indirect(scsi_qla_host_t *vha , uint32_t addr , uint32_t data ) { int ret_val ; struct qla_hw_data *ha ; { ret_val = 0; ha = vha->hw; ret_val = qla8044_set_win_base(vha, addr); if (ret_val == 0) { qla8044_wr_reg(ha, 14576UL, data); } else { ql_log(1U, vha, 45193, "%s: failed wrt to addr 0x%x, data 0x%x\n", "qla8044_wr_reg_indirect", addr, data); } return (ret_val); } } static void qla8044_read_write_crb_reg(struct scsi_qla_host *vha , uint32_t raddr , uint32_t waddr ) { uint32_t value ; { qla8044_rd_reg_indirect(vha, raddr, & value); qla8044_wr_reg_indirect(vha, waddr, value); return; } } static void qla8044_rmw_crb_reg(struct scsi_qla_host *vha , uint32_t raddr , uint32_t waddr , struct qla8044_rmw *p_rmw_hdr ) { uint32_t value ; { if ((unsigned int )p_rmw_hdr->index_a != 0U) { value = vha->reset_tmplt.array[(int )p_rmw_hdr->index_a]; } else { qla8044_rd_reg_indirect(vha, raddr, & value); } value = p_rmw_hdr->test_mask & value; value = value << (int )p_rmw_hdr->shl; value = value >> (int )p_rmw_hdr->shr; value = p_rmw_hdr->or_value | value; value = p_rmw_hdr->xor_value ^ value; qla8044_wr_reg_indirect(vha, waddr, value); return; } } __inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha ) { uint32_t qsnt_state ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla8044_rd_direct(vha, 5U); qsnt_state = (uint32_t )tmp; qsnt_state = (uint32_t )(1 << (int )ha->portnum) | qsnt_state; qla8044_wr_direct(vha, 5U, qsnt_state); ql_log(2U, vha, 45198, "%s(%ld): qsnt_state: 0x%08x\n", "qla8044_set_qsnt_ready", vha->host_no, qsnt_state); return; } } void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha ) { uint32_t qsnt_state ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla8044_rd_direct(vha, 5U); qsnt_state = (uint32_t )tmp; qsnt_state = (uint32_t )(~ (1 << (int )ha->portnum)) & qsnt_state; qla8044_wr_direct(vha, 5U, qsnt_state); ql_log(2U, vha, 45199, "%s(%ld): qsnt_state: 0x%08x\n", "qla8044_clear_qsnt_ready", vha->host_no, qsnt_state); return; } } static int qla8044_lock_recovery(struct scsi_qla_host *vha ) { uint32_t lock ; uint32_t lockid ; struct qla_hw_data *ha ; { lock = 0U; ha = vha->hw; lockid = qla8044_rd_reg(ha, 14236UL); if ((lockid & 3U) != 0U) { return (258); } else { } qla8044_wr_reg(ha, 14236UL, 
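/* the value written below is ((portnum << 2) | 1), presumably announcing this
   port's intent to recover the IDC lock before the 200 ms settle delay */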
(uint32_t )(((int )ha->portnum << 2) | 1)); msleep(200U); lockid = qla8044_rd_reg(ha, 14236UL); if ((lockid & 60U) != (uint32_t )((int )ha->portnum << 2)) { return (258); } else { } ql_dbg(524288U, vha, 45195, "%s:%d: IDC Lock recovery initiated\n", "qla8044_lock_recovery", (int )ha->portnum); qla8044_wr_reg(ha, 14236UL, (uint32_t )(((int )ha->portnum << 2) | 2)); qla8044_wr_reg(ha, 13572UL, 255U); qla8044_rd_reg(ha, 14444UL); qla8044_wr_reg(ha, 14236UL, 0U); lock = qla8044_rd_reg(ha, 14440UL); if (lock != 0U) { lockid = qla8044_rd_reg(ha, 13572UL); lockid = ((lockid + 256U) & 4294967040U) | (uint32_t )ha->portnum; qla8044_wr_reg(ha, 13572UL, lockid); return (0); } else { return (258); } } } int qla8044_idc_lock(struct qla_hw_data *ha ) { uint32_t ret_val ; uint32_t timeout ; uint32_t status ; uint32_t lock_id ; uint32_t lock_cnt ; uint32_t func_num ; uint32_t tmo_owner ; uint32_t first_owner ; scsi_qla_host_t *vha ; void *tmp ; int tmp___0 ; { ret_val = 0U; timeout = 0U; status = 0U; tmo_owner = 0U; first_owner = 0U; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; goto ldv_43544; ldv_43543: status = qla8044_rd_reg(ha, 14440UL); if (status != 0U) { lock_id = qla8044_rd_reg(ha, 13572UL); lock_id = ((lock_id + 256U) & 4294967040U) | (uint32_t )ha->portnum; qla8044_wr_reg(ha, 13572UL, lock_id); goto ldv_43541; } else { } if (timeout == 0U) { first_owner = qla8044_rd_reg(ha, 13572UL); } else { } timeout = timeout + 1U; if (timeout > 9U) { tmo_owner = qla8044_rd_reg(ha, 13572UL); func_num = tmo_owner & 255U; lock_cnt = tmo_owner >> 8; ql_log(1U, vha, 45332, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n", "qla8044_idc_lock", (int )ha->portnum, func_num, lock_cnt, first_owner & 255U); if (first_owner != tmo_owner) { ql_dbg(524288U, vha, 45333, "%s: %d: IDC lock failed\n", "qla8044_idc_lock", (int )ha->portnum); timeout = 0U; } else { tmp___0 = qla8044_lock_recovery(vha); if (tmp___0 == 0) { ret_val = 0U; ql_dbg(524288U, vha, 45334, "%s:IDC lock Recovery by %dsuccessful...\n", "qla8044_idc_lock", (int )ha->portnum); } else { } ql_dbg(524288U, vha, 45194, "%s: IDC lock Recovery by %d failed, Retrying timout\n", "qla8044_idc_lock", (int )ha->portnum); timeout = 0U; } } else { } msleep(200U); ldv_43544: ; if (status == 0U) { goto ldv_43543; } else { } ldv_43541: ; return ((int )ret_val); } } void qla8044_idc_unlock(struct qla_hw_data *ha ) { int id ; scsi_qla_host_t *vha ; void *tmp ; uint32_t tmp___0 ; { tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = qla8044_rd_reg(ha, 13572UL); id = (int )tmp___0; if ((id & 255) != (int )ha->portnum) { ql_log(1U, vha, 45336, "%s: IDC Unlock by %d failed, lock owner is %d!\n", "qla8044_idc_unlock", (int )ha->portnum, id & 255); return; } else { } qla8044_wr_reg(ha, 13572UL, (uint32_t )(id | 255)); qla8044_rd_reg(ha, 14444UL); return; } } static int qla8044_flash_lock(scsi_qla_host_t *vha ) { int lock_owner ; int timeout ; uint32_t lock_status ; int ret_val ; struct qla_hw_data *ha ; uint32_t tmp ; { timeout = 0; lock_status = 0U; ret_val = 0; ha = vha->hw; goto ldv_43562; ldv_43561: lock_status = qla8044_rd_reg(ha, 14416UL); if (lock_status != 0U) { goto ldv_43559; } else { } timeout = timeout + 1; if (timeout > 499) { tmp = qla8044_rd_reg(ha, 13568UL); lock_owner = (int )tmp; ql_log(1U, vha, 45331, "%s: flash lock by %d failed, held by %d\n", "qla8044_flash_lock", (int )ha->portnum, lock_owner); ret_val = 258; goto ldv_43559; } else { } msleep(20U); ldv_43562: ; if (lock_status 
== 0U) { goto ldv_43561; } else { } ldv_43559: qla8044_wr_reg(ha, 13568UL, (uint32_t )ha->portnum); return (ret_val); } } static void qla8044_flash_unlock(scsi_qla_host_t *vha ) { int ret_val ; struct qla_hw_data *ha ; uint32_t tmp ; { ha = vha->hw; qla8044_wr_reg(ha, 13568UL, 255U); tmp = qla8044_rd_reg(ha, 14420UL); ret_val = (int )tmp; return; } } static void qla8044_flash_lock_recovery(struct scsi_qla_host *vha ) { int tmp ; { tmp = qla8044_flash_lock(vha); if (tmp != 0) { ql_log(1U, vha, 45344, "Resetting flash_lock\n"); } else { } qla8044_flash_unlock(vha); return; } } static int qla8044_read_flash_data(scsi_qla_host_t *vha , uint8_t *p_data , uint32_t flash_addr , int u32_word_count ) { int i ; int ret_val ; uint32_t u32_word ; int tmp ; int tmp___0 ; { ret_val = 0; tmp = qla8044_flash_lock(vha); if (tmp != 0) { ret_val = 258; goto exit_lock_error; } else { } if ((flash_addr & 3U) != 0U) { ql_log(1U, vha, 45335, "%s: Illegal addr = 0x%x\n", "qla8044_read_flash_data", flash_addr); ret_val = 258; goto exit_flash_read; } else { } i = 0; goto ldv_43584; ldv_43583: tmp___0 = qla8044_wr_reg_indirect(vha, 1108410416U, flash_addr & 4294901760U); if (tmp___0 != 0) { ql_log(1U, vha, 45337, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW\n! ", "qla8044_read_flash_data", flash_addr); ret_val = 258; goto exit_flash_read; } else { } ret_val = qla8044_rd_reg_indirect(vha, (flash_addr & 65535U) | 1108672512U, & u32_word); if (ret_val != 0) { ql_log(1U, vha, 45196, "%s: failed to read addr 0x%x!\n", "qla8044_read_flash_data", flash_addr); goto exit_flash_read; } else { } *((uint32_t *)p_data) = u32_word; p_data = p_data + 4UL; flash_addr = flash_addr + 4U; i = i + 1; ldv_43584: ; if (i < u32_word_count) { goto ldv_43583; } else { } exit_flash_read: qla8044_flash_unlock(vha); exit_lock_error: ; return (ret_val); } } uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { int tmp ; { scsi_block_requests(vha->host); tmp = qla8044_read_flash_data(vha, buf, offset, (int )(length / 4U)); if (tmp != 0) { ql_log(1U, vha, 45197, "%s: Failed to read from flash\n", "qla8044_read_optrom_data"); } else { } scsi_unblock_requests(vha->host); return (buf); } } __inline int qla8044_need_reset(struct scsi_qla_host *vha ) { uint32_t drv_state ; uint32_t drv_active ; int rval ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { ha = vha->hw; tmp = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp; tmp___0 = qla8044_rd_direct(vha, 5U); drv_state = (uint32_t )tmp___0; rval = (int )((uint32_t )(1 << (int )ha->portnum) & drv_state); if (*((unsigned long *)ha + 2UL) != 0UL && drv_active != 0U) { rval = 1; } else { } return (rval); } } static void qla8044_write_list(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { struct qla8044_entry *p_entry ; uint32_t i ; { p_entry = (struct qla8044_entry *)p_hdr + 8U; i = 0U; goto ldv_43607; ldv_43606: qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2); if ((unsigned int )p_hdr->delay != 0U) { __udelay((unsigned long )p_hdr->delay); } else { } i = i + 1U; p_entry = p_entry + 1; ldv_43607: ; if ((uint32_t )p_hdr->count > i) { goto ldv_43606; } else { } return; } } static void qla8044_read_write_list(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { struct qla8044_entry *p_entry ; uint32_t i ; { p_entry = (struct qla8044_entry *)p_hdr + 8U; i = 0U; goto ldv_43616; ldv_43615: qla8044_read_write_crb_reg(vha, p_entry->arg1, p_entry->arg2); if ((unsigned int )p_hdr->delay != 
0U) { __udelay((unsigned long )p_hdr->delay); } else { } i = i + 1U; p_entry = p_entry + 1; ldv_43616: ; if ((uint32_t )p_hdr->count > i) { goto ldv_43615; } else { } return; } } static int qla8044_poll_reg(struct scsi_qla_host *vha , uint32_t addr , int duration , uint32_t test_mask , uint32_t test_result ) { uint32_t value ; int timeout_error ; uint8_t retries ; int ret_val ; uint8_t tmp ; { ret_val = 0; ret_val = qla8044_rd_reg_indirect(vha, addr, & value); if (ret_val == 258) { timeout_error = 1; goto exit_poll_reg; } else { } retries = (uint8_t )(duration / 10); ldv_43631: ; if ((value & test_mask) != test_result) { timeout_error = 1; msleep((unsigned int )(duration / 10)); ret_val = qla8044_rd_reg_indirect(vha, addr, & value); if (ret_val == 258) { timeout_error = 1; goto exit_poll_reg; } else { } } else { timeout_error = 0; goto ldv_43630; } tmp = retries; retries = (uint8_t )((int )retries - 1); if ((unsigned int )tmp != 0U) { goto ldv_43631; } else { } ldv_43630: ; exit_poll_reg: ; if (timeout_error != 0) { vha->reset_tmplt.seq_error = vha->reset_tmplt.seq_error + 1; ql_log(0U, vha, 45200, "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n", "qla8044_poll_reg", value, test_mask, test_result); } else { } return (timeout_error); } } static void qla8044_poll_list(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { long delay ; struct qla8044_entry *p_entry ; struct qla8044_poll *p_poll ; uint32_t i ; uint32_t value ; int tmp ; { p_poll = (struct qla8044_poll *)p_hdr + 8U; p_entry = (struct qla8044_entry *)p_poll + 8U; delay = (long )p_hdr->delay; if (delay == 0L) { i = 0U; goto ldv_43643; ldv_43642: qla8044_poll_reg(vha, p_entry->arg1, (int )delay, p_poll->test_mask, p_poll->test_value); i = i + 1U; p_entry = p_entry + 1; ldv_43643: ; if ((uint32_t )p_hdr->count > i) { goto ldv_43642; } else { } } else { i = 0U; goto ldv_43646; ldv_43645: ; if (delay != 0L) { tmp = qla8044_poll_reg(vha, p_entry->arg1, (int )delay, p_poll->test_mask, p_poll->test_value); if (tmp != 0) { qla8044_rd_reg_indirect(vha, p_entry->arg1, & value); qla8044_rd_reg_indirect(vha, p_entry->arg2, & value); } else { } } else { } i = i + 1U; p_entry = p_entry + 1; ldv_43646: ; if ((uint32_t )p_hdr->count > i) { goto ldv_43645; } else { } } return; } } static void qla8044_poll_write_list(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { long delay ; struct qla8044_quad_entry *p_entry ; struct qla8044_poll *p_poll ; uint32_t i ; int tmp ; { p_poll = (struct qla8044_poll *)p_hdr + 8U; p_entry = (struct qla8044_quad_entry *)p_poll + 8U; delay = (long )p_hdr->delay; i = 0U; goto ldv_43658; ldv_43657: qla8044_wr_reg_indirect(vha, p_entry->dr_addr, p_entry->dr_value); qla8044_wr_reg_indirect(vha, p_entry->ar_addr, p_entry->ar_value); if (delay != 0L) { tmp = qla8044_poll_reg(vha, p_entry->ar_addr, (int )delay, p_poll->test_mask, p_poll->test_value); if (tmp != 0) { ql_dbg(524288U, vha, 45201, "%s: Timeout Error: poll list, ", "qla8044_poll_write_list"); ql_dbg(524288U, vha, 45202, "item_num %d, entry_num %d\n", i, vha->reset_tmplt.seq_index); } else { } } else { } i = i + 1U; p_entry = p_entry + 1; ldv_43658: ; if ((uint32_t )p_hdr->count > i) { goto ldv_43657; } else { } return; } } static void qla8044_read_modify_write(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { struct qla8044_entry *p_entry ; struct qla8044_rmw *p_rmw_hdr ; uint32_t i ; { p_rmw_hdr = (struct qla8044_rmw *)p_hdr + 8U; p_entry = (struct qla8044_entry *)p_rmw_hdr + 16U; i = 0U; goto ldv_43668; ldv_43667: 
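/* CIL-lowered loop body: apply the read-modify-write descriptor p_rmw_hdr to
   each arg1 (read) / arg2 (write) register pair, p_hdr->count times, with an
   optional per-entry delay */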
qla8044_rmw_crb_reg(vha, p_entry->arg1, p_entry->arg2, p_rmw_hdr); if ((unsigned int )p_hdr->delay != 0U) { __udelay((unsigned long )p_hdr->delay); } else { } i = i + 1U; p_entry = p_entry + 1; ldv_43668: ; if ((uint32_t )p_hdr->count > i) { goto ldv_43667; } else { } return; } } static void qla8044_pause(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { unsigned long __ms ; unsigned long tmp ; { if ((unsigned int )p_hdr->delay != 0U) { __ms = (unsigned long )p_hdr->delay; goto ldv_43676; ldv_43675: __const_udelay(4295000UL); ldv_43676: tmp = __ms; __ms = __ms - 1UL; if (tmp != 0UL) { goto ldv_43675; } else { } } else { } return; } } static void qla8044_template_end(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { { vha->reset_tmplt.template_end = 1U; if (vha->reset_tmplt.seq_error == 0) { ql_dbg(524288U, vha, 45203, "%s: Reset sequence completed SUCCESSFULLY.\n", "qla8044_template_end"); } else { ql_log(0U, vha, 45204, "%s: Reset sequence completed with some timeout errors.\n", "qla8044_template_end"); } return; } } static void qla8044_poll_read_list(struct scsi_qla_host *vha , struct qla8044_reset_entry_hdr *p_hdr ) { long delay ; int index ; struct qla8044_quad_entry *p_entry ; struct qla8044_poll *p_poll ; uint32_t i ; uint32_t value ; int tmp ; int tmp___0 ; { p_poll = (struct qla8044_poll *)p_hdr + 8U; p_entry = (struct qla8044_quad_entry *)p_poll + 8U; delay = (long )p_hdr->delay; i = 0U; goto ldv_43695; ldv_43694: qla8044_wr_reg_indirect(vha, p_entry->ar_addr, p_entry->ar_value); if (delay != 0L) { tmp___0 = qla8044_poll_reg(vha, p_entry->ar_addr, (int )delay, p_poll->test_mask, p_poll->test_value); if (tmp___0 != 0) { ql_dbg(524288U, vha, 45205, "%s: Timeout Error: poll list, ", "qla8044_poll_read_list"); ql_dbg(524288U, vha, 45206, "Item_num %d, entry_num %d\n", i, vha->reset_tmplt.seq_index); } else { index = vha->reset_tmplt.array_index; qla8044_rd_reg_indirect(vha, p_entry->dr_addr, & value); tmp = index; index = index + 1; vha->reset_tmplt.array[tmp] = value; if (index == 16) { vha->reset_tmplt.array_index = 1; } else { } } } else { } i = i + 1U; p_entry = p_entry + 1; ldv_43695: ; if ((uint32_t )p_hdr->count > i) { goto ldv_43694; } else { } return; } } static void qla8044_process_reset_template(struct scsi_qla_host *vha , char *p_buff ) { int index ; int entries ; struct qla8044_reset_entry_hdr *p_hdr ; char *p_entry ; { p_entry = p_buff; vha->reset_tmplt.seq_end = 0U; vha->reset_tmplt.template_end = 0U; entries = (int )(vha->reset_tmplt.hdr)->entries; index = vha->reset_tmplt.seq_index; goto ldv_43719; ldv_43718: p_hdr = (struct qla8044_reset_entry_hdr *)p_entry; switch ((int )p_hdr->cmd) { case 0: ; goto ldv_43706; case 1: qla8044_write_list(vha, p_hdr); goto ldv_43706; case 2: qla8044_read_write_list(vha, p_hdr); goto ldv_43706; case 4: qla8044_poll_list(vha, p_hdr); goto ldv_43706; case 8: qla8044_poll_write_list(vha, p_hdr); goto ldv_43706; case 16: qla8044_read_modify_write(vha, p_hdr); goto ldv_43706; case 32: qla8044_pause(vha, p_hdr); goto ldv_43706; case 64: vha->reset_tmplt.seq_end = 1U; goto ldv_43706; case 128: qla8044_template_end(vha, p_hdr); goto ldv_43706; case 256: qla8044_poll_read_list(vha, p_hdr); goto ldv_43706; default: ql_log(0U, vha, 45207, "%s: Unknown command ==> 0x%04x on entry = %d\n", "qla8044_process_reset_template", (int )p_hdr->cmd, index); goto ldv_43706; } ldv_43706: p_entry = p_entry + (unsigned long )p_hdr->size; index = index + 1; ldv_43719: ; if ((unsigned int )vha->reset_tmplt.seq_end == 0U && 
index < entries) { goto ldv_43718; } else { } vha->reset_tmplt.seq_index = index; return; } } static void qla8044_process_init_seq(struct scsi_qla_host *vha ) { { qla8044_process_reset_template(vha, (char *)vha->reset_tmplt.init_offset); if ((unsigned int )vha->reset_tmplt.seq_end != 1U) { ql_log(0U, vha, 45208, "%s: Abrupt INIT Sub-Sequence end.\n", "qla8044_process_init_seq"); } else { } return; } } static void qla8044_process_stop_seq(struct scsi_qla_host *vha ) { { vha->reset_tmplt.seq_index = 0; qla8044_process_reset_template(vha, (char *)vha->reset_tmplt.stop_offset); if ((unsigned int )vha->reset_tmplt.seq_end != 1U) { ql_log(0U, vha, 45209, "%s: Abrupt STOP Sub-Sequence end.\n", "qla8044_process_stop_seq"); } else { } return; } } static void qla8044_process_start_seq(struct scsi_qla_host *vha ) { { qla8044_process_reset_template(vha, (char *)vha->reset_tmplt.start_offset); if ((unsigned int )vha->reset_tmplt.template_end != 1U) { ql_log(0U, vha, 45210, "%s: Abrupt START Sub-Sequence end.\n", "qla8044_process_start_seq"); } else { } return; } } static int qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha , uint32_t flash_addr , uint8_t *p_data , int u32_word_count ) { uint32_t i ; uint32_t u32_word ; uint32_t flash_offset ; uint32_t addr ; int ret_val ; { addr = flash_addr; ret_val = 0; flash_offset = addr & 65535U; if ((addr & 3U) != 0U) { ql_log(0U, vha, 45211, "%s: Illegal addr = 0x%x\n", "qla8044_lockless_flash_read_u32", addr); ret_val = 258; goto exit_lockless_read; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410416U, addr); if (ret_val != 0) { ql_log(0U, vha, 45212, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", "qla8044_lockless_flash_read_u32", addr); goto exit_lockless_read; } else { } if ((unsigned long )flash_offset + (unsigned long )u32_word_count * 4UL > 65535UL) { i = 0U; goto ldv_43747; ldv_43746: ret_val = qla8044_rd_reg_indirect(vha, (addr & 65535U) | 1108672512U, & u32_word); if (ret_val != 0) { ql_log(0U, vha, 45213, "%s: failed to read addr 0x%x!\n", "qla8044_lockless_flash_read_u32", addr); goto exit_lockless_read; } else { } *((uint32_t *)p_data) = u32_word; p_data = p_data + 4UL; addr = addr + 4U; flash_offset = flash_offset + 4U; if (flash_offset > 65535U) { ret_val = qla8044_wr_reg_indirect(vha, 1108410416U, addr); if (ret_val != 0) { ql_log(0U, vha, 45215, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", "qla8044_lockless_flash_read_u32", addr); goto exit_lockless_read; } else { } flash_offset = 0U; } else { } i = i + 1U; ldv_43747: ; if ((uint32_t )u32_word_count > i) { goto ldv_43746; } else { } } else { i = 0U; goto ldv_43750; ldv_43749: ret_val = qla8044_rd_reg_indirect(vha, (addr & 65535U) | 1108672512U, & u32_word); if (ret_val != 0) { ql_log(0U, vha, 45216, "%s: failed to read addr 0x%x!\n", "qla8044_lockless_flash_read_u32", addr); goto exit_lockless_read; } else { } *((uint32_t *)p_data) = u32_word; p_data = p_data + 4UL; addr = addr + 4U; i = i + 1U; ldv_43750: ; if ((uint32_t )u32_word_count > i) { goto ldv_43749; } else { } } exit_lockless_read: ; return (ret_val); } } static int qla8044_ms_mem_write_128b(struct scsi_qla_host *vha , uint64_t addr , uint32_t *data , uint32_t count ) { int i ; int j ; int ret_val ; uint32_t agt_ctrl ; struct qla_hw_data *ha ; uint32_t *tmp ; int tmp___0 ; uint32_t *tmp___1 ; int tmp___2 ; uint32_t *tmp___3 ; int tmp___4 ; uint32_t *tmp___5 ; int tmp___6 ; int tmp___7 ; { ret_val = 0; ha = vha->hw; if ((addr & 15ULL) != 0ULL) { ret_val = 258; goto exit_ms_mem_write; } else { 
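/* destination address is 16-byte aligned; proceed with the 128-bit-wide
   writes below while holding ha->hw_lock */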
} ldv_write_lock_irqsave(& ha->hw_lock); ret_val = qla8044_wr_reg_indirect(vha, 1090519192U, 0U); if (ret_val == 258) { ql_log(0U, vha, 45217, "%s: write to AGT_ADDR_HI failed!\n", "qla8044_ms_mem_write_128b"); goto exit_ms_mem_write_unlock; } else { } i = 0; goto ldv_43771; ldv_43770: ; if ((addr > 13019119615ULL || addr <= 12884901887ULL) && addr > 268435455ULL) { ret_val = 258; goto exit_ms_mem_write_unlock; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1090519188U, (uint32_t )addr); tmp = data; data = data + 1; tmp___0 = qla8044_wr_reg_indirect(vha, 1090519200U, *tmp); ret_val = tmp___0 + ret_val; tmp___1 = data; data = data + 1; tmp___2 = qla8044_wr_reg_indirect(vha, 1090519204U, *tmp___1); ret_val = tmp___2 + ret_val; tmp___3 = data; data = data + 1; tmp___4 = qla8044_wr_reg_indirect(vha, 1090519216U, *tmp___3); ret_val = tmp___4 + ret_val; tmp___5 = data; data = data + 1; tmp___6 = qla8044_wr_reg_indirect(vha, 1090519220U, *tmp___5); ret_val = tmp___6 + ret_val; if (ret_val == 258) { ql_log(0U, vha, 45218, "%s: write to AGT_WRDATA failed!\n", "qla8044_ms_mem_write_128b"); goto exit_ms_mem_write_unlock; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1090519184U, 6U); tmp___7 = qla8044_wr_reg_indirect(vha, 1090519184U, 7U); ret_val = tmp___7 + ret_val; if (ret_val == 258) { ql_log(0U, vha, 45219, "%s: write to AGT_CTRL failed!\n", "qla8044_ms_mem_write_128b"); goto exit_ms_mem_write_unlock; } else { } j = 0; goto ldv_43769; ldv_43768: ret_val = qla8044_rd_reg_indirect(vha, 1090519184U, & agt_ctrl); if (ret_val == 258) { ql_log(0U, vha, 45220, "%s: failed to read MD_MIU_TEST_AGT_CTRL!\n", "qla8044_ms_mem_write_128b"); goto exit_ms_mem_write_unlock; } else { } if ((agt_ctrl & 8U) == 0U) { goto ldv_43767; } else { } j = j + 1; ldv_43769: ; if (j <= 999) { goto ldv_43768; } else { } ldv_43767: ; if (j > 999) { ql_log(0U, vha, 45221, "%s: MS memory write failed!\n", "qla8044_ms_mem_write_128b"); ret_val = 258; goto exit_ms_mem_write_unlock; } else { } i = i + 1; addr = addr + 16ULL; ldv_43771: ; if ((uint32_t )i < count) { goto ldv_43770; } else { } exit_ms_mem_write_unlock: ldv_write_unlock_irqrestore(& ha->hw_lock); exit_ms_mem_write: ; return (ret_val); } } static int qla8044_copy_bootloader(struct scsi_qla_host *vha ) { uint8_t *p_cache ; uint32_t src ; uint32_t count ; uint32_t size ; uint64_t dest ; int ret_val ; struct qla_hw_data *ha ; uint32_t tmp ; void *tmp___0 ; { ret_val = 0; ha = vha->hw; src = 65536U; tmp = qla8044_rd_reg(ha, 13660UL); dest = (uint64_t )tmp; size = qla8044_rd_reg(ha, 13664UL); if ((size & 15U) != 0U) { size = (size + 16U) & 4294967280U; } else { } count = size / 16U; tmp___0 = vmalloc((unsigned long )size); p_cache = (uint8_t *)tmp___0; if ((unsigned long )p_cache == (unsigned long )((uint8_t *)0U)) { ql_log(0U, vha, 45222, "%s: Failed to allocate memory for boot loader cache\n", "qla8044_copy_bootloader"); ret_val = 258; goto exit_copy_bootloader; } else { } ret_val = qla8044_lockless_flash_read_u32(vha, src, p_cache, (int )(size / 4U)); if (ret_val == 258) { ql_log(0U, vha, 45223, "%s: Error reading F/W from flash!!!\n", "qla8044_copy_bootloader"); goto exit_copy_error; } else { } ql_dbg(524288U, vha, 45224, "%s: Read F/W from flash!\n", "qla8044_copy_bootloader"); ret_val = qla8044_ms_mem_write_128b(vha, dest, (uint32_t *)p_cache, count); if (ret_val == 258) { ql_log(0U, vha, 45225, "%s: Error writing F/W to MS !!!\n", "qla8044_copy_bootloader"); goto exit_copy_error; } else { } ql_dbg(524288U, vha, 45226, "%s: Wrote F/W (size %d) to MS !!!\n", 
"qla8044_copy_bootloader", size); exit_copy_error: vfree((void const *)p_cache); exit_copy_bootloader: ; return (ret_val); } } static int qla8044_restart(struct scsi_qla_host *vha ) { int ret_val ; struct qla_hw_data *ha ; int tmp ; { ret_val = 0; ha = vha->hw; qla8044_process_stop_seq(vha); if (ql2xmdenable != 0) { qla8044_get_minidump(vha); } else { ql_log(0U, vha, 45388, "Minidump disabled.\n"); } qla8044_process_init_seq(vha); tmp = qla8044_copy_bootloader(vha); if (tmp != 0) { ql_log(0U, vha, 45227, "%s: Copy bootloader, firmware restart failed!\n", "qla8044_restart"); ret_val = 258; goto exit_restart; } else { } qla8044_wr_reg(ha, 13820UL, 0U); qla8044_process_start_seq(vha); exit_restart: ; return (ret_val); } } static int qla8044_check_cmd_peg_status(struct scsi_qla_host *vha ) { uint32_t val ; uint32_t ret_val ; int retries ; struct qla_hw_data *ha ; { ret_val = 258U; retries = 60; ha = vha->hw; ldv_43802: val = qla8044_rd_reg(ha, 13904UL); if (val == 65281U) { ql_dbg(524288U, vha, 45228, "%s: Command Peg initialization complete! state=0x%x\n", "qla8044_check_cmd_peg_status", val); ret_val = 0U; goto ldv_43801; } else { } msleep(500U); retries = retries - 1; if (retries != 0) { goto ldv_43802; } else { } ldv_43801: ; return ((int )ret_val); } } static int qla8044_start_firmware(struct scsi_qla_host *vha ) { int ret_val ; int tmp ; { ret_val = 0; tmp = qla8044_restart(vha); if (tmp != 0) { ql_log(0U, vha, 45229, "%s: Restart Error!!!, Need Reset!!!\n", "qla8044_start_firmware"); ret_val = 258; goto exit_start_fw; } else { ql_dbg(524288U, vha, 45231, "%s: Restart done!\n", "qla8044_start_firmware"); } ret_val = qla8044_check_cmd_peg_status(vha); if (ret_val != 0) { ql_log(0U, vha, 45232, "%s: Peg not initialized!\n", "qla8044_start_firmware"); ret_val = 258; } else { } exit_start_fw: ; return (ret_val); } } void qla8044_clear_drv_active(struct scsi_qla_host *vha ) { uint32_t drv_active ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp; drv_active = (uint32_t )(~ (1 << (int )ha->portnum)) & drv_active; ql_log(2U, vha, 45233, "%s(%ld): drv_active: 0x%08x\n", "qla8044_clear_drv_active", vha->host_no, drv_active); qla8044_wr_direct(vha, 3U, drv_active); return; } } static int qla8044_device_bootstrap(struct scsi_qla_host *vha ) { int rval ; int i ; uint32_t old_count ; uint32_t count ; int need_reset ; uint32_t idc_ctrl ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { rval = 258; old_count = 0U; count = 0U; need_reset = 0; ha = vha->hw; need_reset = qla8044_need_reset(vha); if (need_reset == 0) { tmp = qla8044_rd_direct(vha, 2U); old_count = (uint32_t )tmp; i = 0; goto ldv_43827; ldv_43826: msleep(200U); tmp___0 = qla8044_rd_direct(vha, 2U); count = (uint32_t )tmp___0; if (count != old_count) { rval = 0; goto dev_ready; } else { } i = i + 1; ldv_43827: ; if (i <= 9) { goto ldv_43826; } else { } qla8044_flash_lock_recovery(vha); } else if (*((unsigned long *)ha + 2UL) != 0UL) { qla8044_flash_lock_recovery(vha); } else { } ql_log(2U, vha, 45234, "%s: HW State: INITIALIZING\n", "qla8044_device_bootstrap"); qla8044_wr_direct(vha, 4U, 2U); qla8044_idc_unlock(ha); rval = qla8044_start_firmware(vha); qla8044_idc_lock(ha); if (rval != 0) { ql_log(2U, vha, 45235, "%s: HW State: FAILED\n", "qla8044_device_bootstrap"); qla8044_clear_drv_active(vha); qla8044_wr_direct(vha, 4U, 6U); return (rval); } else { } idc_ctrl = qla8044_rd_reg(ha, 14224UL); if ((idc_ctrl & 2U) != 0U) { qla8044_wr_reg(ha, 14224UL, idc_ctrl & 4294967293U); 
ha->fw_dumped = 0; } else { } dev_ready: ql_log(2U, vha, 45236, "%s: HW State: READY\n", "qla8044_device_bootstrap"); qla8044_wr_direct(vha, 4U, 3U); return (rval); } } static void qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha ) { u8 *phdr ; { if ((unsigned long )vha->reset_tmplt.buff == (unsigned long )((uint8_t *)0U)) { ql_log(0U, vha, 45237, "%s: Error Invalid reset_seq_template\n", "qla8044_dump_reset_seq_hdr"); return; } else { } phdr = vha->reset_tmplt.buff; ql_dbg(524288U, vha, 45238, "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n", (int )*phdr, (int )*(phdr + 1UL), (int )*(phdr + 2UL), (int )*(phdr + 3UL), (int )*(phdr + 4UL), (int )*(phdr + 5UL), (int )*(phdr + 6UL), (int )*(phdr + 7UL), (int )*(phdr + 8UL), (int )*(phdr + 9UL), (int )*(phdr + 10UL), (int )*(phdr + 11UL), (int )*(phdr + 12UL), (int )*(phdr + 13UL), (int )*(phdr + 14UL), (int )*(phdr + 15UL)); return; } } static int qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha ) { uint32_t sum ; uint16_t *buff ; int u16_count ; uint16_t *tmp ; int tmp___0 ; { sum = 0U; buff = (uint16_t *)vha->reset_tmplt.buff; u16_count = (int )((unsigned int )(vha->reset_tmplt.hdr)->size / 2U); goto ldv_43842; ldv_43841: tmp = buff; buff = buff + 1; sum = (uint32_t )*tmp + sum; ldv_43842: tmp___0 = u16_count; u16_count = u16_count - 1; if (tmp___0 > 0) { goto ldv_43841; } else { } goto ldv_43845; ldv_43844: sum = (sum & 65535U) + (sum >> 16); ldv_43845: ; if (sum >> 16 != 0U) { goto ldv_43844; } else { } if (sum != 4294967295U) { return (0); } else { ql_log(0U, vha, 45239, "%s: Reset seq checksum failed\n", "qla8044_reset_seq_checksum_test"); return (258); } } } void qla8044_read_reset_template(struct scsi_qla_host *vha ) { uint8_t *p_buff ; uint32_t addr ; uint32_t tmplt_hdr_def_size ; uint32_t tmplt_hdr_size ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { vha->reset_tmplt.seq_error = 0; tmp = vmalloc(8192UL); vha->reset_tmplt.buff = (uint8_t *)tmp; if ((unsigned long )vha->reset_tmplt.buff == (unsigned long )((uint8_t *)0U)) { ql_log(0U, vha, 45240, "%s: Failed to allocate reset template resources\n", "qla8044_read_reset_template"); goto exit_read_reset_template; } else { } p_buff = vha->reset_tmplt.buff; addr = 5177344U; tmplt_hdr_def_size = 4U; ql_dbg(524288U, vha, 45241, "%s: Read template hdr size %d from Flash\n", "qla8044_read_reset_template", tmplt_hdr_def_size); tmp___0 = qla8044_read_flash_data(vha, p_buff, addr, (int )tmplt_hdr_def_size); if (tmp___0 != 0) { ql_log(0U, vha, 45242, "%s: Failed to read reset template\n", "qla8044_read_reset_template"); goto exit_read_template_error; } else { } vha->reset_tmplt.hdr = (struct qla8044_reset_template_hdr *)vha->reset_tmplt.buff; tmplt_hdr_size = (unsigned int )(vha->reset_tmplt.hdr)->hdr_size / 4U; if (tmplt_hdr_size != tmplt_hdr_def_size || (unsigned int )(vha->reset_tmplt.hdr)->signature != 51966U) { ql_log(0U, vha, 45243, "%s: Template Header size invalid %d tmplt_hdr_def_size %d!!!\n", "qla8044_read_reset_template", tmplt_hdr_size, tmplt_hdr_def_size); goto exit_read_template_error; } else { } addr = (uint32_t )((int )(vha->reset_tmplt.hdr)->hdr_size + 5177344); p_buff = vha->reset_tmplt.buff + (unsigned long )(vha->reset_tmplt.hdr)->hdr_size; tmplt_hdr_def_size = (uint32_t )((unsigned long )((int )(vha->reset_tmplt.hdr)->size - (int )(vha->reset_tmplt.hdr)->hdr_size) / 4UL); ql_dbg(524288U, vha, 45244, "%s: Read rest of the template size %d\n", "qla8044_read_reset_template", (int 
)(vha->reset_tmplt.hdr)->size); tmp___1 = qla8044_read_flash_data(vha, p_buff, addr, (int )tmplt_hdr_def_size); if (tmp___1 != 0) { ql_log(0U, vha, 45245, "%s: Failed to read reset tempelate\n", "qla8044_read_reset_template"); goto exit_read_template_error; } else { } tmp___2 = qla8044_reset_seq_checksum_test(vha); if (tmp___2 != 0) { ql_log(0U, vha, 45246, "%s: Reset Seq checksum failed!\n", "qla8044_read_reset_template"); goto exit_read_template_error; } else { } ql_dbg(524288U, vha, 45247, "%s: Reset Seq checksum passed! Get stop, start and init seq offsets\n", "qla8044_read_reset_template"); vha->reset_tmplt.init_offset = vha->reset_tmplt.buff + (unsigned long )(vha->reset_tmplt.hdr)->init_seq_offset; vha->reset_tmplt.start_offset = vha->reset_tmplt.buff + (unsigned long )(vha->reset_tmplt.hdr)->start_seq_offset; vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff + (unsigned long )(vha->reset_tmplt.hdr)->hdr_size; qla8044_dump_reset_seq_hdr(vha); goto exit_read_reset_template; exit_read_template_error: vfree((void const *)vha->reset_tmplt.buff); exit_read_reset_template: ; return; } } void qla8044_set_idc_dontreset(struct scsi_qla_host *vha ) { uint32_t idc_ctrl ; struct qla_hw_data *ha ; { ha = vha->hw; idc_ctrl = qla8044_rd_reg(ha, 14224UL); idc_ctrl = idc_ctrl | 1U; ql_dbg(524288U, vha, 45248, "%s: idc_ctrl = %d\n", "qla8044_set_idc_dontreset", idc_ctrl); qla8044_wr_reg(ha, 14224UL, idc_ctrl); return; } } __inline void qla8044_set_rst_ready(struct scsi_qla_host *vha ) { uint32_t drv_state ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla8044_rd_direct(vha, 5U); drv_state = (uint32_t )tmp; drv_state = (uint32_t )(1 << (int )ha->portnum) | drv_state; ql_log(2U, vha, 45249, "%s(%ld): drv_state: 0x%08x\n", "qla8044_set_rst_ready", vha->host_no, drv_state); qla8044_wr_direct(vha, 5U, drv_state); return; } } static void qla8044_need_reset_handler(struct scsi_qla_host *vha ) { uint32_t dev_state ; uint32_t drv_state ; uint32_t drv_active ; unsigned long reset_timeout ; unsigned long dev_init_timeout ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { dev_state = 0U; ha = vha->hw; ql_log(0U, vha, 45250, "%s: Performing ISP error recovery\n", "qla8044_need_reset_handler"); if (*((unsigned long *)vha + 19UL) != 0UL) { qla8044_idc_unlock(ha); qla2x00_abort_isp_cleanup(vha); (*((ha->isp_ops)->get_flash_version))(vha, (void *)(vha->req)->ring); (*((ha->isp_ops)->nvram_config))(vha); qla8044_idc_lock(ha); } else { } if (*((unsigned long *)ha + 2UL) == 0UL) { ql_dbg(524288U, vha, 45251, "%s(%ld): reset acknowledged\n", "qla8044_need_reset_handler", vha->host_no); qla8044_set_rst_ready(vha); dev_init_timeout = (unsigned long )(ha->fcoe_reset_timeout * 250U) + (unsigned long )jiffies; ldv_43887: ; if ((long )((unsigned long )jiffies - dev_init_timeout) >= 0L) { ql_log(2U, vha, 45252, "%s: Non Reset owner DEV INIT TIMEOUT!\n", "qla8044_need_reset_handler"); goto ldv_43886; } else { } qla8044_idc_unlock(ha); msleep(1000U); qla8044_idc_lock(ha); tmp = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp; if (dev_state == 4U) { goto ldv_43887; } else { } ldv_43886: ; } else { qla8044_set_rst_ready(vha); reset_timeout = (unsigned long )(ha->fcoe_reset_timeout * 250U) + (unsigned long )jiffies; tmp___0 = qla8044_rd_direct(vha, 5U); drv_state = (uint32_t )tmp___0; tmp___1 = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp___1; ql_log(2U, vha, 45253, "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", "qla8044_need_reset_handler", 
vha->host_no, drv_state, drv_active); goto ldv_43896; ldv_43895: ; if ((long )((unsigned long )jiffies - reset_timeout) >= 0L) { ql_log(2U, vha, 45254, "%s: RESET TIMEOUT!drv_state: 0x%08x, drv_active: 0x%08x\n", (char *)"qla2xxx", drv_state, drv_active); goto ldv_43894; } else { } qla8044_idc_unlock(ha); msleep(1000U); qla8044_idc_lock(ha); tmp___2 = qla8044_rd_direct(vha, 5U); drv_state = (uint32_t )tmp___2; tmp___3 = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp___3; ldv_43896: ; if (drv_state != drv_active) { goto ldv_43895; } else { } ldv_43894: ; if (drv_state != drv_active) { ql_log(2U, vha, 45255, "%s(%ld): Reset_owner turning off drv_active of non-acking function 0x%x\n", "qla8044_need_reset_handler", vha->host_no, drv_active ^ drv_state); drv_active = drv_active & drv_state; qla8044_wr_direct(vha, 3U, drv_active); } else { } ha->flags.nic_core_reset_owner = 0U; qla8044_device_bootstrap(vha); } return; } } static void qla8044_set_drv_active(struct scsi_qla_host *vha ) { uint32_t drv_active ; struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp; drv_active = (uint32_t )(1 << (int )ha->portnum) | drv_active; ql_log(2U, vha, 45256, "%s(%ld): drv_active: 0x%08x\n", "qla8044_set_drv_active", vha->host_no, drv_active); qla8044_wr_direct(vha, 3U, drv_active); return; } } static void qla8044_clear_idc_dontreset(struct scsi_qla_host *vha ) { uint32_t idc_ctrl ; struct qla_hw_data *ha ; { ha = vha->hw; idc_ctrl = qla8044_rd_reg(ha, 14224UL); idc_ctrl = idc_ctrl & 4294967294U; ql_log(2U, vha, 45257, "%s: idc_ctrl = %d\n", "qla8044_clear_idc_dontreset", idc_ctrl); qla8044_wr_reg(ha, 14224UL, idc_ctrl); return; } } static int qla8044_set_idc_ver(struct scsi_qla_host *vha ) { int idc_ver ; uint32_t drv_active ; int rval ; struct qla_hw_data *ha ; int tmp ; uint32_t tmp___0 ; { rval = 0; ha = vha->hw; tmp = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp; if ((uint32_t )(1 << (int )ha->portnum) == drv_active) { idc_ver = qla8044_rd_direct(vha, 8U); idc_ver = idc_ver & -256; idc_ver = idc_ver | 1; qla8044_wr_direct(vha, 8U, (uint32_t const )idc_ver); ql_log(2U, vha, 45258, "%s: IDC version updated to %d\n", "qla8044_set_idc_ver", idc_ver); } else { idc_ver = qla8044_rd_direct(vha, 8U); idc_ver = idc_ver & 255; if (idc_ver != 1) { ql_log(2U, vha, 45259, "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n", "qla8044_set_idc_ver", 1, idc_ver); rval = 258; goto exit_set_idc_ver; } else { } } tmp___0 = qla8044_rd_reg(ha, 14232UL); idc_ver = (int )tmp___0; idc_ver = ~ (3 << (int )ha->portnum * 2) & idc_ver; idc_ver = idc_ver; qla8044_wr_reg(ha, 14232UL, (uint32_t )idc_ver); exit_set_idc_ver: ; return (rval); } } static int qla8044_update_idc_reg(struct scsi_qla_host *vha ) { uint32_t drv_active ; int rval ; struct qla_hw_data *ha ; int tmp ; { rval = 0; ha = vha->hw; if (*((unsigned long *)vha + 19UL) != 0UL) { goto exit_update_idc_reg; } else { } qla8044_idc_lock(ha); qla8044_set_drv_active(vha); tmp = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp; if ((uint32_t )(1 << (int )ha->portnum) == drv_active && ql2xdontresethba == 0) { qla8044_clear_idc_dontreset(vha); } else { } rval = qla8044_set_idc_ver(vha); if (rval == 258) { qla8044_clear_drv_active(vha); } else { } qla8044_idc_unlock(ha); exit_update_idc_reg: ; return (rval); } } static void qla8044_need_qsnt_handler(struct scsi_qla_host *vha ) { unsigned long qsnt_timeout ; uint32_t drv_state ; uint32_t drv_active ; 
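/* quiescent-state handler: quiesce I/O (or return early if the vha flag is
   clear), set this port's qsnt-ready bit, then poll once a second -- dropping
   the IDC lock around each sleep -- until drv_state matches the shifted
   drv_active or qsnt_timeout (jiffies + 7500) expires */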
uint32_t dev_state ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { ha = vha->hw; if (*((unsigned long *)vha + 19UL) != 0UL) { qla2x00_quiesce_io(vha); } else { return; } qla8044_set_qsnt_ready(vha); qsnt_timeout = (unsigned long )jiffies + 7500UL; tmp = qla8044_rd_direct(vha, 5U); drv_state = (uint32_t )tmp; tmp___0 = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp___0; drv_active = drv_active << 1; goto ldv_43940; ldv_43939: ; if ((long )((unsigned long )jiffies - qsnt_timeout) >= 0L) { clear_bit(20L, (unsigned long volatile *)(& vha->dpc_flags)); qla8044_wr_direct(vha, 4U, 3U); qla8044_clear_qsnt_ready(vha); ql_log(2U, vha, 45260, "Timeout waiting for quiescent ack!!!\n"); return; } else { } qla8044_idc_unlock(ha); msleep(1000U); qla8044_idc_lock(ha); tmp___1 = qla8044_rd_direct(vha, 5U); drv_state = (uint32_t )tmp___1; tmp___2 = qla8044_rd_direct(vha, 3U); drv_active = (uint32_t )tmp___2; drv_active = drv_active << 1; ldv_43940: ; if (drv_state != drv_active) { goto ldv_43939; } else { } tmp___3 = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp___3; if (dev_state == 5U) { qla8044_wr_direct(vha, 4U, 7U); ql_log(2U, vha, 45261, "%s: HW State: QUIESCENT\n", "qla8044_need_qsnt_handler"); } else { } return; } } int qla8044_device_state_handler(struct scsi_qla_host *vha ) { uint32_t dev_state ; int rval ; unsigned long dev_init_timeout ; struct qla_hw_data *ha ; int tmp ; char *tmp___0 ; char *tmp___1 ; char *tmp___2 ; char *tmp___3 ; int tmp___4 ; char *tmp___5 ; char *tmp___6 ; { rval = 0; ha = vha->hw; rval = qla8044_update_idc_reg(vha); if (rval == 258) { goto exit_error; } else { } tmp = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp; if (dev_state <= 7U) { tmp___0 = qdev_state(dev_state); tmp___1 = tmp___0; } else { tmp___1 = (char *)"Unknown"; } ql_dbg(524288U, vha, 45262, "Device state is 0x%x = %s\n", dev_state, tmp___1); dev_init_timeout = (unsigned long )(ha->fcoe_dev_init_timeout * 250U) + (unsigned long )jiffies; qla8044_idc_lock(ha); ldv_43967: ; if ((long )((unsigned long )jiffies - dev_init_timeout) >= 0L) { if (dev_state <= 7U) { tmp___2 = qdev_state(dev_state); tmp___3 = tmp___2; } else { tmp___3 = (char *)"Unknown"; } ql_log(1U, vha, 45263, "%s: Device Init Failed 0x%x = %s\n", (char *)"qla2xxx", dev_state, tmp___3); qla8044_wr_direct(vha, 4U, 6U); } else { } tmp___4 = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp___4; if (dev_state <= 7U) { tmp___5 = qdev_state(dev_state); tmp___6 = tmp___5; } else { tmp___6 = (char *)"Unknown"; } ql_log(2U, vha, 45264, "Device state is 0x%x = %s\n", dev_state, tmp___6); switch (dev_state) { case 3U: ha->flags.nic_core_reset_owner = 0U; goto exit; case 1U: rval = qla8044_device_bootstrap(vha); goto exit; case 2U: qla8044_idc_unlock(ha); msleep(1000U); qla8044_idc_lock(ha); goto ldv_43961; case 4U: qla8044_need_reset_handler(vha); goto ldv_43961; case 5U: qla8044_need_qsnt_handler(vha); dev_init_timeout = (unsigned long )(ha->fcoe_reset_timeout * 250U) + (unsigned long )jiffies; goto ldv_43961; case 7U: ql_log(2U, vha, 45265, "HW State: QUIESCENT\n"); qla8044_idc_unlock(ha); msleep(1000U); qla8044_idc_lock(ha); dev_init_timeout = (unsigned long )(ha->fcoe_reset_timeout * 250U) + (unsigned long )jiffies; goto ldv_43961; case 6U: ha->flags.nic_core_reset_owner = 0U; qla8044_idc_unlock(ha); qla8xxx_dev_failed_handler(vha); rval = 258; qla8044_idc_lock(ha); goto exit; default: qla8044_idc_unlock(ha); qla8xxx_dev_failed_handler(vha); rval = 258; qla8044_idc_lock(ha); 
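/* unknown/unhandled device state: treated like the FAILED case -- run the
   failed-device handler and leave the state machine with rval = 258 */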
goto exit; } ldv_43961: ; goto ldv_43967; exit: qla8044_idc_unlock(ha); exit_error: ; return (rval); } } static int qla8044_check_temp(struct scsi_qla_host *vha ) { uint32_t temp ; uint32_t temp_state ; uint32_t temp_val ; int status ; int tmp ; { status = 0; tmp = qla8044_rd_direct(vha, 13U); temp = (uint32_t )tmp; temp_state = temp & 65535U; temp_val = temp >> 16; if (temp_state == 3U) { ql_log(1U, vha, 45266, "Device temperature %d degrees C exceeds maximum allowed. Hardware has been shut down\n", temp_val); status = 258; return (status); } else if (temp_state == 2U) { ql_log(1U, vha, 45267, "Device temperature %d degrees C exceeds operating range. Immediate action needed.\n", temp_val); } else { } return (0); } } int qla8044_read_temperature(scsi_qla_host_t *vha ) { uint32_t temp ; int tmp ; { tmp = qla8044_rd_direct(vha, 13U); temp = (uint32_t )tmp; return ((int )(temp >> 16)); } } int qla8044_check_fw_alive(struct scsi_qla_host *vha ) { uint32_t fw_heartbeat_counter ; uint32_t halt_status1 ; uint32_t halt_status2 ; int status ; int tmp ; int tmp___0 ; int tmp___1 ; { status = 0; tmp = qla8044_rd_direct(vha, 2U); fw_heartbeat_counter = (uint32_t )tmp; if (fw_heartbeat_counter == 4294967295U) { ql_dbg(524288U, vha, 45268, "scsi%ld: %s: Device in frozen state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", vha->host_no, "qla8044_check_fw_alive"); return (status); } else { } if ((uint32_t )vha->fw_heartbeat_counter == fw_heartbeat_counter) { vha->seconds_since_last_heartbeat = vha->seconds_since_last_heartbeat + 1; if (vha->seconds_since_last_heartbeat == 2) { vha->seconds_since_last_heartbeat = 0; tmp___0 = qla8044_rd_direct(vha, 0U); halt_status1 = (uint32_t )tmp___0; tmp___1 = qla8044_rd_direct(vha, 1U); halt_status2 = (uint32_t )tmp___1; ql_log(2U, vha, 45269, "scsi(%ld): %s, ISP8044 Dumping hw/fw registers:\n PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n", vha->host_no, "qla8044_check_fw_alive", halt_status1, halt_status2); status = 258; } else { } } else { vha->seconds_since_last_heartbeat = 0; } vha->fw_heartbeat_counter = (int )fw_heartbeat_counter; return (status); } } void qla8044_watchdog(struct scsi_qla_host *vha ) { uint32_t dev_state ; uint32_t halt_status ; int halt_status_unrecoverable ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; { halt_status_unrecoverable = 0; ha = vha->hw; tmp___5 = constant_test_bit(3L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___5 == 0) { tmp___6 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___6 == 0) { tmp___7 = constant_test_bit(10L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___7 == 0) { tmp___8 = constant_test_bit(18L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___8 == 0) { tmp = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp; tmp___4 = qla8044_check_temp(vha); if (tmp___4 != 0) { set_bit(17L, (unsigned long volatile *)(& vha->dpc_flags)); ha->flags.isp82xx_fw_hung = 1U; qla2xxx_wake_dpc(vha); } else if (dev_state == 4U) { tmp___3 = constant_test_bit(2L, (unsigned long const volatile *)(& vha->dpc_flags)); if (tmp___3 == 0) { ql_log(2U, vha, 45270, "%s: HW State: NEED RESET!\n", "qla8044_watchdog"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else { goto _L___0; } } else _L___0: /* CIL Label */ if (dev_state == 5U) { tmp___2 = constant_test_bit(20L, (unsigned long const volatile *)(& 
vha->dpc_flags)); if (tmp___2 == 0) { ql_log(2U, vha, 45271, "%s: HW State: NEED QUIES detected!\n", "qla8044_watchdog"); set_bit(20L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); } else { goto _L; } } else { _L: /* CIL Label */ tmp___1 = qla8044_check_fw_alive(vha); if (tmp___1 != 0) { tmp___0 = qla8044_rd_direct(vha, 0U); halt_status = (uint32_t )tmp___0; if ((halt_status & 1073741824U) != 0U) { ql_log(0U, vha, 45272, "%s: Firmware error detected device is being reset\n", "qla8044_watchdog"); } else if ((int )halt_status < 0) { halt_status_unrecoverable = 1; } else { } if (halt_status_unrecoverable != 0) { set_bit(17L, (unsigned long volatile *)(& vha->dpc_flags)); } else if (dev_state == 7U) { set_bit(18L, (unsigned long volatile *)(& vha->dpc_flags)); ql_log(2U, vha, 45273, "%s: FW CONTEXT Reset needed!\n", "qla8044_watchdog"); } else { ql_log(2U, vha, 45274, "%s: detect abort needed\n", "qla8044_watchdog"); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla82xx_clear_pending_mbx(vha); } ha->flags.isp82xx_fw_hung = 1U; ql_log(1U, vha, 45322, "Firmware hung.\n"); qla2xxx_wake_dpc(vha); } else { } } } else { } } else { } } else { } } else { } return; } } static int qla8044_minidump_process_control(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr ) { struct qla8044_minidump_entry_crb *crb_entry ; uint32_t read_value ; uint32_t opcode ; uint32_t poll_time ; uint32_t addr ; uint32_t index ; uint32_t crb_addr ; uint32_t rval ; unsigned long wtime ; struct qla8044_minidump_template_hdr *tmplt_hdr ; int i ; struct qla_hw_data *ha ; { rval = 0U; ha = vha->hw; ql_dbg(524288U, vha, 45277, "Entering fn: %s\n", "qla8044_minidump_process_control"); tmplt_hdr = (struct qla8044_minidump_template_hdr *)ha->md_tmplt_hdr; crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr; crb_addr = crb_entry->addr; i = 0; goto ldv_44021; ldv_44020: opcode = (uint32_t )crb_entry->crb_ctrl.opcode; if ((int )opcode & 1) { qla8044_wr_reg_indirect(vha, crb_addr, crb_entry->value_1); opcode = opcode & 4294967294U; } else { } if ((opcode & 2U) != 0U) { qla8044_rd_reg_indirect(vha, crb_addr, & read_value); qla8044_wr_reg_indirect(vha, crb_addr, read_value); opcode = opcode & 4294967293U; } else { } if ((opcode & 4U) != 0U) { qla8044_rd_reg_indirect(vha, crb_addr, & read_value); read_value = crb_entry->value_2 & read_value; opcode = opcode & 4294967291U; if ((opcode & 8U) != 0U) { read_value = crb_entry->value_3 | read_value; opcode = opcode & 4294967287U; } else { } qla8044_wr_reg_indirect(vha, crb_addr, read_value); } else { } if ((opcode & 8U) != 0U) { qla8044_rd_reg_indirect(vha, crb_addr, & read_value); read_value = crb_entry->value_3 | read_value; qla8044_wr_reg_indirect(vha, crb_addr, read_value); opcode = opcode & 4294967287U; } else { } if ((opcode & 16U) != 0U) { poll_time = (uint32_t )crb_entry->crb_strd.poll_timeout; wtime = (unsigned long )poll_time + (unsigned long )jiffies; qla8044_rd_reg_indirect(vha, crb_addr, & read_value); ldv_44019: ; if ((crb_entry->value_2 & read_value) == crb_entry->value_1) { goto ldv_44012; } else if ((long )((unsigned long )jiffies - wtime) >= 0L) { rval = 258U; goto ldv_44012; } else { qla8044_rd_reg_indirect(vha, crb_addr, & read_value); } goto ldv_44019; ldv_44012: opcode = opcode & 4294967279U; } else { } if ((opcode & 32U) != 0U) { if ((unsigned int )crb_entry->crb_strd.state_index_a != 0U) { index = (uint32_t )crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else { addr = 
crb_addr; } qla8044_rd_reg_indirect(vha, addr, & read_value); index = (uint32_t )crb_entry->crb_ctrl.state_index_v; tmplt_hdr->saved_state_array[index] = read_value; opcode = opcode & 4294967263U; } else { } if ((opcode & 64U) != 0U) { if ((unsigned int )crb_entry->crb_strd.state_index_a != 0U) { index = (uint32_t )crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else { addr = crb_addr; } if ((unsigned int )crb_entry->crb_ctrl.state_index_v != 0U) { index = (uint32_t )crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; } else { read_value = crb_entry->value_1; } qla8044_wr_reg_indirect(vha, addr, read_value); opcode = opcode & 4294967231U; } else { } if ((opcode & 128U) != 0U) { index = (uint32_t )crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; read_value = read_value << (int )crb_entry->crb_ctrl.shl; read_value = read_value >> (int )crb_entry->crb_ctrl.shr; if (crb_entry->value_2 != 0U) { read_value = crb_entry->value_2 & read_value; } else { } read_value = crb_entry->value_3 | read_value; read_value = crb_entry->value_1 + read_value; tmplt_hdr->saved_state_array[index] = read_value; opcode = opcode & 4294967167U; } else { } crb_addr = (uint32_t )crb_entry->crb_strd.addr_stride + crb_addr; i = i + 1; ldv_44021: ; if ((uint32_t )i < crb_entry->op_count) { goto ldv_44020; } else { } return ((int )rval); } } static void qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t r_stride ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla8044_minidump_entry_crb *crb_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; ql_dbg(524288U, vha, 45278, "Entering fn: %s\n", "qla8044_minidump_process_rdcrb"); crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr; r_addr = crb_hdr->addr; r_stride = (uint32_t )crb_hdr->crb_strd.addr_stride; loop_cnt = crb_hdr->op_count; i = 0U; goto ldv_44037; ldv_44036: qla8044_rd_reg_indirect(vha, r_addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_addr; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; r_addr = r_addr + r_stride; i = i + 1U; ldv_44037: ; if (i < loop_cnt) { goto ldv_44036; } else { } *d_ptr = data_ptr; return; } } static int qla8044_minidump_process_rdmem(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t r_value ; uint32_t r_data ; uint32_t i ; uint32_t j ; uint32_t loop_cnt ; struct qla8044_minidump_entry_rdmem *m_hdr ; uint32_t *data_ptr ; struct qla_hw_data *ha ; struct ratelimit_state _rs ; int tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; ha = vha->hw; ql_dbg(524288U, vha, 45279, "Entering fn: %s\n", "qla8044_minidump_process_rdmem"); m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr; r_addr = m_hdr->read_addr; loop_cnt = m_hdr->read_data_size / 16U; ql_dbg(524288U, vha, 45296, "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n", "qla8044_minidump_process_rdmem", r_addr, m_hdr->read_data_size); if ((r_addr & 15U) != 0U) { ql_dbg(524288U, vha, 45297, "[%s]: Read addr 0x%x not 16 bytes alligned\n", "qla8044_minidump_process_rdmem", r_addr); return (258); } else { } if ((m_hdr->read_data_size & 15U) != 0U) { ql_dbg(524288U, vha, 45298, "[%s]: Read data[0x%x] not multiple of 16 bytes\n", "qla8044_minidump_process_rdmem", m_hdr->read_data_size); return (258); } else { } ql_dbg(524288U, vha, 45299, 
"[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", "qla8044_minidump_process_rdmem", r_addr, m_hdr->read_data_size, loop_cnt); ldv_write_lock_irqsave(& ha->hw_lock); i = 0U; goto ldv_44064; ldv_44063: qla8044_wr_reg_indirect(vha, 1090519188U, r_addr); r_value = 0U; qla8044_wr_reg_indirect(vha, 1090519192U, r_value); r_value = 2U; qla8044_wr_reg_indirect(vha, 1090519184U, r_value); r_value = 3U; qla8044_wr_reg_indirect(vha, 1090519184U, r_value); j = 0U; goto ldv_44057; ldv_44056: qla8044_rd_reg_indirect(vha, 1090519184U, & r_value); if ((r_value & 8U) == 0U) { goto ldv_44055; } else { } j = j + 1U; ldv_44057: ; if (j <= 999U) { goto ldv_44056; } else { } ldv_44055: ; if (j > 999U) { _rs.lock.raw_lock.ldv_1464.head_tail = 0U; _rs.lock.magic = 3735899821U; _rs.lock.owner_cpu = 4294967295U; _rs.lock.owner = (void *)-1; _rs.lock.dep_map.key = 0; _rs.lock.dep_map.class_cache[0] = 0; _rs.lock.dep_map.class_cache[1] = 0; _rs.lock.dep_map.name = "_rs.lock"; _rs.lock.dep_map.cpu = 0; _rs.lock.dep_map.ip = 0UL; _rs.interval = 1250; _rs.burst = 10; _rs.printed = 0; _rs.missed = 0; _rs.begin = 0UL; tmp = ___ratelimit(& _rs, "qla8044_minidump_process_rdmem"); if (tmp != 0) { printk("\v%s: failed to read through agent\n", "qla8044_minidump_process_rdmem"); } else { } ldv_write_unlock_irqrestore(& ha->hw_lock); return (0); } else { } j = 0U; goto ldv_44061; ldv_44060: qla8044_rd_reg_indirect(vha, (uint32_t )MD_MIU_TEST_AGT_RDDATA___0[j], & r_data); tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_data; j = j + 1U; ldv_44061: ; if (j <= 3U) { goto ldv_44060; } else { } r_addr = r_addr + 16U; i = i + 1U; ldv_44064: ; if (i < loop_cnt) { goto ldv_44063; } else { } ldv_write_unlock_irqrestore(& ha->hw_lock); ql_dbg(524288U, vha, 45300, "Leaving fn: %s datacount: 0x%x\n", "qla8044_minidump_process_rdmem", loop_cnt * 16U); *d_ptr = data_ptr; return (0); } } static uint32_t qla8044_minidump_process_rdrom(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t fl_addr ; uint32_t u32_count ; uint32_t rval ; struct qla8044_minidump_entry_rdrom *rom_hdr ; uint32_t *data_ptr ; int tmp ; { data_ptr = *d_ptr; rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr; fl_addr = rom_hdr->read_addr; u32_count = rom_hdr->read_data_size / 4U; ql_dbg(524288U, vha, 45301, "[%s]: fl_addr: 0x%x, count: 0x%x\n", "qla8044_minidump_process_rdrom", fl_addr, u32_count); tmp = qla8044_lockless_flash_read_u32(vha, fl_addr, (uint8_t *)data_ptr, (int )u32_count); rval = (uint32_t )tmp; if (rval != 0U) { ql_log(0U, vha, 45302, "%s: Flash Read Error,Count=%d\n", "qla8044_minidump_process_rdrom", u32_count); return (258U); } else { data_ptr = data_ptr + (unsigned long )u32_count; *d_ptr = data_ptr; return (0U); } } } static void qla8044_mark_entry_skipped(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , int index ) { { entry_hdr->d_ctrl.driver_flags = (uint8_t )((unsigned int )entry_hdr->d_ctrl.driver_flags | 128U); ql_log(2U, vha, 45303, "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", vha->host_no, index, entry_hdr->entry_type, (int )entry_hdr->d_ctrl.entry_capture_mask); return; } } static int qla8044_minidump_process_l2tag(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t addr ; uint32_t r_addr ; uint32_t c_addr ; uint32_t t_r_addr ; uint32_t i ; uint32_t k ; uint32_t loop_count ; uint32_t t_value ; uint32_t r_cnt ; uint32_t r_value ; unsigned long p_wait ; unsigned long 
w_time ; unsigned long p_mask ; uint32_t c_value_w ; uint32_t c_value_r ; struct qla8044_minidump_entry_cache *cache_hdr ; int rval ; uint32_t *data_ptr ; uint32_t *tmp ; { rval = 258; data_ptr = *d_ptr; ql_dbg(524288U, vha, 45304, "Entering fn: %s\n", "qla8044_minidump_process_l2tag"); cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = (uint32_t )cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = (uint32_t )cache_hdr->addr_ctrl.init_tag_value; r_cnt = (uint32_t )cache_hdr->read_ctrl.read_addr_cnt; p_wait = (unsigned long )cache_hdr->cache_ctrl.poll_wait; p_mask = (unsigned long )cache_hdr->cache_ctrl.poll_mask; i = 0U; goto ldv_44118; ldv_44117: qla8044_wr_reg_indirect(vha, t_r_addr, t_value); if (c_value_w != 0U) { qla8044_wr_reg_indirect(vha, c_addr, c_value_w); } else { } if (p_mask != 0UL) { w_time = (unsigned long )jiffies + p_wait; ldv_44113: qla8044_rd_reg_indirect(vha, c_addr, & c_value_r); if (((unsigned long )c_value_r & p_mask) == 0UL) { goto ldv_44106; } else if ((long )((unsigned long )jiffies - w_time) >= 0L) { return (rval); } else { } goto ldv_44113; ldv_44106: ; } else { } addr = r_addr; k = 0U; goto ldv_44115; ldv_44114: qla8044_rd_reg_indirect(vha, addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; addr = (uint32_t )cache_hdr->read_ctrl.read_addr_stride + addr; k = k + 1U; ldv_44115: ; if (k < r_cnt) { goto ldv_44114; } else { } t_value = (uint32_t )cache_hdr->addr_ctrl.tag_value_stride + t_value; i = i + 1U; ldv_44118: ; if (i < loop_count) { goto ldv_44117; } else { } *d_ptr = data_ptr; return (0); } } static void qla8044_minidump_process_l1cache(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t addr ; uint32_t r_addr ; uint32_t c_addr ; uint32_t t_r_addr ; uint32_t i ; uint32_t k ; uint32_t loop_count ; uint32_t t_value ; uint32_t r_cnt ; uint32_t r_value ; uint32_t c_value_w ; struct qla8044_minidump_entry_cache *cache_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; { data_ptr = *d_ptr; cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = (uint32_t )cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = (uint32_t )cache_hdr->addr_ctrl.init_tag_value; r_cnt = (uint32_t )cache_hdr->read_ctrl.read_addr_cnt; i = 0U; goto ldv_44142; ldv_44141: qla8044_wr_reg_indirect(vha, t_r_addr, t_value); qla8044_wr_reg_indirect(vha, c_addr, c_value_w); addr = r_addr; k = 0U; goto ldv_44139; ldv_44138: qla8044_rd_reg_indirect(vha, addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; addr = (uint32_t )cache_hdr->read_ctrl.read_addr_stride + addr; k = k + 1U; ldv_44139: ; if (k < r_cnt) { goto ldv_44138; } else { } t_value = (uint32_t )cache_hdr->addr_ctrl.tag_value_stride + t_value; i = i + 1U; ldv_44142: ; if (i < loop_count) { goto ldv_44141; } else { } *d_ptr = data_ptr; return; } } static void qla8044_minidump_process_rdocm(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t r_stride ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla8044_minidump_entry_rdocm *ocm_hdr ; uint32_t *data_ptr ; struct qla_hw_data *ha ; uint32_t *tmp ; { data_ptr = *d_ptr; ha = vha->hw; ql_dbg(524288U, vha, 45305, "Entering fn: %s\n", 
"qla8044_minidump_process_rdocm"); ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr; r_addr = ocm_hdr->read_addr; r_stride = ocm_hdr->read_addr_stride; loop_cnt = ocm_hdr->op_count; ql_dbg(524288U, vha, 45306, "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n", "qla8044_minidump_process_rdocm", r_addr, r_stride, loop_cnt); i = 0U; goto ldv_44159; ldv_44158: r_value = readl((void const volatile *)((unsigned long )r_addr + ha->nx_pcibase)); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; r_addr = r_addr + r_stride; i = i + 1U; ldv_44159: ; if (i < loop_cnt) { goto ldv_44158; } else { } ql_dbg(524288U, vha, 45307, "Leaving fn: %s datacount: 0x%lx\n", "qla8044_minidump_process_rdocm", (unsigned long )loop_cnt * 4UL); *d_ptr = data_ptr; return; } } static void qla8044_minidump_process_rdmux(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t s_stride ; uint32_t s_addr ; uint32_t s_value ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla8044_minidump_entry_mux *mux_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; ql_dbg(524288U, vha, 45308, "Entering fn: %s\n", "qla8044_minidump_process_rdmux"); mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr; r_addr = mux_hdr->read_addr; s_addr = mux_hdr->select_addr; s_stride = mux_hdr->select_value_stride; s_value = mux_hdr->select_value; loop_cnt = mux_hdr->op_count; i = 0U; goto ldv_44177; ldv_44176: qla8044_wr_reg_indirect(vha, s_addr, s_value); qla8044_rd_reg_indirect(vha, r_addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = s_value; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; s_value = s_value + s_stride; i = i + 1U; ldv_44177: ; if (i < loop_cnt) { goto ldv_44176; } else { } *d_ptr = data_ptr; return; } } static void qla8044_minidump_process_queue(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t s_addr ; uint32_t r_addr ; uint32_t r_stride ; uint32_t r_value ; uint32_t r_cnt ; uint32_t qid ; uint32_t i ; uint32_t k ; uint32_t loop_cnt ; struct qla8044_minidump_entry_queue *q_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; { qid = 0U; data_ptr = *d_ptr; ql_dbg(524288U, vha, 45309, "Entering fn: %s\n", "qla8044_minidump_process_queue"); q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr; s_addr = q_hdr->select_addr; r_cnt = (uint32_t )q_hdr->rd_strd.read_addr_cnt; r_stride = (uint32_t )q_hdr->rd_strd.read_addr_stride; loop_cnt = q_hdr->op_count; i = 0U; goto ldv_44200; ldv_44199: qla8044_wr_reg_indirect(vha, s_addr, qid); r_addr = q_hdr->read_addr; k = 0U; goto ldv_44197; ldv_44196: qla8044_rd_reg_indirect(vha, r_addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; r_addr = r_addr + r_stride; k = k + 1U; ldv_44197: ; if (k < r_cnt) { goto ldv_44196; } else { } qid = (uint32_t )q_hdr->q_strd.queue_id_stride + qid; i = i + 1U; ldv_44200: ; if (i < loop_cnt) { goto ldv_44199; } else { } *d_ptr = data_ptr; return; } } static uint32_t qla8044_minidump_process_pollrd(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t s_addr ; uint32_t s_value ; uint32_t r_value ; uint32_t poll_wait ; uint32_t poll_mask ; uint16_t s_stride ; uint16_t i ; struct qla8044_minidump_entry_pollrd *pollrd_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; pollrd_hdr = (struct qla8044_minidump_entry_pollrd *)entry_hdr; 
s_addr = pollrd_hdr->select_addr; r_addr = pollrd_hdr->read_addr; s_value = pollrd_hdr->select_value; s_stride = pollrd_hdr->select_value_stride; poll_wait = pollrd_hdr->poll_wait; poll_mask = pollrd_hdr->poll_mask; i = 0U; goto ldv_44222; ldv_44221: qla8044_wr_reg_indirect(vha, s_addr, s_value); poll_wait = pollrd_hdr->poll_wait; ldv_44220: qla8044_rd_reg_indirect(vha, s_addr, & r_value); if ((r_value & poll_mask) != 0U) { goto ldv_44217; } else { usleep_range(1000UL, 1100UL); poll_wait = poll_wait - 1U; if (poll_wait == 0U) { ql_log(0U, vha, 45310, "%s: TIMEOUT\n", "qla8044_minidump_process_pollrd"); goto error; } else { } } goto ldv_44220; ldv_44217: qla8044_rd_reg_indirect(vha, r_addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = s_value; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; s_value = (uint32_t )s_stride + s_value; i = (uint16_t )((int )i + 1); ldv_44222: ; if ((int )pollrd_hdr->op_count > (int )i) { goto ldv_44221; } else { } *d_ptr = data_ptr; return (0U); error: ; return (258U); } } static void qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t sel_val1 ; uint32_t sel_val2 ; uint32_t t_sel_val ; uint32_t data ; uint32_t i ; uint32_t sel_addr1 ; uint32_t sel_addr2 ; uint32_t sel_val_mask ; uint32_t read_addr ; struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; { data_ptr = *d_ptr; rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *)entry_hdr; sel_val1 = rdmux2_hdr->select_value_1; sel_val2 = rdmux2_hdr->select_value_2; sel_addr1 = rdmux2_hdr->select_addr_1; sel_addr2 = rdmux2_hdr->select_addr_2; sel_val_mask = rdmux2_hdr->select_value_mask; read_addr = rdmux2_hdr->read_addr; i = 0U; goto ldv_44241; ldv_44240: qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1); t_sel_val = sel_val1 & sel_val_mask; tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = t_sel_val; qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); qla8044_rd_reg_indirect(vha, read_addr, & data); tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = data; qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2); t_sel_val = sel_val2 & sel_val_mask; tmp___1 = data_ptr; data_ptr = data_ptr + 1; *tmp___1 = t_sel_val; qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); qla8044_rd_reg_indirect(vha, read_addr, & data); tmp___2 = data_ptr; data_ptr = data_ptr + 1; *tmp___2 = data; sel_val1 = (uint32_t )rdmux2_hdr->select_value_stride + sel_val1; sel_val2 = (uint32_t )rdmux2_hdr->select_value_stride + sel_val2; i = i + 1U; ldv_44241: ; if (rdmux2_hdr->op_count > i) { goto ldv_44240; } else { } *d_ptr = data_ptr; return; } } static uint32_t qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t poll_wait ; uint32_t poll_mask ; uint32_t r_value ; uint32_t data ; uint32_t addr_1 ; uint32_t addr_2 ; uint32_t value_1 ; uint32_t value_2 ; struct qla8044_minidump_entry_pollrdmwr *poll_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *)entry_hdr; addr_1 = poll_hdr->addr_1; addr_2 = poll_hdr->addr_2; value_1 = poll_hdr->value_1; value_2 = poll_hdr->value_2; poll_mask = poll_hdr->poll_mask; qla8044_wr_reg_indirect(vha, addr_1, value_1); poll_wait = poll_hdr->poll_wait; ldv_44261: qla8044_rd_reg_indirect(vha, addr_1, & r_value); if ((r_value & poll_mask) != 
0U) { goto ldv_44258; } else { usleep_range(1000UL, 1100UL); poll_wait = poll_wait - 1U; if (poll_wait == 0U) { ql_log(0U, vha, 45311, "%s: TIMEOUT\n", "qla8044_minidump_process_pollrdmwr"); goto error; } else { } } goto ldv_44261; ldv_44258: qla8044_rd_reg_indirect(vha, addr_2, & data); data = poll_hdr->modify_mask & data; qla8044_wr_reg_indirect(vha, addr_2, data); qla8044_wr_reg_indirect(vha, addr_1, value_2); poll_wait = poll_hdr->poll_wait; ldv_44263: qla8044_rd_reg_indirect(vha, addr_1, & r_value); if ((r_value & poll_mask) != 0U) { goto ldv_44262; } else { usleep_range(1000UL, 1100UL); poll_wait = poll_wait - 1U; if (poll_wait == 0U) { ql_log(0U, vha, 45312, "%s: TIMEOUT2\n", "qla8044_minidump_process_pollrdmwr"); goto error; } else { } } goto ldv_44263; ldv_44262: tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = addr_2; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = data; *d_ptr = data_ptr; return (0U); error: ; return (258U); } } static int qla8044_check_dma_engine_state(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; int rval ; uint32_t dma_eng_num ; uint32_t cmd_sts_and_cntrl ; uint64_t dma_base_addr ; struct qla8044_minidump_template_hdr *tmplt_hdr ; { ha = vha->hw; rval = 0; dma_eng_num = 0U; cmd_sts_and_cntrl = 0U; dma_base_addr = 0ULL; tmplt_hdr = (struct qla8044_minidump_template_hdr *)0; tmplt_hdr = (struct qla8044_minidump_template_hdr *)ha->md_tmplt_hdr; dma_eng_num = tmplt_hdr->saved_state_array[8]; dma_base_addr = (uint64_t )((dma_eng_num + 30514U) * 65536U); rval = qla8044_rd_reg_indirect(vha, (uint32_t )dma_base_addr + 8U, & cmd_sts_and_cntrl); if (rval != 0) { return (258); } else { } if ((int )cmd_sts_and_cntrl < 0) { return (0); } else { } return (258); } } static int qla8044_start_pex_dma(struct scsi_qla_host *vha , struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr ) { struct qla_hw_data *ha ; int rval ; int wait ; uint32_t dma_eng_num ; uint32_t cmd_sts_and_cntrl ; uint64_t dma_base_addr ; struct qla8044_minidump_template_hdr *tmplt_hdr ; { ha = vha->hw; rval = 0; wait = 0; dma_eng_num = 0U; cmd_sts_and_cntrl = 0U; dma_base_addr = 0ULL; tmplt_hdr = (struct qla8044_minidump_template_hdr *)0; tmplt_hdr = (struct qla8044_minidump_template_hdr *)ha->md_tmplt_hdr; dma_eng_num = tmplt_hdr->saved_state_array[8]; dma_base_addr = (uint64_t )((dma_eng_num + 30514U) * 65536U); rval = qla8044_wr_reg_indirect(vha, (uint32_t )dma_base_addr, m_hdr->desc_card_addr); if (rval != 0) { goto error_exit; } else { } rval = qla8044_wr_reg_indirect(vha, (uint32_t )dma_base_addr + 4U, 0U); if (rval != 0) { goto error_exit; } else { } rval = qla8044_wr_reg_indirect(vha, (uint32_t )dma_base_addr + 8U, m_hdr->start_dma_cmd); if (rval != 0) { goto error_exit; } else { } wait = 0; goto ldv_44287; ldv_44286: rval = qla8044_rd_reg_indirect(vha, (uint32_t )dma_base_addr + 8U, & cmd_sts_and_cntrl); if (rval != 0) { goto error_exit; } else { } if ((cmd_sts_and_cntrl & 2U) == 0U) { goto ldv_44285; } else { } __const_udelay(42950UL); wait = wait + 1; ldv_44287: ; if (wait <= 9999) { goto ldv_44286; } else { } ldv_44285: ; if (wait > 9999) { rval = 258; goto error_exit; } else { } error_exit: ; return (rval); } } static int qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha , struct qla8044_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { struct qla_hw_data *ha ; int rval ; struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr ; uint32_t chunk_size ; uint32_t read_size ; uint8_t *data_ptr ; void *rdmem_buffer ; dma_addr_t rdmem_dma ; struct qla8044_pex_dma_descriptor dma_desc ; 
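/* PEX-DMA read entry: confirm the DMA engine is available, allocate a 16 KB DMA bounce buffer, then transfer the requested region in chunks by writing a descriptor to MS memory, starting the engine and copying each completed chunk into the dump buffer; on failure the caller falls back to the slower register-based rdmem path. */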
size_t __len ; void *__ret ; { ha = vha->hw; rval = 0; m_hdr = (struct qla8044_minidump_entry_rdmem_pex_dma *)0; data_ptr = (uint8_t *)*d_ptr; rdmem_buffer = (void *)0; rval = qla8044_check_dma_engine_state(vha); if (rval != 0) { ql_dbg(524288U, vha, 45383, "DMA engine not available. Fallback to rdmem-read.\n"); return (258); } else { } m_hdr = (struct qla8044_minidump_entry_rdmem_pex_dma *)entry_hdr; rdmem_buffer = dma_alloc_attrs(& (ha->pdev)->dev, 16384UL, & rdmem_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )rdmem_buffer == (unsigned long )((void *)0)) { ql_dbg(524288U, vha, 45384, "Unable to allocate rdmem dma buffer\n"); return (258); } else { } dma_desc.cmd.dma_desc_cmd = (unsigned int )m_hdr->dma_desc_cmd & 65295U; dma_desc.cmd.dma_desc_cmd = (unsigned int )dma_desc.cmd.dma_desc_cmd | (((unsigned int )((uint16_t )(ha->pdev)->devfn) & 7U) << 4U); dma_desc.dma_bus_addr = rdmem_dma; chunk_size = 16384U; dma_desc.cmd.read_data_size = chunk_size; read_size = 0U; goto ldv_44308; ldv_44307: ; if (m_hdr->read_data_size - read_size <= 16383U) { chunk_size = m_hdr->read_data_size - read_size; dma_desc.cmd.read_data_size = chunk_size; } else { } dma_desc.src_addr = (uint64_t )(m_hdr->read_addr + read_size); rval = qla8044_ms_mem_write_128b(vha, (uint64_t )m_hdr->desc_card_addr, (uint32_t *)(& dma_desc), 3U); if (rval != 0) { ql_log(1U, vha, 45386, "%s: Error writing rdmem-dma-init to MS !!!\n", "qla8044_minidump_pex_dma_read"); goto error_exit; } else { } ql_dbg(524288U, vha, 45387, "%s: Dma-descriptor: Instruct for rdmem dma (chunk_size 0x%x).\n", "qla8044_minidump_pex_dma_read", chunk_size); rval = qla8044_start_pex_dma(vha, m_hdr); if (rval != 0) { goto error_exit; } else { } __len = (size_t )chunk_size; __ret = __builtin_memcpy((void *)data_ptr, (void const *)rdmem_buffer, __len); data_ptr = data_ptr + (unsigned long )chunk_size; read_size = read_size + chunk_size; ldv_44308: ; if (m_hdr->read_data_size > read_size) { goto ldv_44307; } else { } *d_ptr = (uint32_t *)data_ptr; error_exit: ; if ((unsigned long )rdmem_buffer != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 16384UL, rdmem_buffer, rdmem_dma, (struct dma_attrs *)0); } else { } return (rval); } } int qla8044_collect_md_data(struct scsi_qla_host *vha ) { int num_entry_hdr ; struct qla8044_minidump_entry_hdr *entry_hdr ; struct qla8044_minidump_template_hdr *tmplt_hdr ; uint32_t *data_ptr ; uint32_t data_collected ; uint32_t f_capture_mask ; int i ; int rval ; uint64_t now ; uint32_t timestamp ; uint32_t idc_control ; struct qla_hw_data *ha ; int tmp ; unsigned int tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; { num_entry_hdr = 0; data_collected = 0U; rval = 258; ha = vha->hw; if ((unsigned long )ha->md_dump == (unsigned long )((void *)0)) { ql_log(2U, vha, 45313, "%s(%ld) No buffer to dump\n", "qla8044_collect_md_data", vha->host_no); return (rval); } else { } if (ha->fw_dumped != 0) { ql_log(1U, vha, 45325, "Firmware has been previously dumped (%p) -- ignoring request.\n", ha->fw_dump); goto md_failed; } else { } ha->fw_dumped = 0; if ((unsigned long )ha->md_tmplt_hdr == (unsigned long )((void *)0) || (unsigned long )ha->md_dump == (unsigned long )((void *)0)) { ql_log(1U, vha, 45326, "Memory not allocated for minidump capture\n"); goto md_failed; } else { } qla8044_idc_lock(ha); idc_control = qla8044_rd_reg(ha, 14224UL); if ((idc_control & 2U) != 0U) { ql_log(1U, vha, 45330, "Forced reset from application, ignore minidump capture\n"); qla8044_wr_reg(ha, 14224UL, idc_control & 
4294967293U); qla8044_idc_unlock(ha); goto md_failed; } else { } qla8044_idc_unlock(ha); tmp = qla82xx_validate_template_chksum(vha); if (tmp != 0) { ql_log(2U, vha, 45321, "Template checksum validation error\n"); goto md_failed; } else { } tmplt_hdr = (struct qla8044_minidump_template_hdr *)ha->md_tmplt_hdr; data_ptr = (uint32_t *)ha->md_dump; num_entry_hdr = (int )tmplt_hdr->num_of_entries; ql_dbg(524288U, vha, 45338, "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); f_capture_mask = tmplt_hdr->capture_debug_level & 255U; if ((f_capture_mask & 3U) != 3U) { ql_log(1U, vha, 45327, "Minimum required capture mask[0x%x] level not set\n", f_capture_mask); } else { } tmplt_hdr->driver_capture_mask = (uint32_t )ql2xmdcapmask; ql_log(2U, vha, 45314, "[%s]: starting data ptr: %p\n", "qla8044_collect_md_data", data_ptr); ql_log(2U, vha, 45323, "[%s]: no of entry headers in Template: 0x%x\n", "qla8044_collect_md_data", num_entry_hdr); ql_log(2U, vha, 45324, "[%s]: Total_data_size 0x%x, %d obtained\n", "qla8044_collect_md_data", ha->md_dump_size, ha->md_dump_size); now = get_jiffies_64(); tmp___0 = jiffies_to_msecs((unsigned long const )now); timestamp = tmp___0 / 1000U; tmplt_hdr->driver_timestamp = timestamp; entry_hdr = (struct qla8044_minidump_entry_hdr *)ha->md_tmplt_hdr + (unsigned long )tmplt_hdr->first_entry_offset; tmplt_hdr->saved_state_array[3] = tmplt_hdr->ocm_window_reg[(int )ha->portnum]; i = 0; goto ldv_44352; ldv_44351: ; if (ha->md_dump_size < data_collected) { ql_log(2U, vha, 45315, "Data collected: [0x%x], Total Dump size: [0x%x]\n", data_collected, ha->md_dump_size); return (rval); } else { } if (((int )entry_hdr->d_ctrl.entry_capture_mask & ql2xmdcapmask) == 0) { entry_hdr->d_ctrl.driver_flags = (uint8_t )((unsigned int )entry_hdr->d_ctrl.driver_flags | 128U); goto skip_nxt_entry; } else { } ql_dbg(524288U, vha, 45316, "Data collected: [0x%x], Dump size left:[0x%x]\n", data_collected, ha->md_dump_size - data_collected); switch (entry_hdr->entry_type) { case 255U: qla8044_mark_entry_skipped(vha, entry_hdr, i); goto ldv_44329; case 98U: rval = qla8044_minidump_process_control(vha, entry_hdr); if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } else { } goto ldv_44329; case 1U: qla8044_minidump_process_rdcrb(vha, entry_hdr, & data_ptr); goto ldv_44329; case 72U: rval = qla8044_minidump_pex_dma_read(vha, entry_hdr, & data_ptr); if (rval != 0) { rval = qla8044_minidump_process_rdmem(vha, entry_hdr, & data_ptr); if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } else { } } else { } goto ldv_44329; case 4U: ; case 71U: tmp___1 = qla8044_minidump_process_rdrom(vha, entry_hdr, & data_ptr); rval = (int )tmp___1; if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); } else { } goto ldv_44329; case 21U: ; case 22U: ; case 23U: ; case 24U: rval = qla8044_minidump_process_l2tag(vha, entry_hdr, & data_ptr); if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } else { } goto ldv_44329; case 8U: ; case 9U: ; case 11U: ; case 12U: qla8044_minidump_process_l1cache(vha, entry_hdr, & data_ptr); goto ldv_44329; case 6U: qla8044_minidump_process_rdocm(vha, entry_hdr, & data_ptr); goto ldv_44329; case 2U: qla8044_minidump_process_rdmux(vha, entry_hdr, & data_ptr); goto ldv_44329; case 3U: qla8044_minidump_process_queue(vha, entry_hdr, & data_ptr); goto ldv_44329; case 35U: tmp___2 = qla8044_minidump_process_pollrd(vha, entry_hdr, & data_ptr); rval = (int )tmp___2; if (rval != 0) { 
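/* The POLLRD entry timed out or failed: mark it skipped and continue with the next template entry instead of aborting the dump. */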
qla8044_mark_entry_skipped(vha, entry_hdr, i); } else { } goto ldv_44329; case 36U: qla8044_minidump_process_rdmux2(vha, entry_hdr, & data_ptr); goto ldv_44329; case 37U: tmp___3 = qla8044_minidump_process_pollrdmwr(vha, entry_hdr, & data_ptr); rval = (int )tmp___3; if (rval != 0) { qla8044_mark_entry_skipped(vha, entry_hdr, i); } else { } goto ldv_44329; case 0U: ; default: qla8044_mark_entry_skipped(vha, entry_hdr, i); goto ldv_44329; } ldv_44329: data_collected = (uint32_t )((long )data_ptr) - (uint32_t )((long )ha->md_dump); skip_nxt_entry: entry_hdr = entry_hdr + (unsigned long )entry_hdr->entry_size; i = i + 1; ldv_44352: ; if (i < num_entry_hdr) { goto ldv_44351; } else { } if (ha->md_dump_size != data_collected) { ql_log(2U, vha, 45317, "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n", data_collected, ha->md_dump_size); goto md_failed; } else { } ql_log(2U, vha, 45328, "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, 0U); ql_log(2U, vha, 45318, "Leaving fn: %s Last entry: 0x%x\n", "qla8044_collect_md_data", i); md_failed: ; return (rval); } } void qla8044_get_minidump(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; int tmp ; { ha = vha->hw; tmp = qla8044_collect_md_data(vha); if (tmp == 0) { ha->fw_dumped = 1; } else { ql_log(0U, vha, 45275, "%s: Unable to collect minidump\n", "qla8044_get_minidump"); } return; } } static int qla8044_poll_flash_status_reg(struct scsi_qla_host *vha ) { uint32_t flash_status ; int retries ; int ret_val ; int tmp ; { retries = 2000; ret_val = 0; goto ldv_44368; ldv_44367: ret_val = qla8044_rd_reg_indirect(vha, 1108344836U, & flash_status); if (ret_val != 0) { ql_log(1U, vha, 45372, "%s: Failed to read FLASH_STATUS reg.\n", "qla8044_poll_flash_status_reg"); goto ldv_44366; } else { } if ((flash_status & 6U) == 6U) { goto ldv_44366; } else { } msleep(1U); ldv_44368: tmp = retries; retries = retries - 1; if (tmp != 0) { goto ldv_44367; } else { } ldv_44366: ; if (retries == 0) { ret_val = 258; } else { } return (ret_val); } } static int qla8044_write_flash_status_reg(struct scsi_qla_host *vha , uint32_t data ) { int ret_val ; uint32_t cmd ; { ret_val = 0; cmd = (vha->hw)->fdt_wrt_sts_reg_cmd; ret_val = qla8044_wr_reg_indirect(vha, 1108410376U, cmd | 16580864U); if (ret_val != 0) { ql_log(1U, vha, 45349, "%s: Failed to write to FLASH_ADDR.\n", "qla8044_write_flash_status_reg"); goto exit_func; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410380U, data); if (ret_val != 0) { ql_log(1U, vha, 45350, "%s: Failed to write to FLASH_WRDATA.\n", "qla8044_write_flash_status_reg"); goto exit_func; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410372U, 5U); if (ret_val != 0) { ql_log(1U, vha, 45351, "%s: Failed to write to FLASH_CONTROL.\n", "qla8044_write_flash_status_reg"); goto exit_func; } else { } ret_val = qla8044_poll_flash_status_reg(vha); if (ret_val != 0) { ql_log(1U, vha, 45352, "%s: Error polling flash status reg.\n", "qla8044_write_flash_status_reg"); } else { } exit_func: ; return (ret_val); } } static int qla8044_unprotect_flash(scsi_qla_host_t *vha ) { int ret_val ; struct qla_hw_data *ha ; { ha = vha->hw; ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable); if (ret_val != 0) { ql_log(1U, vha, 45369, "%s: Write flash status failed.\n", "qla8044_unprotect_flash"); } else { } return (ret_val); } } static int qla8044_protect_flash(scsi_qla_host_t *vha ) { int ret_val ; struct 
qla_hw_data *ha ; { ha = vha->hw; ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable); if (ret_val != 0) { ql_log(1U, vha, 45371, "%s: Write flash status failed.\n", "qla8044_protect_flash"); } else { } return (ret_val); } } static int qla8044_erase_flash_sector(struct scsi_qla_host *vha , uint32_t sector_start_addr ) { uint32_t reversed_addr ; int ret_val ; { ret_val = 0; ret_val = qla8044_poll_flash_status_reg(vha); if (ret_val != 0) { ql_log(1U, vha, 45358, "%s: Poll flash status after erase failed..\n", "qla8044_erase_flash_sector"); } else { } reversed_addr = (((sector_start_addr & 255U) << 16) | (sector_start_addr & 65280U)) | ((sector_start_addr & 16711680U) >> 16); ret_val = qla8044_wr_reg_indirect(vha, 1108410380U, reversed_addr); if (ret_val != 0) { ql_log(1U, vha, 45359, "%s: Failed to write to FLASH_WRDATA.\n", "qla8044_erase_flash_sector"); } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410376U, (vha->hw)->fdt_erase_cmd | 16581376U); if (ret_val != 0) { ql_log(1U, vha, 45360, "%s: Failed to write to FLASH_ADDR.\n", "qla8044_erase_flash_sector"); } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410372U, 61U); if (ret_val != 0) { ql_log(1U, vha, 45361, "%s: Failed write to FLASH_CONTROL.\n", "qla8044_erase_flash_sector"); } else { } ret_val = qla8044_poll_flash_status_reg(vha); if (ret_val != 0) { ql_log(1U, vha, 45362, "%s: Poll flash status failed.\n", "qla8044_erase_flash_sector"); } else { } return (ret_val); } } static int qla8044_flash_write_u32(struct scsi_qla_host *vha , uint32_t addr , uint32_t *p_data ) { int ret_val ; { ret_val = 0; ret_val = qla8044_wr_reg_indirect(vha, 1108410376U, (addr >> 2) | 8388608U); if (ret_val != 0) { ql_log(1U, vha, 45364, "%s: Failed write to FLASH_ADDR.\n", "qla8044_flash_write_u32"); goto exit_func; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410380U, *p_data); if (ret_val != 0) { ql_log(1U, vha, 45365, "%s: Failed write to FLASH_WRDATA.\n", "qla8044_flash_write_u32"); goto exit_func; } else { } ret_val = qla8044_wr_reg_indirect(vha, 1108410372U, 61U); if (ret_val != 0) { ql_log(1U, vha, 45366, "%s: Failed write to FLASH_CONTROL.\n", "qla8044_flash_write_u32"); goto exit_func; } else { } ret_val = qla8044_poll_flash_status_reg(vha); if (ret_val != 0) { ql_log(1U, vha, 45367, "%s: Poll flash status failed.\n", "qla8044_flash_write_u32"); } else { } exit_func: ; return (ret_val); } } static int qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha , uint32_t *dwptr , uint32_t faddr , uint32_t dwords ) { int ret ; uint32_t spi_val ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; { ret = 258; if (dwords <= 1U || dwords > 64U) { ql_dbg(8388608U, vha, 45347, "Got unsupported dwords = 0x%x.\n", dwords); return (258); } else { } qla8044_rd_reg_indirect(vha, 671670292U, & spi_val); qla8044_wr_reg_indirect(vha, 671670292U, spi_val | 4U); qla8044_wr_reg_indirect(vha, 1108410376U, 8388608U); tmp = dwptr; dwptr = dwptr + 1; ret = qla8044_wr_reg_indirect(vha, 1108410380U, *tmp); qla8044_wr_reg_indirect(vha, 1108410372U, 67U); ret = qla8044_poll_flash_status_reg(vha); if (ret != 0) { ql_log(1U, vha, 45348, "%s: Failed.\n", "qla8044_write_flash_buffer_mode"); goto exit_func; } else { } dwords = dwords - 1U; qla8044_wr_reg_indirect(vha, 1108410376U, 8388609U); goto ldv_44415; ldv_44414: tmp___0 = dwptr; dwptr = dwptr + 1; qla8044_wr_reg_indirect(vha, 1108410380U, *tmp___0); qla8044_wr_reg_indirect(vha, 1108410372U, 127U); ret = qla8044_poll_flash_status_reg(vha); if (ret != 0) { ql_log(1U, vha, 45353, "%s: 
Failed.\n", "qla8044_write_flash_buffer_mode"); goto exit_func; } else { } dwords = dwords - 1U; ldv_44415: ; if (dwords != 1U) { goto ldv_44414; } else { } qla8044_wr_reg_indirect(vha, 1108410376U, (faddr >> 2) | 8388608U); tmp___1 = dwptr; dwptr = dwptr + 1; qla8044_wr_reg_indirect(vha, 1108410380U, *tmp___1); qla8044_wr_reg_indirect(vha, 1108410372U, 125U); ret = qla8044_poll_flash_status_reg(vha); if (ret != 0) { ql_log(1U, vha, 45354, "%s: Failed.\n", "qla8044_write_flash_buffer_mode"); goto exit_func; } else { } qla8044_rd_reg_indirect(vha, 671670288U, & spi_val); if ((spi_val & 4U) != 0U) { ql_log(1U, vha, 45355, "%s: Failed.\n", "qla8044_write_flash_buffer_mode"); spi_val = 0U; qla8044_rd_reg_indirect(vha, 671670292U, & spi_val); qla8044_wr_reg_indirect(vha, 671670292U, spi_val | 4U); } else { } exit_func: ; return (ret); } } static int qla8044_write_flash_dword_mode(scsi_qla_host_t *vha , uint32_t *dwptr , uint32_t faddr , uint32_t dwords ) { int ret ; uint32_t liter ; { ret = 258; liter = 0U; goto ldv_44428; ldv_44427: ret = qla8044_flash_write_u32(vha, faddr, dwptr); if (ret != 0) { ql_dbg(524288U, vha, 45377, "%s: flash address=%x data=%x.\n", "qla8044_write_flash_dword_mode", faddr, *dwptr); goto ldv_44426; } else { } liter = liter + 1U; faddr = faddr + 4U; dwptr = dwptr + 1; ldv_44428: ; if (liter < dwords) { goto ldv_44427; } else { } ldv_44426: ; return (ret); } } int qla8044_write_optrom_data(struct scsi_qla_host *vha , uint8_t *buf , uint32_t offset , uint32_t length ) { int rval ; int i ; int burst_iter_count ; int dword_count ; int erase_sec_count ; uint32_t erase_offset ; uint8_t *p_cache ; uint8_t *p_src ; void *tmp ; size_t __len ; void *__ret ; { rval = 258; erase_offset = offset; tmp = kcalloc((size_t )length, 1UL, 208U); p_cache = (uint8_t *)tmp; if ((unsigned long )p_cache == (unsigned long )((uint8_t *)0U)) { return (258); } else { } __len = (size_t )length; __ret = __builtin_memcpy((void *)p_cache, (void const *)buf, __len); p_src = p_cache; dword_count = (int )(length / 4U); burst_iter_count = dword_count / 64; erase_sec_count = (int )(length / 65536U); scsi_block_requests(vha->host); qla8044_flash_lock(vha); qla8044_unprotect_flash(vha); i = 0; goto ldv_44448; ldv_44447: rval = qla8044_erase_flash_sector(vha, erase_offset); ql_dbg(8388608U, vha, 45368, "Done erase of sector=0x%x.\n", erase_offset); if (rval != 0) { ql_log(1U, vha, 45345, "Failed to erase the sector having address: 0x%x.\n", erase_offset); goto out; } else { } erase_offset = erase_offset + 65536U; i = i + 1; ldv_44448: ; if (i < erase_sec_count) { goto ldv_44447; } else { } ql_dbg(8388608U, vha, 45375, "Got write for addr = 0x%x length=0x%x.\n", offset, length); i = 0; goto ldv_44451; ldv_44450: rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src, offset, 64U); if (rval != 0) { ql_log(1U, vha, 45346, "Failed to write flash in buffer mode, Reverting to slow-write.\n"); rval = qla8044_write_flash_dword_mode(vha, (uint32_t *)p_src, offset, 64U); } else { } p_src = p_src + 256UL; offset = offset + 256U; i = i + 1; ldv_44451: ; if (i < burst_iter_count) { goto ldv_44450; } else { } ql_dbg(8388608U, vha, 45363, "Done writing.\n"); out: qla8044_protect_flash(vha); qla8044_flash_unlock(vha); scsi_unblock_requests(vha->host); kfree((void const *)p_cache); return (rval); } } irqreturn_t qla8044_intr_handler(int irq , void *dev_id ) { scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct rsp_que *rsp ; struct device_reg_82xx *reg ; int status ; unsigned long flags ; unsigned long iter ; 
uint32_t stat ; uint16_t mb[4U] ; uint32_t leg_int_ptr ; uint32_t pf_bit ; void *tmp ; int tmp___0 ; long tmp___1 ; raw_spinlock_t *tmp___2 ; unsigned int tmp___3 ; unsigned long tmp___4 ; { status = 0; leg_int_ptr = 0U; rsp = (struct rsp_que *)dev_id; if ((unsigned long )rsp == (unsigned long )((struct rsp_que *)0)) { ql_log(2U, (scsi_qla_host_t *)0, 45379, "%s(): NULL response queue pointer\n", "qla8044_intr_handler"); return (0); } else { } ha = rsp->hw; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = pci_channel_offline(ha->pdev); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 0L); if (tmp___1 != 0L) { return (1); } else { } leg_int_ptr = qla8044_rd_reg(ha, 14528UL); if ((int )leg_int_ptr >= 0) { ql_dbg(524288U, vha, 45380, "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n", "qla8044_intr_handler"); return (0); } else { } pf_bit = (uint32_t )((int )ha->portnum << 16); if ((leg_int_ptr & 983040U) != pf_bit) { ql_dbg(524288U, vha, 45381, "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n", "qla8044_intr_handler", leg_int_ptr & 983040U, pf_bit); return (0); } else { } qla8044_wr_reg(ha, 14532UL, 0U); ldv_44470: leg_int_ptr = qla8044_rd_reg(ha, 14528UL); if ((leg_int_ptr & 983040U) != pf_bit) { goto ldv_44469; } else { } if ((leg_int_ptr & 1073741824U) != 0U) { goto ldv_44470; } else { } ldv_44469: reg = & (ha->iobase)->isp82; tmp___2 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___2); iter = 1UL; goto ldv_44484; ldv_44483: tmp___3 = readl((void const volatile *)(& reg->host_int)); if (tmp___3 != 0U) { stat = readl((void const volatile *)(& reg->host_status)); if ((stat & 32768U) == 0U) { goto ldv_44474; } else { } switch (stat & 255U) { case 1U: ; case 2U: ; case 16U: ; case 17U: qla82xx_mbx_completion(vha, (int )((unsigned short )(stat >> 16))); status = status | 1; goto ldv_44479; case 18U: mb[0] = (unsigned short )(stat >> 16); mb[1] = readw((void const volatile *)(& reg->mailbox_out) + 1U); mb[2] = readw((void const volatile *)(& reg->mailbox_out) + 2U); mb[3] = readw((void const volatile *)(& reg->mailbox_out) + 3U); qla2x00_async_event(vha, rsp, (uint16_t *)(& mb)); goto ldv_44479; case 19U: qla24xx_process_response_queue(vha, rsp); goto ldv_44479; default: ql_dbg(524288U, vha, 45382, "Unrecognized interrupt type (%d).\n", stat & 255U); goto ldv_44479; } ldv_44479: ; } else { } writel(0U, (void volatile *)(& reg->host_int)); ldv_44484: tmp___4 = iter; iter = iter - 1UL; if (tmp___4 != 0UL) { goto ldv_44483; } else { } ldv_44474: qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } static int qla8044_idc_dontreset(struct qla_hw_data *ha ) { uint32_t idc_ctrl ; { idc_ctrl = qla8044_rd_reg(ha, 14224UL); return ((int )idc_ctrl & 1); } } static void qla8044_clear_rst_ready(scsi_qla_host_t *vha ) { uint32_t drv_state ; int tmp ; { tmp = qla8044_rd_direct(vha, 5U); drv_state = (uint32_t )tmp; drv_state = (uint32_t )(~ (1 << (int )(vha->hw)->portnum)) & drv_state; ql_dbg(524288U, vha, 45373, "drv_state: 0x%08x\n", drv_state); qla8044_wr_direct(vha, 5U, drv_state); return; } } int qla8044_abort_isp(scsi_qla_host_t *vha ) { int rval ; uint32_t dev_state ; struct qla_hw_data *ha ; int tmp ; int tmp___0 ; { ha = vha->hw; qla8044_idc_lock(ha); tmp = qla8044_rd_direct(vha, 4U); dev_state = (uint32_t )tmp; if (ql2xdontresethba != 0) { qla8044_set_idc_dontreset(vha); } else { } if (dev_state == 3U) { tmp___0 = qla8044_idc_dontreset(ha); if (tmp___0 == 
1) { ql_dbg(524288U, vha, 45374, "Reset recovery disabled\n"); rval = 258; goto exit_isp_reset; } else { } ql_dbg(524288U, vha, 45376, "HW State: NEED RESET\n"); qla8044_wr_direct(vha, 4U, 4U); } else { } qla83xx_reset_ownership(vha); qla8044_idc_unlock(ha); rval = qla8044_device_state_handler(vha); qla8044_idc_lock(ha); qla8044_clear_rst_ready(vha); exit_isp_reset: qla8044_idc_unlock(ha); if (rval == 0) { ha->flags.isp82xx_fw_hung = 0U; ha->flags.nic_core_reset_hdlr_active = 0U; rval = qla82xx_restart_isp(vha); } else { } return (rval); } } void disable_suitable_timer_16(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_16) { ldv_timer_state_16 = 0; return; } else { } return; } } void activate_pending_timer_16(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_16 == (unsigned long )timer) { if (ldv_timer_state_16 == 2 || pending_flag != 0) { ldv_timer_list_16 = timer; ldv_timer_list_16->data = data; ldv_timer_state_16 = 1; } else { } return; } else { } reg_timer_16(timer); ldv_timer_list_16->data = data; return; } } int reg_timer_16(struct timer_list *timer ) { { ldv_timer_list_16 = timer; ldv_timer_state_16 = 1; return (0); } } void choose_timer_16(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_16 = 2; return; } } int ldv_del_timer_77(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_78(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } __inline static int variable_test_bit(long nr , unsigned long const volatile *addr ) { int oldbit ; { __asm__ volatile ("bt %2,%1\n\tsbb %0,%0": "=r" (oldbit): "m" (*((unsigned long *)addr)), "Ir" (nr)); return (oldbit); } } __inline static __u64 __arch_swab64(__u64 val ) { { __asm__ ("bswapq %0": "=r" (val): "0" (val)); return (val); } } __inline static __u64 __fswab64(__u64 val ) { __u64 tmp ; { tmp = __arch_swab64(val); return (tmp); } } __inline static __u32 __le32_to_cpup(__le32 const *p ) { { return ((__u32 )*p); } } extern void dump_stack(void) ; extern int __dynamic_pr_debug(struct _ddebug * , char const * , ...) 
; extern int strcasecmp(char const * , char const * ) ; extern void _raw_spin_lock(raw_spinlock_t * ) ; extern void _raw_spin_unlock(raw_spinlock_t * ) ; __inline static void spin_lock(spinlock_t *lock ) { { _raw_spin_lock(& lock->ldv_6105.rlock); return; } } __inline static void spin_unlock(spinlock_t *lock ) { { _raw_spin_unlock(& lock->ldv_6105.rlock); return; } } extern void __wake_up(wait_queue_head_t * , unsigned int , int , void * ) ; extern void prepare_to_wait(wait_queue_head_t * , wait_queue_t * , int ) ; extern void finish_wait(wait_queue_head_t * , wait_queue_t * ) ; extern int autoremove_wake_function(wait_queue_t * , unsigned int , int , void * ) ; extern void init_timer_key(struct timer_list * , unsigned int , char const * , struct lock_class_key * ) ; int ldv_del_timer_81(struct timer_list *ldv_func_arg1 ) ; extern void delayed_work_timer_fn(unsigned long ) ; extern struct workqueue_struct *system_wq ; extern bool queue_delayed_work_on(int , struct workqueue_struct * , struct delayed_work * , unsigned long ) ; extern void flush_scheduled_work(void) ; extern bool flush_delayed_work(struct delayed_work * ) ; __inline static bool queue_delayed_work(struct workqueue_struct *wq , struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = queue_delayed_work_on(4096, wq, dwork, delay); return (tmp); } } __inline static bool schedule_work(struct work_struct *work ) { bool tmp ; { tmp = queue_work(system_wq, work); return (tmp); } } __inline static bool schedule_delayed_work(struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = queue_delayed_work(system_wq, dwork, delay); return (tmp); } } extern void kmem_cache_free(struct kmem_cache * , void * ) ; extern void *kmem_cache_alloc(struct kmem_cache * , gfp_t ) ; __inline static void *kmem_cache_zalloc(struct kmem_cache *k , gfp_t flags ) { void *tmp ; { tmp = kmem_cache_alloc(k, flags | 32768U); return (tmp); } } void disable_suitable_timer_17(struct timer_list *timer ) ; void choose_timer_17(struct timer_list *timer ) ; int reg_timer_17(struct timer_list *timer ) ; void activate_pending_timer_17(struct timer_list *timer , unsigned long data , int pending_flag ) ; __inline static void sg_assign_page(struct scatterlist *sg , struct page *page ) { unsigned long page_link ; long tmp ; long tmp___0 ; long tmp___1 ; { page_link = sg->page_link & 3UL; tmp = ldv__builtin_expect(((unsigned long )page & 3UL) != 0UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (65), "i" (12UL)); ldv_31894: ; goto ldv_31894; } else { } tmp___0 = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (67), "i" (12UL)); ldv_31895: ; goto ldv_31895; } else { } tmp___1 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (68), "i" (12UL)); ldv_31896: ; goto ldv_31896; } else { } sg->page_link = page_link | (unsigned long )page; return; } } __inline static void sg_set_page(struct scatterlist *sg , struct page *page , unsigned int len , unsigned int offset ) { { 
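/* sg_set_page(): attach the page to this scatterlist entry (keeping the low flag bits of page_link) and record the data offset and length. */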
sg_assign_page(sg, page); sg->offset = offset; sg->length = len; return; } } __inline static struct page *sg_page___3(struct scatterlist *sg ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (98), "i" (12UL)); ldv_31906: ; goto ldv_31906; } else { } tmp___0 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (99), "i" (12UL)); ldv_31907: ; goto ldv_31907; } else { } return ((struct page *)(sg->page_link & 0xfffffffffffffffcUL)); } } __inline static void *sg_virt___2(struct scatterlist *sg ) { struct page *tmp ; void *tmp___0 ; { tmp = sg_page___3(sg); tmp___0 = lowmem_page_address((struct page const *)tmp); return (tmp___0 + (unsigned long )sg->offset); } } extern void sg_init_table(struct scatterlist * , unsigned int ) ; __inline static int dma_map_sg_attrs___2(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int i ; int ents ; struct scatterlist *s ; void *tmp___0 ; int tmp___1 ; long tmp___2 ; { tmp = get_dma_ops(dev); ops = tmp; i = 0; s = sg; goto ldv_32456; ldv_32455: tmp___0 = sg_virt___2(s); kmemcheck_mark_initialized(tmp___0, s->length); i = i + 1; s = sg_next(s); ldv_32456: ; if (i < nents) { goto ldv_32455; } else { } tmp___1 = valid_dma_direction((int )dir); tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (52), "i" (12UL)); ldv_32458: ; goto ldv_32458; } else { } ents = (*(ops->map_sg))(dev, sg, nents, dir, attrs); debug_dma_map_sg(dev, sg, nents, ents, (int )dir); return (ents); } } __inline static void dma_unmap_sg_attrs___1(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (65), "i" (12UL)); ldv_32467: ; goto ldv_32467; } else { } debug_dma_unmap_sg(dev, sg, nents, (int )dir); if ((unsigned long )ops->unmap_sg != (unsigned long )((void (*)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_sg))(dev, sg, nents, dir, attrs); } else { } return; } } __inline static int pci_map_sg(struct pci_dev *hwdev , struct scatterlist *sg , int nents , int direction ) { int tmp ; { tmp = dma_map_sg_attrs___2((unsigned long )hwdev != (unsigned long )((struct pci_dev *)0) ? 
& hwdev->dev : (struct device *)0, sg, nents, (enum dma_data_direction )direction, (struct dma_attrs *)0); return (tmp); } } __inline static void pci_unmap_sg(struct pci_dev *hwdev , struct scatterlist *sg , int nents , int direction ) { { dma_unmap_sg_attrs___1((unsigned long )hwdev != (unsigned long )((struct pci_dev *)0) ? & hwdev->dev : (struct device *)0, sg, nents, (enum dma_data_direction )direction, (struct dma_attrs *)0); return; } } __inline static u32 get_unaligned_le32(void const *p ) { __u32 tmp ; { tmp = __le32_to_cpup((__le32 const *)p); return (tmp); } } __inline static void put_unaligned_be64(u64 val , void *p ) { __u64 tmp ; { tmp = __fswab64(val); *((__be64 *)p) = tmp; return; } } int ldv_scsi_add_host_with_dma_82(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; extern int scsilun_to_int(struct scsi_lun * ) ; void qlt_disable_vha(struct scsi_qla_host *vha ) ; int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops , u64 wwpn , int (*callback)(struct scsi_qla_host * ) , void *target_lport_ptr ) ; void qlt_lport_deregister(struct scsi_qla_host *vha ) ; void qlt_unreg_sess(struct qla_tgt_sess *sess ) ; void qlt_set_mode(struct scsi_qla_host *vha ) ; void qlt_clear_mode(struct scsi_qla_host *vha ) ; __inline static void qla_reverse_ini_mode(struct scsi_qla_host *ha ) { { if ((int )(ha->host)->active_mode & 1) { (ha->host)->active_mode = (unsigned int )(ha->host)->active_mode & 2U; } else { (ha->host)->active_mode = (unsigned char )((unsigned int )(ha->host)->active_mode | 1U); } return; } } void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha , struct atio_from_isp *atio ) ; int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd ) ; int qlt_xmit_response(struct qla_tgt_cmd *cmd , int xmit_type , uint8_t scsi_status ) ; void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd ) ; void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd ) ; void qlt_free_cmd(struct qla_tgt_cmd *cmd ) ; void qlt_enable_vha(struct scsi_qla_host *vha ) ; void qlt_stop_phase1(struct qla_tgt *tgt ) ; void qlt_stop_phase2(struct qla_tgt *tgt ) ; static char *qlini_mode = (char *)"enabled"; int ql2x_ini_mode = 0; static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha , struct atio_from_isp *atio ) ; static void qlt_response_pkt(struct scsi_qla_host *vha , response_t *pkt ) ; static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess , uint32_t lun , int fn , void *iocb , int flags ) ; static void qlt_send_term_exchange(struct scsi_qla_host *vha , struct qla_tgt_cmd *cmd , struct atio_from_isp *atio , int ha_locked ) ; static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha , struct qla_tgt_srr_imm *imm , int ha_locked ) ; static struct kmem_cache *qla_tgt_cmd_cachep ; static struct kmem_cache *qla_tgt_mgmt_cmd_cachep ; static mempool_t *qla_tgt_mgmt_cmd_mempool ; static struct workqueue_struct *qla_tgt_wq ; static struct mutex qla_tgt_mutex = {{1}, {{{{{0U}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "qla_tgt_mutex.wait_lock", 0, 0UL}}}}, {& qla_tgt_mutex.wait_list, & qla_tgt_mutex.wait_list}, 0, 0, (void *)(& qla_tgt_mutex), {0, {0, 0}, "qla_tgt_mutex", 0, 0UL}}; static struct list_head qla_tgt_glist = {& qla_tgt_glist, & qla_tgt_glist}; static struct qla_tgt_sess *qlt_find_sess_by_port_name(struct qla_tgt *tgt , uint8_t const *port_name ) { struct qla_tgt_sess *sess ; struct list_head const *__mptr ; int tmp ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)tgt->sess_list.next; sess = (struct qla_tgt_sess *)__mptr + 0xffffffffffffffd8UL; goto 
ldv_61186; ldv_61185: tmp = memcmp((void const *)(& sess->port_name), (void const *)port_name, 8UL); if (tmp == 0) { return (sess); } else { } __mptr___0 = (struct list_head const *)sess->sess_list_entry.next; sess = (struct qla_tgt_sess *)__mptr___0 + 0xffffffffffffffd8UL; ldv_61186: ; if ((unsigned long )(& sess->sess_list_entry) != (unsigned long )(& tgt->sess_list)) { goto ldv_61185; } else { } return ((struct qla_tgt_sess *)0); } } __inline static int qlt_issue_marker(struct scsi_qla_host *vha , int vha_locked ) { int rc ; int tmp ; long tmp___0 ; { tmp___0 = ldv__builtin_expect((unsigned int )vha->marker_needed != 0U, 0L); if (tmp___0 != 0L) { tmp = qla2x00_issue_marker(vha, vha_locked); rc = tmp; if (rc != 0) { ql_dbg(16384U, vha, 57405, "qla_target(%d): issue_marker() failed\n", (int )vha->vp_idx); } else { } return (rc); } else { } return (0); } } __inline static struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha , uint8_t *d_id ) { struct qla_hw_data *ha ; uint8_t vp_idx ; long tmp ; int tmp___0 ; long tmp___1 ; { ha = vha->hw; if ((int )vha->d_id.b.area != (int )*(d_id + 1UL) || (int )vha->d_id.b.domain != (int )*d_id) { return ((struct scsi_qla_host *)0); } else { } if ((int )vha->d_id.b.al_pa == (int )*(d_id + 2UL)) { return (vha); } else { } tmp = ldv__builtin_expect((unsigned long )ha->tgt.tgt_vp_map == (unsigned long )((struct qla_tgt_vp_map *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"), "i" (457), "i" (12UL)); ldv_61199: ; goto ldv_61199; } else { } vp_idx = (ha->tgt.tgt_vp_map + (unsigned long )*(d_id + 2UL))->idx; tmp___0 = variable_test_bit((long )vp_idx, (unsigned long const volatile *)(& ha->vp_idx_map)); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 1L); if (tmp___1 != 0L) { return ((ha->tgt.tgt_vp_map + (unsigned long )vp_idx)->vha); } else { } return ((struct scsi_qla_host *)0); } } __inline static struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha , uint16_t vp_idx ) { struct qla_hw_data *ha ; long tmp ; int tmp___0 ; long tmp___1 ; { ha = vha->hw; if ((int )vha->vp_idx == (int )vp_idx) { return (vha); } else { } tmp = ldv__builtin_expect((unsigned long )ha->tgt.tgt_vp_map == (unsigned long )((struct qla_tgt_vp_map *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"), "i" (474), "i" (12UL)); ldv_61205: ; goto ldv_61205; } else { } tmp___0 = variable_test_bit((long )vp_idx, (unsigned long const volatile *)(& ha->vp_idx_map)); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 1L); if (tmp___1 != 0L) { return ((ha->tgt.tgt_vp_map + (unsigned long )vp_idx)->vha); } else { } return ((struct scsi_qla_host *)0); } } void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha , struct atio_from_isp *atio ) { struct scsi_qla_host *host ; struct scsi_qla_host *tmp ; long tmp___0 ; struct scsi_qla_host 
*host___0 ; struct imm_ntfy_from_isp *entry ; long tmp___1 ; { switch ((int )atio->u.raw.entry_type) { case 6: tmp = qlt_find_host_by_d_id(vha, (uint8_t *)(& atio->u.isp24.fcp_hdr.d_id)); host = tmp; tmp___0 = ldv__builtin_expect((unsigned long )host == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___0 != 0L) { ql_dbg(16384U, vha, 57406, "qla_target(%d): Received ATIO_TYPE7 with unknown d_id %x:%x:%x\n", (int )vha->vp_idx, (int )atio->u.isp24.fcp_hdr.d_id[0], (int )atio->u.isp24.fcp_hdr.d_id[1], (int )atio->u.isp24.fcp_hdr.d_id[2]); goto ldv_61212; } else { } qlt_24xx_atio_pkt(host, atio); goto ldv_61212; case 13: host___0 = vha; entry = (struct imm_ntfy_from_isp *)atio; if ((unsigned int )entry->u.isp24.vp_index != 255U && (unsigned int )entry->u.isp24.nport_handle != 65535U) { host___0 = qlt_find_host_by_vp_idx(vha, (int )entry->u.isp24.vp_index); tmp___1 = ldv__builtin_expect((unsigned long )host___0 == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___1 != 0L) { ql_dbg(16384U, vha, 57407, "qla_target(%d): Received ATIO (IMMED_NOTIFY_TYPE) with unknown vp_index %d\n", (int )vha->vp_idx, (int )entry->u.isp24.vp_index); goto ldv_61212; } else { } } else { } qlt_24xx_atio_pkt(host___0, atio); goto ldv_61212; default: ql_dbg(16384U, vha, 57408, "qla_target(%d): Received unknown ATIO atio type %x\n", (int )vha->vp_idx, (int )atio->u.raw.entry_type); goto ldv_61212; } ldv_61212: ; return; } } void qlt_response_pkt_all_vps(struct scsi_qla_host *vha , response_t *pkt ) { struct ctio7_from_24xx *entry ; struct scsi_qla_host *host ; struct scsi_qla_host *tmp ; long tmp___0 ; struct scsi_qla_host *host___0 ; struct imm_ntfy_from_isp *entry___0 ; long tmp___1 ; struct scsi_qla_host *host___1 ; struct nack_to_isp *entry___1 ; long tmp___2 ; struct abts_recv_from_24xx *entry___2 ; struct scsi_qla_host *host___2 ; struct scsi_qla_host *tmp___3 ; long tmp___4 ; struct abts_resp_to_24xx *entry___3 ; struct scsi_qla_host *host___3 ; struct scsi_qla_host *tmp___5 ; long tmp___6 ; { switch ((int )pkt->entry_type) { case 18: entry = (struct ctio7_from_24xx *)pkt; tmp = qlt_find_host_by_vp_idx(vha, (int )entry->vp_index); host = tmp; tmp___0 = ldv__builtin_expect((unsigned long )host == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___0 != 0L) { ql_dbg(16384U, vha, 57409, "qla_target(%d): Response pkt (CTIO_TYPE7) received, with unknown vp_index %d\n", (int )vha->vp_idx, (int )entry->vp_index); goto ldv_61224; } else { } qlt_response_pkt(host, pkt); goto ldv_61224; case 13: host___0 = vha; entry___0 = (struct imm_ntfy_from_isp *)pkt; host___0 = qlt_find_host_by_vp_idx(vha, (int )entry___0->u.isp24.vp_index); tmp___1 = ldv__builtin_expect((unsigned long )host___0 == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___1 != 0L) { ql_dbg(16384U, vha, 57410, "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) received, with unknown vp_index %d\n", (int )vha->vp_idx, (int )entry___0->u.isp24.vp_index); goto ldv_61224; } else { } qlt_response_pkt(host___0, pkt); goto ldv_61224; case 14: host___1 = vha; entry___1 = (struct nack_to_isp *)pkt; if ((unsigned int )entry___1->u.isp24.vp_index != 255U) { host___1 = qlt_find_host_by_vp_idx(vha, (int )entry___1->u.isp24.vp_index); tmp___2 = ldv__builtin_expect((unsigned long )host___1 == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___2 != 0L) { ql_dbg(16384U, vha, 57411, "qla_target(%d): Response pkt (NOTIFY_ACK_TYPE) received, with unknown vp_index %d\n", (int )vha->vp_idx, (int )entry___1->u.isp24.vp_index); goto 
ldv_61224; } else { } } else { } qlt_response_pkt(host___1, pkt); goto ldv_61224; case 84: entry___2 = (struct abts_recv_from_24xx *)pkt; tmp___3 = qlt_find_host_by_vp_idx(vha, (int )entry___2->vp_index); host___2 = tmp___3; tmp___4 = ldv__builtin_expect((unsigned long )host___2 == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___4 != 0L) { ql_dbg(16384U, vha, 57412, "qla_target(%d): Response pkt (ABTS_RECV_24XX) received, with unknown vp_index %d\n", (int )vha->vp_idx, (int )entry___2->vp_index); goto ldv_61224; } else { } qlt_response_pkt(host___2, pkt); goto ldv_61224; case 85: entry___3 = (struct abts_resp_to_24xx *)pkt; tmp___5 = qlt_find_host_by_vp_idx(vha, (int )entry___3->vp_index); host___3 = tmp___5; tmp___6 = ldv__builtin_expect((unsigned long )host___3 == (unsigned long )((struct scsi_qla_host *)0), 0L); if (tmp___6 != 0L) { ql_dbg(16384U, vha, 57413, "qla_target(%d): Response pkt (ABTS_RECV_24XX) received, with unknown vp_index %d\n", (int )vha->vp_idx, (int )entry___3->vp_index); goto ldv_61224; } else { } qlt_response_pkt(host___3, pkt); goto ldv_61224; default: qlt_response_pkt(vha, pkt); goto ldv_61224; } ldv_61224: ; return; } } static void qlt_free_session_done(struct work_struct *work ) { struct qla_tgt_sess *sess ; struct work_struct const *__mptr ; struct qla_tgt *tgt ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; long tmp ; { __mptr = (struct work_struct const *)work; sess = (struct qla_tgt_sess *)__mptr + 0xffffffffffffffa8UL; tgt = sess->tgt; vha = sess->vha; ha = vha->hw; tmp = ldv__builtin_expect((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"), "i" (643), "i" (12UL)); ldv_61247: ; goto ldv_61247; } else { } if ((unsigned long )sess->se_sess != (unsigned long )((struct se_session *)0)) { (*((ha->tgt.tgt_ops)->free_session))(sess); } else { } ql_dbg(8192U, vha, 61441, "Unregistration of sess %p finished\n", sess); kfree((void const *)sess); tgt->sess_count = tgt->sess_count - 1; if (tgt->sess_count == 0) { __wake_up(& tgt->waitQ, 3U, 0, (void *)0); } else { } return; } } void qlt_unreg_sess(struct qla_tgt_sess *sess ) { struct scsi_qla_host *vha ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; { vha = sess->vha; (*(((vha->hw)->tgt.tgt_ops)->clear_nacl_from_fcport_map))(sess); list_del(& sess->sess_list_entry); if ((unsigned int )*((unsigned char *)sess + 8UL) != 0U) { list_del(& sess->del_list_entry); } else { } __init_work(& sess->free_work, 0); __constr_expr_0.counter = 137438953408L; sess->free_work.data = __constr_expr_0; lockdep_init_map(& sess->free_work.lockdep_map, "(&sess->free_work)", & __key, 0); INIT_LIST_HEAD(& sess->free_work.entry); sess->free_work.func = & qlt_free_session_done; schedule_work(& sess->free_work); return; } } static int qlt_reset(struct scsi_qla_host *vha , void *iocb , int mcmd ) { struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; uint32_t unpacked_lun ; uint32_t lun ; uint16_t loop_id ; int res ; struct imm_ntfy_from_isp *n ; struct atio_from_isp *a ; int tmp ; int tmp___0 ; { ha = vha->hw; sess = (struct qla_tgt_sess *)0; lun = 0U; res = 0; n = (struct 
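/* qlt_reset(): look up the session named by the immediate notify's nport handle; without a session it returns -3, otherwise the LUN is unpacked from the ATIO and the request is forwarded to qlt_issue_task_mgmt(). */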
imm_ntfy_from_isp *)iocb; a = (struct atio_from_isp *)iocb; loop_id = n->u.isp24.nport_handle; if ((unsigned int )loop_id == 65535U) { } else { sess = (*((ha->tgt.tgt_ops)->find_sess_by_loop_id))(vha, (int )loop_id); } ql_dbg(16384U, vha, 57344, "Using sess for qla_tgt_reset: %p\n", sess); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { res = -3; return (res); } else { } ql_dbg(16384U, vha, 57415, "scsi(%ld): resetting (session %p from port %8phC mcmd %x, loop_id %d)\n", vha->host_no, sess, (uint8_t *)(& sess->port_name), mcmd, (int )loop_id); lun = (uint32_t )a->u.isp24.fcp_cmnd.lun; tmp = scsilun_to_int((struct scsi_lun *)(& lun)); unpacked_lun = (uint32_t )tmp; tmp___0 = qlt_issue_task_mgmt(sess, unpacked_lun, mcmd, iocb, 1); return (tmp___0); } } static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess , bool immediate ) { struct qla_tgt *tgt ; uint32_t dev_loss_tmo ; { tgt = sess->tgt; dev_loss_tmo = (uint32_t )((tgt->ha)->port_down_retry_count + 5); if ((unsigned int )*((unsigned char *)sess + 8UL) != 0U) { return; } else { } ql_dbg(16384U, sess->vha, 57345, "Scheduling sess %p for deletion\n", sess); list_add_tail(& sess->del_list_entry, & tgt->del_sess_list); sess->deleted = 1U; if ((int )immediate) { dev_loss_tmo = 0U; } else { } sess->expires = (unsigned long )(dev_loss_tmo * 250U) + (unsigned long )jiffies; ql_dbg(16384U, sess->vha, 57416, "qla_target(%d): session for port %8phC (loop ID %d) scheduled for deletion in %u secs (expires: %lu) immed: %d\n", (int )(sess->vha)->vp_idx, (uint8_t *)(& sess->port_name), (int )sess->loop_id, dev_loss_tmo, sess->expires, (int )immediate); if ((int )immediate) { schedule_delayed_work(& tgt->sess_del_work, 0UL); } else { schedule_delayed_work(& tgt->sess_del_work, (unsigned long )jiffies - sess->expires); } return; } } static void qlt_clear_tgt_db(struct qla_tgt *tgt , bool local_only ) { struct qla_tgt_sess *sess ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)tgt->sess_list.next; sess = (struct qla_tgt_sess *)__mptr + 0xffffffffffffffd8UL; goto ldv_61289; ldv_61288: qlt_schedule_sess_for_deletion(sess, 1); __mptr___0 = (struct list_head const *)sess->sess_list_entry.next; sess = (struct qla_tgt_sess *)__mptr___0 + 0xffffffffffffffd8UL; ldv_61289: ; if ((unsigned long )(& sess->sess_list_entry) != (unsigned long )(& tgt->sess_list)) { goto ldv_61288; } else { } return; } } static int qla24xx_get_loop_id(struct scsi_qla_host *vha , uint8_t const *s_id , uint16_t *loop_id ) { struct qla_hw_data *ha ; dma_addr_t gid_list_dma ; struct gid_list_info *gid_list ; char *id_iter ; int res ; int rc ; int i ; uint16_t entries ; int tmp ; void *tmp___0 ; int tmp___1 ; struct gid_list_info *gid ; int tmp___2 ; { ha = vha->hw; tmp = qla2x00_gid_list_size(ha); tmp___0 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )tmp, & gid_list_dma, 208U, (struct dma_attrs *)0); gid_list = (struct gid_list_info *)tmp___0; if ((unsigned long )gid_list == (unsigned long )((struct gid_list_info *)0)) { tmp___1 = qla2x00_gid_list_size(ha); ql_dbg(8192U, vha, 61508, "qla_target(%d): DMA Alloc failed of %u\n", (int )vha->vp_idx, tmp___1); return (-12); } else { } rc = qla2x00_get_id_list(vha, (void *)gid_list, gid_list_dma, & entries); if (rc != 0) { ql_dbg(8192U, vha, 61509, "qla_target(%d): get_id_list() failed: %x\n", (int )vha->vp_idx, rc); res = -1; goto out_free_id_list; } else { } id_iter = (char *)gid_list; res = -1; i = 0; goto ldv_61308; ldv_61307: gid = (struct 
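/* Scan the firmware-supplied GID list for an entry whose domain/area/al_pa match s_id and return its loop_id; the DMA buffer is freed on both paths. */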
gid_list_info *)id_iter; if (((int )gid->al_pa == (int )((unsigned char )*(s_id + 2UL)) && (int )gid->area == (int )((unsigned char )*(s_id + 1UL))) && (int )gid->domain == (int )((unsigned char )*s_id)) { *loop_id = gid->loop_id; res = 0; goto ldv_61306; } else { } id_iter = id_iter + (unsigned long )ha->gid_list_info_size; i = i + 1; ldv_61308: ; if ((int )entries > i) { goto ldv_61307; } else { } ldv_61306: ; out_free_id_list: tmp___2 = qla2x00_gid_list_size(ha); dma_free_attrs(& (ha->pdev)->dev, (size_t )tmp___2, (void *)gid_list, gid_list_dma, (struct dma_attrs *)0); return (res); } } static void qlt_undelete_sess(struct qla_tgt_sess *sess ) { long tmp ; { tmp = ldv__builtin_expect((unsigned int )*((unsigned char *)sess + 8UL) == 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"), "i" (839), "i" (12UL)); ldv_61312: ; goto ldv_61312; } else { } list_del(& sess->del_list_entry); sess->deleted = 0U; return; } } static void qlt_del_sess_work_fn(struct delayed_work *work ) { struct qla_tgt *tgt ; struct delayed_work const *__mptr ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr___0 ; int tmp___0 ; { __mptr = (struct delayed_work const *)work; tgt = (struct qla_tgt *)__mptr + 0xffffffffffffffb0UL; vha = tgt->vha; ha = vha->hw; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); goto ldv_61336; ldv_61335: __mptr___0 = (struct list_head const *)tgt->del_sess_list.next; sess = (struct qla_tgt_sess *)__mptr___0 + 0xffffffffffffffc0UL; if ((long )((unsigned long )jiffies - sess->expires) >= 0L) { qlt_undelete_sess(sess); ql_dbg(8192U, vha, 61444, "Timeout: sess %p about to be deleted\n", sess); (*((ha->tgt.tgt_ops)->shutdown_sess))(sess); (*((ha->tgt.tgt_ops)->put_sess))(sess); } else { schedule_delayed_work(& tgt->sess_del_work, (unsigned long )jiffies - sess->expires); goto ldv_61334; } ldv_61336: tmp___0 = list_empty((struct list_head const *)(& tgt->del_sess_list)); if (tmp___0 == 0) { goto ldv_61335; } else { } ldv_61334: spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static struct qla_tgt_sess *qlt_create_sess(struct scsi_qla_host *vha , fc_port_t *fcport , bool local ) { struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; unsigned long flags ; unsigned char be_sid[3U] ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; int tmp___0 ; struct list_head const *__mptr___0 ; void *tmp___1 ; int tmp___2 ; size_t __len ; void *__ret ; raw_spinlock_t *tmp___3 ; { ha = vha->hw; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)(ha->tgt.qla_tgt)->sess_list.next; sess = (struct qla_tgt_sess *)__mptr + 0xffffffffffffffd8UL; goto ldv_61354; ldv_61353: tmp___0 = memcmp((void const *)(& sess->port_name), (void const *)(& fcport->port_name), 8UL); if (tmp___0 == 0) { ql_dbg(8192U, vha, 61445, "Double sess %p found (s_id %x:%x:%x, loop_id %d), updating to d_id %x:%x:%x, loop_id %d", sess, (int )sess->s_id.b.domain, (int )sess->s_id.b.al_pa, (int )sess->s_id.b.area, (int )sess->loop_id, (int )fcport->d_id.b.domain, 
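/* A session with this WWPN already exists: undelete it if it was pending deletion, take a reference and update its d_id/loop_id rather than allocating a new one. */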
(int )fcport->d_id.b.al_pa, (int )fcport->d_id.b.area, (int )fcport->loop_id); if ((unsigned int )*((unsigned char *)sess + 8UL) != 0U) { qlt_undelete_sess(sess); } else { } kref_get(& (sess->se_sess)->sess_kref); (*((ha->tgt.tgt_ops)->update_sess))(sess, fcport->d_id, (int )fcport->loop_id, (fcport->flags & 16U) != 0U); if ((unsigned int )*((unsigned char *)sess + 8UL) != 0U && ! local) { sess->local = 0U; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (sess); } else { } __mptr___0 = (struct list_head const *)sess->sess_list_entry.next; sess = (struct qla_tgt_sess *)__mptr___0 + 0xffffffffffffffd8UL; ldv_61354: ; if ((unsigned long )(& sess->sess_list_entry) != (unsigned long )(& (ha->tgt.qla_tgt)->sess_list)) { goto ldv_61353; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___1 = kzalloc(168UL, 208U); sess = (struct qla_tgt_sess *)tmp___1; if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { ql_dbg(8192U, vha, 61514, "qla_target(%u): session allocation failed, all commands from port %8phC will be refused", (int )vha->vp_idx, (uint8_t *)(& fcport->port_name)); return ((struct qla_tgt_sess *)0); } else { } sess->tgt = ha->tgt.qla_tgt; sess->vha = vha; sess->s_id = fcport->d_id; sess->loop_id = fcport->loop_id; sess->local = (unsigned char )local; ql_dbg(8192U, vha, 61446, "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", sess, ha->tgt.qla_tgt); be_sid[0] = sess->s_id.b.domain; be_sid[1] = sess->s_id.b.area; be_sid[2] = sess->s_id.b.al_pa; tmp___2 = (*((ha->tgt.tgt_ops)->check_initiator_node_acl))(vha, (unsigned char *)(& fcport->port_name), (void *)sess, (uint8_t *)(& be_sid), (int )fcport->loop_id); if (tmp___2 < 0) { kfree((void const *)sess); return ((struct qla_tgt_sess *)0); } else { } kref_get(& (sess->se_sess)->sess_kref); sess->conf_compl_supported = 0U; __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& sess->port_name), (void const *)(& fcport->port_name), __len); } else { __ret = __builtin_memcpy((void *)(& sess->port_name), (void const *)(& fcport->port_name), __len); } tmp___3 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___3); list_add_tail(& sess->sess_list_entry, & (ha->tgt.qla_tgt)->sess_list); (ha->tgt.qla_tgt)->sess_count = (ha->tgt.qla_tgt)->sess_count + 1; spin_unlock_irqrestore(& ha->hardware_lock, flags); ql_dbg(8192U, vha, 61515, "qla_target(%d): %ssession for wwn %8phC (loop_id %d, s_id %x:%x:%x, confirmed completion %ssupported) added\n", (int )vha->vp_idx, (int )local ? (char *)"local " : (char *)"", (uint8_t *)(& fcport->port_name), (int )fcport->loop_id, (int )sess->s_id.b.domain, (int )sess->s_id.b.area, (int )sess->s_id.b.al_pa, (unsigned int )*((unsigned char *)sess + 8UL) != 0U ? 
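/* qlt_fc_port_added(): the fabric reported an initiator port; an existing session is revived and updated, otherwise a new one is created under tgt_mutex with the hardware lock temporarily dropped. */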
(char *)"" : (char *)"not "); return (sess); } } void qlt_fc_port_added(struct scsi_qla_host *vha , fc_port_t *fcport ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_sess *sess ; unsigned long flags ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; { ha = vha->hw; tgt = ha->tgt.qla_tgt; if ((unsigned long )(vha->hw)->tgt.tgt_ops == (unsigned long )((struct qla_tgt_func_tmpl *)0)) { return; } else { } if ((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0) || (unsigned int )fcport->port_type != 4U) { return; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if (tgt->tgt_stop != 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } else { } sess = qlt_find_sess_by_port_name(tgt, (uint8_t const *)(& fcport->port_name)); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { spin_unlock_irqrestore(& ha->hardware_lock, flags); mutex_lock_nested(& ha->tgt.tgt_mutex, 0U); sess = qlt_create_sess(vha, fcport, 0); mutex_unlock(& ha->tgt.tgt_mutex); tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); } else { kref_get(& (sess->se_sess)->sess_kref); if ((unsigned int )*((unsigned char *)sess + 8UL) != 0U) { qlt_undelete_sess(sess); ql_dbg(8192U, vha, 61516, "qla_target(%u): %ssession for port %8phC (loop ID %d) reappeared\n", (int )vha->vp_idx, (unsigned int )*((unsigned char *)sess + 8UL) != 0U ? (char *)"local " : (char *)"", (uint8_t *)(& sess->port_name), (int )sess->loop_id); ql_dbg(8192U, vha, 61447, "Reappeared sess %p\n", sess); } else { } (*((ha->tgt.tgt_ops)->update_sess))(sess, fcport->d_id, (int )fcport->loop_id, (fcport->flags & 16U) != 0U); } if ((unsigned long )sess != (unsigned long )((struct qla_tgt_sess *)0) && (unsigned int )*((unsigned char *)sess + 8UL) != 0U) { ql_dbg(8192U, vha, 61517, "qla_target(%u): local session for port %8phC (loop ID %d) became global\n", (int )vha->vp_idx, (uint8_t *)(& fcport->port_name), (int )sess->loop_id); sess->local = 0U; } else { } (*((ha->tgt.tgt_ops)->put_sess))(sess); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } void qlt_fc_port_deleted(struct scsi_qla_host *vha , fc_port_t *fcport ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_sess *sess ; unsigned long flags ; raw_spinlock_t *tmp ; { ha = vha->hw; tgt = ha->tgt.qla_tgt; if ((unsigned long )(vha->hw)->tgt.tgt_ops == (unsigned long )((struct qla_tgt_func_tmpl *)0)) { return; } else { } if ((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0) || (unsigned int )fcport->port_type != 4U) { return; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if (tgt->tgt_stop != 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } else { } sess = qlt_find_sess_by_port_name(tgt, (uint8_t const *)(& fcport->port_name)); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } else { } ql_dbg(8192U, vha, 61448, "qla_tgt_fc_port_deleted %p", sess); sess->local = 1U; qlt_schedule_sess_for_deletion(sess, 0); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } __inline static int test_tgt_sess_count(struct qla_tgt *tgt ) { struct qla_hw_data *ha ; unsigned long flags ; int res ; raw_spinlock_t *tmp ; int tmp___0 ; { ha = tgt->ha; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = list_empty((struct list_head const *)(& tgt->sess_list)); ql_dbg(16384U, 
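/* qlt_stop_phase1(): set tgt_stop, schedule every session for immediate deletion, flush the session work, then sleep on tgt->waitQ until sess_count reaches zero. */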
tgt->vha, 57346, "tgt %p, empty(sess_list)=%d sess_count=%d\n", tgt, tmp___0, tgt->sess_count); res = tgt->sess_count == 0; spin_unlock_irqrestore(& ha->hardware_lock, flags); return (res); } } void qlt_stop_phase1(struct qla_tgt *tgt ) { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; raw_spinlock_t *tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; wait_queue_t __wait ; struct task_struct *tmp___5 ; int tmp___6 ; bool tmp___7 ; int tmp___8 ; wait_queue_t __wait___0 ; struct task_struct *tmp___9 ; int tmp___10 ; { vha = tgt->vha; ha = tgt->ha; if (tgt->tgt_stop != 0 || tgt->tgt_stopped != 0) { ql_dbg(8192U, vha, 61518, "Already in tgt->tgt_stop or tgt_stopped state\n"); dump_stack(); return; } else { } ql_dbg(16384U, vha, 57347, "Stopping target for host %ld(%p)\n", vha->host_no, vha); mutex_lock_nested(& ha->tgt.tgt_mutex, 0U); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tgt->tgt_stop = 1; qlt_clear_tgt_db(tgt, 1); spin_unlock_irqrestore(& ha->hardware_lock, flags); mutex_unlock(& ha->tgt.tgt_mutex); flush_delayed_work(& tgt->sess_del_work); ql_dbg(8192U, vha, 61449, "Waiting for sess works (tgt %p)", tgt); tmp___0 = spinlock_check(& tgt->sess_work_lock); flags = _raw_spin_lock_irqsave(tmp___0); goto ldv_61412; ldv_61411: spin_unlock_irqrestore(& tgt->sess_work_lock, flags); flush_scheduled_work(); tmp___1 = spinlock_check(& tgt->sess_work_lock); flags = _raw_spin_lock_irqsave(tmp___1); ldv_61412: tmp___2 = list_empty((struct list_head const *)(& tgt->sess_works_list)); if (tmp___2 == 0) { goto ldv_61411; } else { } spin_unlock_irqrestore(& tgt->sess_work_lock, flags); tmp___3 = list_empty((struct list_head const *)(& tgt->sess_list)); ql_dbg(8192U, vha, 61450, "Waiting for tgt %p: list_empty(sess_list)=%d sess_count=%d\n", tgt, tmp___3, tgt->sess_count); tmp___4 = test_tgt_sess_count(tgt); if (tmp___4 != 0) { goto ldv_61414; } else { } tmp___5 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___5; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_61417: prepare_to_wait(& tgt->waitQ, & __wait, 2); tmp___6 = test_tgt_sess_count(tgt); if (tmp___6 != 0) { goto ldv_61416; } else { } schedule(); goto ldv_61417; ldv_61416: finish_wait(& tgt->waitQ, & __wait); ldv_61414: ; if (*((unsigned long *)ha + 2UL) == 0UL) { tmp___7 = qla_tgt_mode_enabled(vha); if ((int )tmp___7) { qlt_disable_vha(vha); } else { } } else { } tmp___8 = test_tgt_sess_count(tgt); if (tmp___8 != 0) { goto ldv_61418; } else { } tmp___9 = get_current(); __wait___0.flags = 0U; __wait___0.private = (void *)tmp___9; __wait___0.func = & autoremove_wake_function; __wait___0.task_list.next = & __wait___0.task_list; __wait___0.task_list.prev = & __wait___0.task_list; ldv_61421: prepare_to_wait(& tgt->waitQ, & __wait___0, 2); tmp___10 = test_tgt_sess_count(tgt); if (tmp___10 != 0) { goto ldv_61420; } else { } schedule(); goto ldv_61421; ldv_61420: finish_wait(& tgt->waitQ, & __wait___0); ldv_61418: ; return; } } void qlt_stop_phase2(struct qla_tgt *tgt ) { struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; { ha = tgt->ha; if (tgt->tgt_stopped != 0) { ql_dbg(8192U, tgt->vha, 61519, "Already in tgt->tgt_stopped state\n"); dump_stack(); return; } else { } ql_dbg(8192U, tgt->vha, 61451, "Waiting for %d IRQ commands to complete (tgt %p)", tgt->irq_cmd_count, tgt); mutex_lock_nested(& 
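/* Phase 2 busy-waits (udelay with the hardware lock briefly dropped) until irq_cmd_count drains, then marks the target stopped. */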
ha->tgt.tgt_mutex, 0U); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); goto ldv_61440; ldv_61439: spin_unlock_irqrestore(& ha->hardware_lock, flags); __const_udelay(8590UL); tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); ldv_61440: ; if (tgt->irq_cmd_count != 0) { goto ldv_61439; } else { } tgt->tgt_stop = 0; tgt->tgt_stopped = 1; spin_unlock_irqrestore(& ha->hardware_lock, flags); mutex_unlock(& ha->tgt.tgt_mutex); ql_dbg(8192U, tgt->vha, 61452, "Stop of tgt %p finished", tgt); return; } } static void qlt_release(struct qla_tgt *tgt ) { struct qla_hw_data *ha ; { ha = tgt->ha; if ((unsigned long )ha->tgt.qla_tgt != (unsigned long )((struct qla_tgt *)0) && tgt->tgt_stopped == 0) { qlt_stop_phase2(tgt); } else { } ha->tgt.qla_tgt = (struct qla_tgt *)0; ql_dbg(8192U, tgt->vha, 61453, "Release of tgt %p finished\n", tgt); kfree((void const *)tgt); return; } } static int qlt_sched_sess_work(struct qla_tgt *tgt , int type , void const *param , unsigned int param_size ) { struct qla_tgt_sess_work_param *prm ; unsigned long flags ; void *tmp ; size_t __len ; void *__ret ; raw_spinlock_t *tmp___0 ; { tmp = kzalloc(88UL, 32U); prm = (struct qla_tgt_sess_work_param *)tmp; if ((unsigned long )prm == (unsigned long )((struct qla_tgt_sess_work_param *)0)) { ql_dbg(8192U, tgt->vha, 61520, "qla_target(%d): Unable to create session work, command will be refused", 0); return (-12); } else { } ql_dbg(8192U, tgt->vha, 61454, "Scheduling work (type %d, prm %p) to find session for param %p (size %d, tgt %p)\n", type, prm, param, param_size, tgt); prm->type = type; __len = (size_t )param_size; __ret = __builtin_memcpy((void *)(& prm->ldv_60976.tm_iocb), param, __len); tmp___0 = spinlock_check(& tgt->sess_work_lock); flags = _raw_spin_lock_irqsave(tmp___0); list_add_tail(& prm->sess_works_list_entry, & tgt->sess_works_list); spin_unlock_irqrestore(& tgt->sess_work_lock, flags); schedule_work(& tgt->sess_work); return (0); } } static void qlt_send_notify_ack(struct scsi_qla_host *vha , struct imm_ntfy_from_isp *ntfy , uint32_t add_flags , uint16_t resp_code , int resp_code_valid , uint16_t srr_flags , uint16_t srr_reject_code , uint8_t srr_explan ) { struct qla_hw_data *ha ; request_t *pkt ; struct nack_to_isp *nack ; int tmp ; void *tmp___0 ; { ha = vha->hw; ql_dbg(16384U, vha, 57348, "Sending NOTIFY_ACK (ha=%p)\n", ha); tmp = qlt_issue_marker(vha, 1); if (tmp != 0) { return; } else { } tmp___0 = qla2x00_alloc_iocbs(vha, (srb_t *)0); pkt = (request_t *)tmp___0; if ((unsigned long )pkt == (unsigned long )((request_t *)0)) { ql_dbg(16384U, vha, 57417, "qla_target(%d): %s failed: unable to allocate request packet\n", (int )vha->vp_idx, "qlt_send_notify_ack"); return; } else { } if ((unsigned long )ha->tgt.qla_tgt != (unsigned long )((struct qla_tgt *)0)) { (ha->tgt.qla_tgt)->notify_ack_expected = (ha->tgt.qla_tgt)->notify_ack_expected + 1; } else { } pkt->entry_type = 14U; pkt->entry_count = 1U; nack = (struct nack_to_isp *)pkt; nack->ox_id = ntfy->ox_id; nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; if ((unsigned int )ntfy->u.isp24.status == 70U) { nack->u.isp24.flags = (unsigned int )ntfy->u.isp24.flags & 1U; } else { } nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; nack->u.isp24.status = ntfy->u.isp24.status; nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; nack->u.isp24.srr_rel_offs = 
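/* The remaining NOTIFY_ACK fields mirror the incoming immediate notify; the SRR flags, reject code and explanation are supplied by the caller. */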
ntfy->u.isp24.srr_rel_offs; nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; nack->u.isp24.srr_flags = srr_flags; nack->u.isp24.srr_reject_code = (uint8_t )srr_reject_code; nack->u.isp24.srr_reject_code_expl = srr_explan; nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; ql_dbg(16384U, vha, 57349, "qla_target(%d): Sending 24xx Notify Ack %d\n", (int )vha->vp_idx, (int )nack->u.isp24.status); qla2x00_start_iocbs(vha, vha->req); return; } } static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha , struct abts_recv_from_24xx *abts , uint32_t status , bool ids_reversed ) { struct qla_hw_data *ha ; struct abts_resp_to_24xx *resp ; uint32_t f_ctl ; uint8_t *p ; int tmp ; void *tmp___0 ; uint8_t *tmp___1 ; uint8_t *tmp___2 ; { ha = vha->hw; ql_dbg(16384U, vha, 57350, "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n", ha, abts, status); tmp = qlt_issue_marker(vha, 1); if (tmp != 0) { return; } else { } tmp___0 = qla2x00_alloc_iocbs(vha, (srb_t *)0); resp = (struct abts_resp_to_24xx *)tmp___0; if ((unsigned long )resp == (unsigned long )((struct abts_resp_to_24xx *)0)) { ql_dbg(16384U, vha, 57418, "qla_target(%d): %s failed: unable to allocate request packet", (int )vha->vp_idx, "qlt_24xx_send_abts_resp"); return; } else { } resp->entry_type = 85U; resp->entry_count = 1U; resp->nport_handle = abts->nport_handle; resp->vp_index = (uint8_t )vha->vp_idx; resp->sof_type = abts->sof_type; resp->exchange_address = abts->exchange_address; resp->fcp_hdr_le = abts->fcp_hdr_le; f_ctl = 10027008U; p = (uint8_t *)(& f_ctl); tmp___1 = p; p = p + 1; resp->fcp_hdr_le.f_ctl[0] = *tmp___1; tmp___2 = p; p = p + 1; resp->fcp_hdr_le.f_ctl[1] = *tmp___2; resp->fcp_hdr_le.f_ctl[2] = *p; if ((int )ids_reversed) { resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0]; resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1]; resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2]; resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0]; resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1]; resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2]; } else { resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0]; resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1]; resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2]; resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0]; resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1]; resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2]; } resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; if (status == 0U) { resp->fcp_hdr_le.r_ctl = 132U; resp->payload.ba_acct.seq_id_valid = 0U; resp->payload.ba_acct.low_seq_cnt = 0U; resp->payload.ba_acct.high_seq_cnt = 65535U; resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; } else { resp->fcp_hdr_le.r_ctl = 133U; resp->payload.ba_rjt.reason_code = 9U; } (ha->tgt.qla_tgt)->abts_resp_expected = (ha->tgt.qla_tgt)->abts_resp_expected + 1; qla2x00_start_iocbs(vha, vha->req); return; } } static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha , struct abts_resp_from_24xx_fw *entry ) { struct ctio7_to_24xx *ctio ; int tmp ; void *tmp___0 ; { ql_dbg(16384U, vha, 57351, "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw); tmp = qlt_issue_marker(vha, 1); if (tmp != 0) { return; } else { } tmp___0 = qla2x00_alloc_iocbs(vha, (srb_t *)0); ctio = (struct ctio7_to_24xx *)tmp___0; if ((unsigned long )ctio == (unsigned long )((struct ctio7_to_24xx *)0)) { ql_dbg(16384U, vha, 57419, "qla_target(%d): %s failed: unable to allocate request packet\n", (int )vha->vp_idx, "qlt_24xx_retry_term_exchange"); 
return; } else { } ctio->entry_type = 18U; ctio->entry_count = 1U; ctio->nport_handle = entry->nport_handle; ctio->handle = 4294967295U; ctio->timeout = 10U; ctio->vp_index = (uint8_t )vha->vp_idx; ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0]; ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1]; ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2]; ctio->exchange_addr = entry->exchange_addr_to_abort; ctio->u.status1.flags = 16448U; ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; qla2x00_start_iocbs(vha, vha->req); qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry, 0U, 1); return; } } static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha , struct abts_recv_from_24xx *abts , struct qla_tgt_sess *sess ) { struct qla_hw_data *ha ; struct se_session *se_sess ; struct qla_tgt_mgmt_cmd *mcmd ; struct se_cmd *se_cmd ; u32 lun ; int rc ; bool found_lun ; struct list_head const *__mptr ; struct qla_tgt_cmd *cmd ; struct se_cmd const *__mptr___0 ; struct list_head const *__mptr___1 ; void *tmp ; size_t __len ; void *__ret ; { ha = vha->hw; se_sess = sess->se_sess; lun = 0U; found_lun = 0; spin_lock(& se_sess->sess_cmd_lock); __mptr = (struct list_head const *)se_sess->sess_cmd_list.next; se_cmd = (struct se_cmd *)__mptr + 0xffffffffffffff60UL; goto ldv_61518; ldv_61517: __mptr___0 = (struct se_cmd const *)se_cmd; cmd = (struct qla_tgt_cmd *)__mptr___0 + 0xfffffffffffffff0UL; if (cmd->tag == abts->exchange_addr_to_abort) { lun = cmd->unpacked_lun; found_lun = 1; goto ldv_61516; } else { } __mptr___1 = (struct list_head const *)se_cmd->se_cmd_list.next; se_cmd = (struct se_cmd *)__mptr___1 + 0xffffffffffffff60UL; ldv_61518: ; if ((unsigned long )(& se_cmd->se_cmd_list) != (unsigned long )(& se_sess->sess_cmd_list)) { goto ldv_61517; } else { } ldv_61516: spin_unlock(& se_sess->sess_cmd_lock); if (! 
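/* No queued command owns the exchange being aborted: return -2. Otherwise allocate a management command from the mempool, copy the ABTS into it and hand the abort to tgt_ops->handle_tmr(). */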
found_lun) { return (-2); } else { } ql_dbg(8192U, vha, 61455, "qla_target(%d): task abort (tag=%d)\n", (int )vha->vp_idx, abts->exchange_addr_to_abort); tmp = mempool_alloc(qla_tgt_mgmt_cmd_mempool, 32U); mcmd = (struct qla_tgt_mgmt_cmd *)tmp; if ((unsigned long )mcmd == (unsigned long )((struct qla_tgt_mgmt_cmd *)0)) { ql_dbg(8192U, vha, 61521, "qla_target(%d): %s: Allocation of ABORT cmd failed", (int )vha->vp_idx, "__qlt_24xx_handle_abts"); return (-12); } else { } memset((void *)mcmd, 0, 1152UL); mcmd->sess = sess; __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)(& mcmd->orig_iocb.abts), (void const *)abts, __len); } else { __ret = __builtin_memcpy((void *)(& mcmd->orig_iocb.abts), (void const *)abts, __len); } rc = (*((ha->tgt.tgt_ops)->handle_tmr))(mcmd, lun, 1, abts->exchange_addr_to_abort); if (rc != 0) { ql_dbg(8192U, vha, 61522, "qla_target(%d): tgt_ops->handle_tmr() failed: %d", (int )vha->vp_idx, rc); mempool_free((void *)mcmd, qla_tgt_mgmt_cmd_mempool); return (-14); } else { } return (0); } } static void qlt_24xx_handle_abts(struct scsi_qla_host *vha , struct abts_recv_from_24xx *abts ) { struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; uint32_t tag ; uint8_t s_id[3U] ; int rc ; { ha = vha->hw; tag = abts->exchange_addr_to_abort; if ((int )abts->fcp_hdr_le.parameter & 1) { ql_dbg(8192U, vha, 61523, "qla_target(%d): ABTS: Abort Sequence not supported\n", (int )vha->vp_idx); qlt_24xx_send_abts_resp(vha, abts, 4U, 0); return; } else { } if (tag == 4294967295U) { ql_dbg(8192U, vha, 61456, "qla_target(%d): ABTS: Unknown Exchange Address received\n", (int )vha->vp_idx); qlt_24xx_send_abts_resp(vha, abts, 4U, 0); return; } else { } ql_dbg(8192U, vha, 61457, "qla_target(%d): task abort (s_id=%x:%x:%x, tag=%d, param=%x)\n", (int )vha->vp_idx, (int )abts->fcp_hdr_le.s_id[2], (int )abts->fcp_hdr_le.s_id[1], (int )abts->fcp_hdr_le.s_id[0], tag, abts->fcp_hdr_le.parameter); s_id[0] = abts->fcp_hdr_le.s_id[2]; s_id[1] = abts->fcp_hdr_le.s_id[1]; s_id[2] = abts->fcp_hdr_le.s_id[0]; sess = (*((ha->tgt.tgt_ops)->find_sess_by_s_id))(vha, (uint8_t const *)(& s_id)); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { ql_dbg(8192U, vha, 61458, "qla_target(%d): task abort for non-existant session\n", (int )vha->vp_idx); rc = qlt_sched_sess_work(ha->tgt.qla_tgt, 1, (void const *)abts, 64U); if (rc != 0) { qlt_24xx_send_abts_resp(vha, abts, 4U, 0); } else { } return; } else { } rc = __qlt_24xx_handle_abts(vha, abts, sess); if (rc != 0) { ql_dbg(8192U, vha, 61524, "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n", (int )vha->vp_idx, rc); qlt_24xx_send_abts_resp(vha, abts, 4U, 0); return; } else { } return; } } static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha , struct qla_tgt_mgmt_cmd *mcmd , uint32_t resp_code ) { struct atio_from_isp *atio ; struct ctio7_to_24xx *ctio ; int tmp ; void *tmp___0 ; __u16 tmp___1 ; { atio = & mcmd->orig_iocb.atio; ql_dbg(16384U, ha, 57352, "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", ha, atio, resp_code); tmp = qlt_issue_marker(ha, 1); if (tmp != 0) { return; } else { } tmp___0 = qla2x00_alloc_iocbs(ha, (srb_t *)0); ctio = (struct ctio7_to_24xx *)tmp___0; if ((unsigned long )ctio == (unsigned long )((struct ctio7_to_24xx *)0)) { ql_dbg(16384U, ha, 57420, "qla_target(%d): %s failed: unable to allocate request packet\n", (int )ha->vp_idx, "qlt_24xx_send_task_mgmt_ctio"); return; } else { } ctio->entry_type = 18U; ctio->entry_count = 1U; ctio->handle = 4294967295U; ctio->nport_handle = 
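/* Build the CTIO7 carrying the task-management response: SCSI status with an 8-byte response payload whose first byte is the response code. */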
(mcmd->sess)->loop_id; ctio->timeout = 10U; ctio->vp_index = (uint8_t )ha->vp_idx; ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; ctio->exchange_addr = atio->u.isp24.exchange_addr; ctio->u.status1.flags = (uint16_t )((int )((short )((int )atio->u.isp24.attr << 9)) | -32704); tmp___1 = __fswab16((int )atio->u.isp24.fcp_hdr.ox_id); ctio->u.status1.ox_id = tmp___1; ctio->u.status1.scsi_status = 256U; ctio->u.status1.response_len = 8U; ctio->u.status1.sense_data[0] = (uint8_t )resp_code; qla2x00_start_iocbs(ha, ha->req); return; } } void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd ) { { mempool_free((void *)mcmd, qla_tgt_mgmt_cmd_mempool); return; } } void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd ) { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; { vha = (mcmd->sess)->vha; ha = vha->hw; ql_dbg(8192U, vha, 61459, "TM response mcmd (%p) status %#x state %#x", mcmd, (int )mcmd->fc_tm_rsp, mcmd->flags); tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if (mcmd->flags == 1U) { qlt_send_notify_ack(vha, & mcmd->orig_iocb.imm_ntfy, 0U, 0, 0, 0, 0, 0); } else if ((unsigned int )(mcmd->se_cmd.se_tmr_req)->function == 1U) { qlt_24xx_send_abts_resp(vha, & mcmd->orig_iocb.abts, (uint32_t )mcmd->fc_tm_rsp, 0); } else { qlt_24xx_send_task_mgmt_ctio(vha, mcmd, (uint32_t )mcmd->fc_tm_rsp); } (*((ha->tgt.tgt_ops)->free_mcmd))(mcmd); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm ) { struct qla_tgt_cmd *cmd ; long tmp ; long tmp___0 ; { cmd = prm->cmd; tmp = ldv__builtin_expect(cmd->sg_cnt == 0, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"), "i" (1620), "i" (12UL)); ldv_61568: ; goto ldv_61568; } else { } prm->sg = cmd->sg; prm->seg_cnt = pci_map_sg(((prm->tgt)->ha)->pdev, cmd->sg, cmd->sg_cnt, (int )cmd->dma_data_direction); tmp___0 = ldv__builtin_expect(prm->seg_cnt == 0, 0L); if (tmp___0 != 0L) { goto out_err; } else { } (prm->cmd)->sg_mapped = 1U; if (prm->seg_cnt > (prm->tgt)->datasegs_per_cmd) { prm->req_cnt = prm->req_cnt + (((prm->seg_cnt - (prm->tgt)->datasegs_per_cmd) + (prm->tgt)->datasegs_per_cont) + -1) / (prm->tgt)->datasegs_per_cont; } else { } ql_dbg(16384U, (prm->cmd)->vha, 57353, "seg_cnt=%d, req_cnt=%d\n", prm->seg_cnt, prm->req_cnt); return (0); out_err: ql_dbg(16384U, (prm->cmd)->vha, 57421, "qla_target(%d): PCI mapping failed: sg_cnt=%d", 0, (prm->cmd)->sg_cnt); return (-1); } } __inline static void qlt_unmap_sg(struct scsi_qla_host *vha , struct qla_tgt_cmd *cmd ) { struct qla_hw_data *ha ; long tmp ; { ha = vha->hw; tmp = ldv__builtin_expect((unsigned int )*((unsigned char *)cmd + 1256UL) == 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
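/* Expanded BUG(): qlt_unmap_sg() must only run on a command whose scatterlist was actually mapped (sg_mapped set). */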
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"), "i" (1654), "i" (12UL)); ldv_61575: ; goto ldv_61575; } else { } pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, (int )cmd->dma_data_direction); cmd->sg_mapped = 0U; return; } } static int qlt_check_reserve_free_req(struct scsi_qla_host *vha , uint32_t req_cnt ) { struct qla_hw_data *ha ; device_reg_t *reg ; uint32_t cnt ; unsigned int tmp ; long tmp___0 ; { ha = vha->hw; reg = ha->iobase; if ((uint32_t )(vha->req)->cnt < req_cnt + 2U) { tmp = readl((void const volatile *)(& reg->isp24.req_q_out)); cnt = (uint32_t )((unsigned short )tmp); ql_dbg(16384U, vha, 57354, "Request ring circled: cnt=%d, vha->->ring_index=%d, vha->req->cnt=%d, req_cnt=%d\n", cnt, (int )(vha->req)->ring_index, (int )(vha->req)->cnt, req_cnt); if ((uint32_t )(vha->req)->ring_index < cnt) { (vha->req)->cnt = (int )((uint16_t )cnt) - (int )(vha->req)->ring_index; } else { (vha->req)->cnt = (int )(vha->req)->length + ((int )((uint16_t )cnt) - (int )(vha->req)->ring_index); } } else { } tmp___0 = ldv__builtin_expect((uint32_t )(vha->req)->cnt < req_cnt + 2U, 0L); if (tmp___0 != 0L) { ql_dbg(16384U, vha, 57355, "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d\n", (int )vha->vp_idx, (int )(vha->req)->ring_index, (int )(vha->req)->cnt, req_cnt); return (-11); } else { } (vha->req)->cnt = (int )(vha->req)->cnt - (int )((uint16_t )req_cnt); return (0); } } __inline static void *qlt_get_req_pkt(struct scsi_qla_host *vha ) { { (vha->req)->ring_index = (uint16_t )((int )(vha->req)->ring_index + 1); if ((int )(vha->req)->ring_index == (int )(vha->req)->length) { (vha->req)->ring_index = 0U; (vha->req)->ring_ptr = (vha->req)->ring; } else { (vha->req)->ring_ptr = (vha->req)->ring_ptr + 1; } return ((void *)(vha->req)->ring_ptr); } } __inline static uint32_t qlt_make_handle(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; uint32_t h ; { ha = vha->hw; h = (uint32_t )ha->tgt.current_handle; ldv_61592: h = h + 1U; if (h > 1024U) { h = 1U; } else { } if ((uint32_t )ha->tgt.current_handle == h) { ql_dbg(16384U, vha, 57422, "qla_target(%d): Ran out of empty cmd slots in ha %p\n", (int )vha->vp_idx, ha); h = 0U; goto ldv_61591; } else { } if ((h == 0U || h == 3758096383U) || (unsigned long )ha->tgt.cmds[h - 1U] != (unsigned long )((struct qla_tgt_cmd *)0)) { goto ldv_61592; } else { } ldv_61591: ; if (h != 0U) { ha->tgt.current_handle = (uint16_t )h; } else { } return (h); } } static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm , struct scsi_qla_host *vha ) { uint32_t h ; struct ctio7_to_24xx *pkt ; struct qla_hw_data *ha ; struct atio_from_isp *atio ; long tmp ; __u16 tmp___0 ; { ha = vha->hw; atio = & (prm->cmd)->atio; pkt = (struct ctio7_to_24xx *)(vha->req)->ring_ptr; prm->pkt = (void *)pkt; memset((void *)pkt, 0, 64UL); pkt->entry_type = 18U; pkt->entry_count = (unsigned char )prm->req_cnt; pkt->vp_index = (uint8_t )vha->vp_idx; h = qlt_make_handle(vha); tmp = ldv__builtin_expect(h == 0U, 0L); if (tmp != 0L) { return (-11); } else { ha->tgt.cmds[h - 1U] = prm->cmd; } pkt->handle = h | 536870912U; pkt->nport_handle = (prm->cmd)->loop_id; pkt->timeout = 10U; pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 
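/* initiator_id is the ATIO source address with its bytes reversed; the remaining CTIO7 fields echo the exchange address, task attributes and byte-swapped OX_ID of the original command. */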
pkt->exchange_addr = atio->u.isp24.exchange_addr; pkt->u.status0.flags = (uint16_t )((int )((short )pkt->u.status0.flags) | (int )((short )((int )atio->u.isp24.attr << 9))); tmp___0 = __fswab16((int )atio->u.isp24.fcp_hdr.ox_id); pkt->u.status0.ox_id = tmp___0; pkt->u.status0.relative_offset = (unsigned int )(prm->cmd)->offset; ql_dbg(16384U, vha, 57356, "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n", (int )vha->vp_idx, pkt->handle, 10, (int )pkt->u.status0.ox_id); return (0); } } static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm , struct scsi_qla_host *vha ) { int cnt ; uint32_t *dword_ptr ; int enable_64bit_addressing ; cont_a64_entry_t *cont_pkt64 ; void *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; { enable_64bit_addressing = (int )(prm->tgt)->tgt_enable_64bit_addr; goto ldv_61613; ldv_61612: tmp = qlt_get_req_pkt(vha); cont_pkt64 = (cont_a64_entry_t *)tmp; memset((void *)cont_pkt64, 0, 64UL); cont_pkt64->entry_count = 1U; cont_pkt64->sys_define = 0U; if (enable_64bit_addressing != 0) { cont_pkt64->entry_type = 10U; dword_ptr = (uint32_t *)(& cont_pkt64->dseg_0_address); } else { cont_pkt64->entry_type = 2U; dword_ptr = & ((cont_entry_t *)cont_pkt64)->dseg_0_address; } cnt = 0; goto ldv_61610; ldv_61609: tmp___0 = dword_ptr; dword_ptr = dword_ptr + 1; *tmp___0 = (unsigned int )(prm->sg)->dma_address; if (enable_64bit_addressing != 0) { tmp___1 = dword_ptr; dword_ptr = dword_ptr + 1; *tmp___1 = (unsigned int )((prm->sg)->dma_address >> 32ULL); } else { } tmp___2 = dword_ptr; dword_ptr = dword_ptr + 1; *tmp___2 = (prm->sg)->dma_length; ql_dbg(16384U, vha, 57357, "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n", (prm->sg)->dma_address >> 32ULL, (prm->sg)->dma_address & 4294967295ULL, (int )(prm->sg)->dma_length); prm->sg = sg_next(prm->sg); cnt = cnt + 1; prm->seg_cnt = prm->seg_cnt - 1; ldv_61610: ; if ((prm->tgt)->datasegs_per_cont > cnt && prm->seg_cnt != 0) { goto ldv_61609; } else { } ldv_61613: ; if (prm->seg_cnt > 0) { goto ldv_61612; } else { } return; } } static void qlt_load_data_segments(struct qla_tgt_prm *prm , struct scsi_qla_host *vha ) { int cnt ; uint32_t *dword_ptr ; int enable_64bit_addressing ; struct ctio7_to_24xx *pkt24 ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; { enable_64bit_addressing = (int )(prm->tgt)->tgt_enable_64bit_addr; pkt24 = (struct ctio7_to_24xx *)prm->pkt; ql_dbg(16384U, vha, 57358, "iocb->scsi_status=%x, iocb->flags=%x\n", (int )pkt24->u.status0.scsi_status, (int )pkt24->u.status0.flags); pkt24->u.status0.transfer_length = (unsigned int )(prm->cmd)->bufflen; dword_ptr = (uint32_t *)(& pkt24->u.status0.dseg_0_address); if (prm->seg_cnt != 0) { pkt24->dseg_count = (unsigned short )prm->seg_cnt; } else { } if (prm->seg_cnt == 0) { tmp = dword_ptr; dword_ptr = dword_ptr + 1; *tmp = 0U; *dword_ptr = 0U; return; } else { } ql_dbg(16384U, vha, 57359, "%s", (char *)"Building S/G data segments..."); cnt = 0; goto ldv_61624; ldv_61623: tmp___0 = dword_ptr; dword_ptr = dword_ptr + 1; *tmp___0 = (unsigned int )(prm->sg)->dma_address; if (enable_64bit_addressing != 0) { tmp___1 = dword_ptr; dword_ptr = dword_ptr + 1; *tmp___1 = (unsigned int )((prm->sg)->dma_address >> 32ULL); } else { } tmp___2 = dword_ptr; dword_ptr = dword_ptr + 1; *tmp___2 = (prm->sg)->dma_length; ql_dbg(16384U, vha, 57360, "S/G Segment phys_addr=%llx:%llx, len=%d\n", (prm->sg)->dma_address >> 32ULL, (prm->sg)->dma_address & 4294967295ULL, (int )(prm->sg)->dma_length); prm->sg = sg_next(prm->sg); cnt = cnt + 
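/* Segments that do not fit in the CTIO itself spill into continuation IOCBs, emitted by qlt_load_cont_data_segments() with 32- or 64-bit descriptors depending on tgt_enable_64bit_addr. */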
1; prm->seg_cnt = prm->seg_cnt - 1; ldv_61624: ; if ((prm->tgt)->datasegs_per_cmd > cnt && prm->seg_cnt != 0) { goto ldv_61623; } else { } qlt_load_cont_data_segments(prm, vha); return; } } __inline static int qlt_has_data(struct qla_tgt_cmd *cmd ) { { return (cmd->bufflen > 0); } } static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd , struct qla_tgt_prm *prm , int xmit_type , uint8_t scsi_status , uint32_t *full_req_cnt ) { struct qla_tgt *tgt ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct se_cmd *se_cmd ; long tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { tgt = cmd->tgt; vha = tgt->vha; ha = vha->hw; se_cmd = & cmd->se_cmd; tmp = ldv__builtin_expect((unsigned int )*((unsigned char *)cmd + 1256UL) != 0U, 0L); if (tmp != 0L) { ql_dbg(8192U, vha, 61460, "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%d)", (int )vha->vp_idx, cmd, se_cmd, cmd->tag); cmd->state = 4; qlt_send_term_exchange(vha, cmd, & cmd->atio, 0); return (5911); } else { } ql_dbg(16384U, vha, 57361, "qla_target(%d): tag=%u\n", (int )vha->vp_idx, cmd->tag); prm->cmd = cmd; prm->tgt = tgt; prm->rq_result = (uint16_t )scsi_status; prm->sense_buffer = (unsigned char *)(& cmd->sense_buffer); prm->sense_buffer_len = 96; prm->sg = (struct scatterlist *)0; prm->seg_cnt = -1; prm->req_cnt = 1; prm->add_status_pkt = 0; ql_dbg(16384U, vha, 57362, "rq_result=%x, xmit_type=%x\n", (int )prm->rq_result, xmit_type); tmp___0 = qlt_issue_marker(vha, 0); if (tmp___0 != 0) { return (-14); } else { } ql_dbg(16384U, vha, 57363, "CTIO start: vha(%d)\n", (int )vha->vp_idx); if (xmit_type & 1) { tmp___2 = qlt_has_data(cmd); if (tmp___2 != 0) { tmp___1 = qlt_pci_map_calc_cnt(prm); if (tmp___1 != 0) { return (-11); } else { } } else { } } else { } *full_req_cnt = (uint32_t )prm->req_cnt; if ((se_cmd->se_cmd_flags & 8192U) != 0U) { prm->residual = (int )se_cmd->residual_count; ql_dbg(16384U, vha, 57364, "Residual underflow: %d (tag %d, op %x, bufflen %d, rq_result %x)\n", prm->residual, cmd->tag, (unsigned long )se_cmd->t_task_cdb != (unsigned long )((unsigned char *)0U) ? (int )*(se_cmd->t_task_cdb) : 0, cmd->bufflen, (int )prm->rq_result); prm->rq_result = (uint16_t )((unsigned int )prm->rq_result | 2048U); } else if ((se_cmd->se_cmd_flags & 4096U) != 0U) { prm->residual = (int )se_cmd->residual_count; ql_dbg(16384U, vha, 57365, "Residual overflow: %d (tag %d, op %x, bufflen %d, rq_result %x)\n", prm->residual, cmd->tag, (unsigned long )se_cmd->t_task_cdb != (unsigned long )((unsigned char *)0U) ? 
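/* Residual over/underflow is folded into rq_result; when valid sense data (or, on some adapter types, any non-zero SCSI status) has to accompany a data CTIO, add_status_pkt reserves one extra request-ring entry. */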
(int )*(se_cmd->t_task_cdb) : 0, cmd->bufflen, (int )prm->rq_result); prm->rq_result = (uint16_t )((unsigned int )prm->rq_result | 1024U); } else { } if ((xmit_type & 2) != 0) { tmp___3 = qlt_has_data(cmd); if (tmp___3 != 0) { if (((unsigned long )prm->sense_buffer != (unsigned long )((unsigned char *)0U) && ((int )*((uint8_t const *)prm->sense_buffer) & 112) == 112) || ((ha->device_type & 134217728U) != 0U && (unsigned int )prm->rq_result != 0U)) { prm->add_status_pkt = 1; *full_req_cnt = *full_req_cnt + 1U; } else { } } else { } } else { } ql_dbg(16384U, vha, 57366, "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n", prm->req_cnt, *full_req_cnt, prm->add_status_pkt); return (0); } } __inline static int qlt_need_explicit_conf(struct qla_hw_data *ha , struct qla_tgt_cmd *cmd , int sending_sense ) { { if ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U) { return (0); } else { } if (sending_sense != 0) { return ((int )cmd->conf_compl_supported); } else { return ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U && (unsigned int )*((unsigned char *)cmd + 1256UL) != 0U); } } } __inline static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd , int *xmit_type ) { { return; } } static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio , struct qla_tgt_prm *prm ) { uint32_t __min1 ; uint32_t __min2 ; int tmp ; int i ; int tmp___0 ; __u32 tmp___1 ; { __min1 = (uint32_t )prm->sense_buffer_len; __min2 = 24U; prm->sense_buffer_len = (int )(__min1 < __min2 ? __min1 : __min2); ctio->u.status0.flags = (uint16_t )((unsigned int )ctio->u.status0.flags | 32768U); tmp = qlt_need_explicit_conf((prm->tgt)->ha, prm->cmd, 0); if (tmp != 0) { ctio->u.status0.flags = (uint16_t )((unsigned int )ctio->u.status0.flags | 8224U); } else { } ctio->u.status0.residual = (unsigned int )prm->residual; ctio->u.status0.scsi_status = prm->rq_result; if ((unsigned long )prm->sense_buffer != (unsigned long )((unsigned char *)0U) && ((int )*((uint8_t const *)prm->sense_buffer) & 112) == 112) { tmp___0 = qlt_need_explicit_conf((prm->tgt)->ha, prm->cmd, 1); if (tmp___0 != 0) { if ((unsigned int )(prm->cmd)->se_cmd.scsi_status != 0U) { ql_dbg(16384U, (prm->cmd)->vha, 57367, "Skipping EXPLICIT_CONFORM and CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ non GOOD status\n"); goto skip_explict_conf; } else { } ctio->u.status1.flags = (uint16_t )((unsigned int )ctio->u.status1.flags | 8224U); } else { } skip_explict_conf: ctio->u.status1.flags = ctio->u.status1.flags; ctio->u.status1.flags = (uint16_t )((unsigned int )ctio->u.status1.flags | 64U); ctio->u.status1.scsi_status = (uint16_t )((unsigned int )ctio->u.status1.scsi_status | 512U); ctio->u.status1.sense_length = (unsigned short )prm->sense_buffer_len; i = 0; goto ldv_61659; ldv_61658: tmp___1 = __fswab32(*((uint32_t *)prm->sense_buffer + (unsigned long )i)); *((uint32_t *)(& ctio->u.status1.sense_data) + (unsigned long )i) = tmp___1; i = i + 1; ldv_61659: ; if (prm->sense_buffer_len / 4 > i) { goto ldv_61658; } else { } } else { ctio->u.status1.flags = ctio->u.status1.flags; ctio->u.status1.flags = (uint16_t )((unsigned int )ctio->u.status1.flags | 64U); ctio->u.status1.sense_length = 0U; memset((void *)(& ctio->u.status1.sense_data), 0, 24UL); } return; } } int qlt_xmit_response(struct qla_tgt_cmd *cmd , int xmit_type , uint8_t scsi_status ) { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct ctio7_to_24xx *pkt ; struct qla_tgt_prm prm ; uint32_t full_req_cnt ; unsigned long flags ; int res ; long tmp ; raw_spinlock_t *tmp___0 ; long tmp___1 ; long tmp___2 ; int 
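/* qlt_xmit_response(): main transmit path -- reserve request-ring space, build the CTIO7, load data segments when data is returned, and append a separate status CTIO when add_status_pkt was set. */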
tmp___3 ; struct ctio7_to_24xx *ctio ; void *tmp___4 ; size_t __len ; void *__ret ; struct _ddebug descriptor ; long tmp___5 ; int tmp___6 ; { vha = cmd->vha; ha = vha->hw; full_req_cnt = 0U; flags = 0UL; memset((void *)(& prm), 0, 72UL); qlt_check_srr_debug(cmd, & xmit_type); ql_dbg(16384U, cmd->vha, 57368, "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d\n", (xmit_type & 2) != 0, cmd->bufflen, cmd->sg_cnt, (unsigned int )cmd->dma_data_direction); res = qlt_pre_xmit_response(cmd, & prm, xmit_type, (int )scsi_status, & full_req_cnt); tmp = ldv__builtin_expect(res != 0, 0L); if (tmp != 0L) { if (res == 5911) { return (0); } else { } return (res); } else { } tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); res = qlt_check_reserve_free_req(vha, full_req_cnt); tmp___1 = ldv__builtin_expect(res != 0, 0L); if (tmp___1 != 0L) { goto out_unmap_unlock; } else { } res = qlt_24xx_build_ctio_pkt(& prm, vha); tmp___2 = ldv__builtin_expect(res != 0, 0L); if (tmp___2 != 0L) { goto out_unmap_unlock; } else { } pkt = (struct ctio7_to_24xx *)prm.pkt; tmp___6 = qlt_has_data(cmd); if (tmp___6 != 0 && xmit_type & 1) { pkt->u.status0.flags = (uint16_t )((unsigned int )pkt->u.status0.flags | 2U); qlt_load_data_segments(& prm, vha); if (prm.add_status_pkt == 0) { if ((xmit_type & 2) != 0) { pkt->u.status0.scsi_status = prm.rq_result; pkt->u.status0.residual = (unsigned int )prm.residual; pkt->u.status0.flags = (uint16_t )((unsigned int )pkt->u.status0.flags | 32768U); tmp___3 = qlt_need_explicit_conf(ha, cmd, 0); if (tmp___3 != 0) { pkt->u.status0.flags = (uint16_t )((unsigned int )pkt->u.status0.flags | 8224U); } else { } } else { } } else { tmp___4 = qlt_get_req_pkt(vha); ctio = (struct ctio7_to_24xx *)tmp___4; ql_dbg(16384U, vha, 57369, "Building additional status packet\n"); __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)ctio, (void const *)pkt, __len); } else { __ret = __builtin_memcpy((void *)ctio, (void const *)pkt, __len); } ctio->entry_count = 1U; ctio->dseg_count = 0U; ctio->u.status1.flags = (unsigned int )ctio->u.status1.flags & 65533U; pkt->handle = pkt->handle | 1073741824U; pkt->u.status0.flags = (uint16_t )((unsigned int )pkt->u.status0.flags | 256U); qlt_24xx_init_ctio_to_isp(ctio, & prm); descriptor.modname = "qla2xxx"; descriptor.function = "qlt_xmit_response"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"; descriptor.format = "Status CTIO7: %p\n"; descriptor.lineno = 2275U; descriptor.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___5 != 0L) { __dynamic_pr_debug(& descriptor, "Status CTIO7: %p\n", ctio); } else { } } } else { qlt_24xx_init_ctio_to_isp(pkt, & prm); } cmd->state = 3; ql_dbg(16384U, vha, 57370, "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n", pkt, (int )scsi_status); qla2x00_start_iocbs(vha, vha->req); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); out_unmap_unlock: ; if ((unsigned int )*((unsigned char *)cmd + 1256UL) != 0U) { qlt_unmap_sg(vha, cmd); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (res); } } int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd ) { struct ctio7_to_24xx *pkt ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_prm prm ; 
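/* qlt_rdy_to_xfer(): post a CTIO7 that lets the initiator start sending write data (cmd->state = 1); the completion is picked up later in qlt_do_ctio_completion(). */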
unsigned long flags ; int res ; int tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; long tmp___2 ; { vha = cmd->vha; ha = vha->hw; tgt = cmd->tgt; res = 0; memset((void *)(& prm), 0, 72UL); prm.cmd = cmd; prm.tgt = tgt; prm.sg = (struct scatterlist *)0; prm.req_cnt = 1; tmp = qlt_issue_marker(vha, 0); if (tmp != 0) { return (-5); } else { } ql_dbg(16384U, vha, 57371, "CTIO_start: vha(%d)", (int )vha->vp_idx); tmp___0 = qlt_pci_map_calc_cnt(& prm); if (tmp___0 != 0) { return (-11); } else { } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); res = qlt_check_reserve_free_req(vha, (uint32_t )prm.req_cnt); if (res != 0) { goto out_unlock_free_unmap; } else { } res = qlt_24xx_build_ctio_pkt(& prm, vha); tmp___2 = ldv__builtin_expect(res != 0, 0L); if (tmp___2 != 0L) { goto out_unlock_free_unmap; } else { } pkt = (struct ctio7_to_24xx *)prm.pkt; pkt->u.status0.flags = (uint16_t )((unsigned int )pkt->u.status0.flags | 1U); qlt_load_data_segments(& prm, vha); cmd->state = 1; qla2x00_start_iocbs(vha, vha->req); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (res); out_unlock_free_unmap: ; if ((unsigned int )*((unsigned char *)cmd + 1256UL) != 0U) { qlt_unmap_sg(vha, cmd); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (res); } } static int __qlt_send_term_exchange(struct scsi_qla_host *vha , struct qla_tgt_cmd *cmd , struct atio_from_isp *atio ) { struct ctio7_to_24xx *ctio24 ; struct qla_hw_data *ha ; request_t *pkt ; int ret ; void *tmp ; __u16 tmp___0 ; { ha = vha->hw; ret = 0; ql_dbg(16384U, vha, 57372, "Sending TERM EXCH CTIO (ha=%p)\n", ha); tmp = qla2x00_alloc_iocbs(vha, (srb_t *)0); pkt = (request_t *)tmp; if ((unsigned long )pkt == (unsigned long )((request_t *)0)) { ql_dbg(16384U, vha, 57424, "qla_target(%d): %s failed: unable to allocate request packet\n", (int )vha->vp_idx, "__qlt_send_term_exchange"); return (-12); } else { } if ((unsigned long )cmd != (unsigned long )((struct qla_tgt_cmd *)0)) { if (cmd->state <= 2) { ql_dbg(16384U, vha, 57425, "qla_target(%d): Terminating cmd %p with incorrect state %d\n", (int )vha->vp_idx, cmd, cmd->state); } else { ret = 1; } } else { } pkt->entry_count = 1U; pkt->handle = 4294967295U; ctio24 = (struct ctio7_to_24xx *)pkt; ctio24->entry_type = 18U; ctio24->nport_handle = (unsigned long )cmd != (unsigned long )((struct qla_tgt_cmd *)0) ? 
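/* The terminate-exchange CTIO7 uses the command's loop_id when one is known (0xffff otherwise) and echoes the exchange address, attributes and OX_ID of the original ATIO. */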
cmd->loop_id : 65535U; ctio24->timeout = 10U; ctio24->vp_index = (uint8_t )vha->vp_idx; ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; ctio24->exchange_addr = atio->u.isp24.exchange_addr; ctio24->u.status1.flags = (uint16_t )((int )((short )((int )atio->u.isp24.attr << 9)) | 16448); tmp___0 = __fswab16((int )atio->u.isp24.fcp_hdr.ox_id); ctio24->u.status1.ox_id = tmp___0; ctio24->u.status1.residual = get_unaligned_le32((void const *)(& atio->u.isp24.fcp_cmnd.add_cdb) + (unsigned long )atio->u.isp24.fcp_cmnd.add_cdb_len); if (ctio24->u.status1.residual != 0U) { ctio24->u.status1.scsi_status = (uint16_t )((unsigned int )ctio24->u.status1.scsi_status | 2048U); } else { } qla2x00_start_iocbs(vha, vha->req); return (ret); } } static void qlt_send_term_exchange(struct scsi_qla_host *vha , struct qla_tgt_cmd *cmd , struct atio_from_isp *atio , int ha_locked ) { unsigned long flags ; int rc ; int tmp ; raw_spinlock_t *tmp___0 ; struct thread_info *tmp___1 ; { tmp = qlt_issue_marker(vha, ha_locked); if (tmp < 0) { return; } else { } if (ha_locked != 0) { rc = __qlt_send_term_exchange(vha, cmd, atio); goto done; } else { } tmp___0 = spinlock_check(& (vha->hw)->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); rc = __qlt_send_term_exchange(vha, cmd, atio); spin_unlock_irqrestore(& (vha->hw)->hardware_lock, flags); done: ; if (rc == 1) { if (ha_locked == 0) { tmp___1 = current_thread_info(); if (((unsigned long )tmp___1->preempt_count & 134217472UL) == 0UL) { msleep(250U); } else { } } else { } (*(((vha->hw)->tgt.tgt_ops)->free_cmd))(cmd); } else { } return; } } void qlt_free_cmd(struct qla_tgt_cmd *cmd ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect((unsigned int )*((unsigned char *)cmd + 1256UL) != 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"), "i" (2445), "i" (12UL)); ldv_61736: ; goto ldv_61736; } else { } tmp___0 = ldv__builtin_expect((unsigned int )*((unsigned char *)cmd + 1256UL) != 0U, 0L); if (tmp___0 != 0L) { kfree((void const *)cmd->sg); } else { } kmem_cache_free(qla_tgt_cmd_cachep, (void *)cmd); return; } } static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha , struct qla_tgt_cmd *cmd , void *ctio ) { struct qla_tgt_srr_ctio *sc ; struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_srr_imm *imm ; void *tmp ; int found ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct qla_tgt_srr_imm *ti ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; { ha = vha->hw; tgt = ha->tgt.qla_tgt; tgt->ctio_srr_id = tgt->ctio_srr_id + 1; ql_dbg(8192U, vha, 61465, "qla_target(%d): CTIO with SRR status received\n", (int )vha->vp_idx); if ((unsigned long )ctio == (unsigned long )((void *)0)) { ql_dbg(8192U, vha, 61525, "qla_target(%d): SRR CTIO, but ctio is NULL\n", (int )vha->vp_idx); return (-22); } else { } tmp = kzalloc(32UL, 32U); sc = (struct qla_tgt_srr_ctio *)tmp; if ((unsigned long )sc != (unsigned long )((struct qla_tgt_srr_ctio *)0)) { sc->cmd = cmd; spin_lock(& tgt->srr_lock); sc->srr_id 
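/* Queue this CTIO-side SRR; if the immediate-notify half with the same srr_id has already arrived, tgt->srr_work is scheduled to process the pair. */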
= tgt->ctio_srr_id; list_add_tail(& sc->srr_list_entry, & tgt->srr_ctio_list); ql_dbg(8192U, vha, 61466, "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); if (tgt->imm_srr_id == tgt->ctio_srr_id) { found = 0; __mptr = (struct list_head const *)tgt->srr_imm_list.next; imm = (struct qla_tgt_srr_imm *)__mptr; goto ldv_61759; ldv_61758: ; if (imm->srr_id == sc->srr_id) { found = 1; goto ldv_61757; } else { } __mptr___0 = (struct list_head const *)imm->srr_list_entry.next; imm = (struct qla_tgt_srr_imm *)__mptr___0; ldv_61759: ; if ((unsigned long )(& imm->srr_list_entry) != (unsigned long )(& tgt->srr_imm_list)) { goto ldv_61758; } else { } ldv_61757: ; if (found != 0) { ql_dbg(8192U, vha, 61467, "Scheduling srr work\n"); schedule_work(& tgt->srr_work); } else { ql_dbg(8192U, vha, 61526, "qla_target(%d): imm_srr_id == ctio_srr_id (%d), but there is no corresponding SRR IMM, deleting CTIO SRR %p\n", (int )vha->vp_idx, tgt->ctio_srr_id, sc); list_del(& sc->srr_list_entry); spin_unlock(& tgt->srr_lock); kfree((void const *)sc); return (-22); } } else { } spin_unlock(& tgt->srr_lock); } else { ql_dbg(8192U, vha, 61527, "qla_target(%d): Unable to allocate SRR CTIO entry\n", (int )vha->vp_idx); spin_lock(& tgt->srr_lock); __mptr___1 = (struct list_head const *)tgt->srr_imm_list.next; imm = (struct qla_tgt_srr_imm *)__mptr___1; __mptr___2 = (struct list_head const *)imm->srr_list_entry.next; ti = (struct qla_tgt_srr_imm *)__mptr___2; goto ldv_61768; ldv_61767: ; if (imm->srr_id == tgt->ctio_srr_id) { ql_dbg(8192U, vha, 61468, "IMM SRR %p deleted (id %d)\n", imm, imm->srr_id); list_del(& imm->srr_list_entry); qlt_reject_free_srr_imm(vha, imm, 1); } else { } imm = ti; __mptr___3 = (struct list_head const *)ti->srr_list_entry.next; ti = (struct qla_tgt_srr_imm *)__mptr___3; ldv_61768: ; if ((unsigned long )(& imm->srr_list_entry) != (unsigned long )(& tgt->srr_imm_list)) { goto ldv_61767; } else { } spin_unlock(& tgt->srr_lock); return (-12); } return (0); } } static int qlt_term_ctio_exchange(struct scsi_qla_host *vha , void *ctio , struct qla_tgt_cmd *cmd , uint32_t status ) { int term ; struct ctio7_from_24xx *c ; { term = 0; if ((unsigned long )ctio != (unsigned long )((void *)0)) { c = (struct ctio7_from_24xx *)ctio; term = ((int )c->flags & 16384) == 0; } else { term = 1; } if (term != 0) { qlt_send_term_exchange(vha, cmd, & cmd->atio, 1); } else { } return (term); } } __inline static struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha , uint32_t handle ) { struct qla_hw_data *ha ; struct qla_tgt_cmd *cmd ; { ha = vha->hw; handle = handle - 1U; if ((unsigned long )ha->tgt.cmds[handle] != (unsigned long )((struct qla_tgt_cmd *)0)) { cmd = ha->tgt.cmds[handle]; ha->tgt.cmds[handle] = (struct qla_tgt_cmd *)0; return (cmd); } else { return ((struct qla_tgt_cmd *)0); } } } static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha , uint32_t handle , void *ctio ) { struct qla_tgt_cmd *cmd ; long tmp ; long tmp___0 ; long tmp___1 ; { cmd = (struct qla_tgt_cmd *)0; handle = handle & 2684354559U; if (handle != 0U) { tmp = ldv__builtin_expect(handle == 3758096383U, 0L); if (tmp != 0L) { ql_dbg(16384U, vha, 57373, "%s", (char *)"SKIP_HANDLE CTIO\n"); return ((struct qla_tgt_cmd *)0); } else { } tmp___0 = ldv__builtin_expect(handle > 1024U, 0L); if (tmp___0 != 0L) { ql_dbg(16384U, vha, 57426, "qla_target(%d): Wrong handle %x received\n", (int )vha->vp_idx, handle); return ((struct qla_tgt_cmd *)0); } else { } cmd = qlt_get_cmd(vha, handle); tmp___1 = ldv__builtin_expect((unsigned long )cmd == 
(unsigned long )((struct qla_tgt_cmd *)0), 0L); if (tmp___1 != 0L) { ql_dbg(16384U, vha, 57427, "qla_target(%d): Suspicious: unable to find the command with handle %x\n", (int )vha->vp_idx, handle); return ((struct qla_tgt_cmd *)0); } else { } } else if ((unsigned long )ctio != (unsigned long )((void *)0)) { ql_dbg(16384U, vha, 57428, "qla_target(%d): Wrong CTIO received: QLA24xx doesn\'t support NULL handles\n", (int )vha->vp_idx); return ((struct qla_tgt_cmd *)0); } else { } return (cmd); } } static void qlt_do_ctio_completion(struct scsi_qla_host *vha , uint32_t handle , uint32_t status , void *ctio ) { struct qla_hw_data *ha ; struct se_cmd *se_cmd ; struct target_core_fabric_ops *tfo ; struct qla_tgt_cmd *cmd ; int tmp ; int tmp___0 ; long tmp___1 ; int rx_status ; long tmp___2 ; long tmp___3 ; { ha = vha->hw; ql_dbg(16384U, vha, 57374, "qla_target(%d): handle(ctio %p status %#x) <- %08x\n", (int )vha->vp_idx, ctio, status, handle); if ((handle & 1073741824U) != 0U) { if (status != 1U) { ql_dbg(8192U, vha, 61469, "Intermediate CTIO received (status %x)\n", status); } else { } return; } else { } cmd = qlt_ctio_to_cmd(vha, handle, ctio); if ((unsigned long )cmd == (unsigned long )((struct qla_tgt_cmd *)0)) { return; } else { } se_cmd = & cmd->se_cmd; tfo = se_cmd->se_tfo; if ((unsigned int )*((unsigned char *)cmd + 1256UL) != 0U) { qlt_unmap_sg(vha, cmd); } else { } tmp___1 = ldv__builtin_expect(status != 1U, 0L); if (tmp___1 != 0L) { switch (status & 65535U) { case 14U: ; case 23U: ; case 2U: ; case 11U: ; case 8U: ql_dbg(8192U, vha, 61528, "qla_target(%d): CTIO with status %#x received, state %x, se_cmd %p, (LIP_RESET=e, ABORTED=2, TARGET_RESET=17, TIMEOUT=b, INVALID_RX_ID=8)\n", (int )vha->vp_idx, status, cmd->state, se_cmd); goto ldv_61805; case 41U: ; case 40U: ql_dbg(8192U, vha, 61529, "qla_target(%d): CTIO with PORT LOGGED OUT (29) or PORT UNAVAILABLE (28) status %x received (state %x, se_cmd %p)\n", (int )vha->vp_idx, status, cmd->state, se_cmd); goto ldv_61805; case 69U: ql_dbg(8192U, vha, 61530, "qla_target(%d): CTIO with SRR_RECEIVED status %x received (state %x, se_cmd %p)\n", (int )vha->vp_idx, status, cmd->state, se_cmd); tmp = qlt_prepare_srr_ctio(vha, cmd, ctio); if (tmp != 0) { goto ldv_61805; } else { return; } default: ql_dbg(8192U, vha, 61531, "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", (int )vha->vp_idx, status, cmd->state, se_cmd); goto ldv_61805; } ldv_61805: ; if (cmd->state != 1) { tmp___0 = qlt_term_ctio_exchange(vha, ctio, cmd, status); if (tmp___0 != 0) { return; } else { } } else { } } else { } if (cmd->state == 3) { ql_dbg(16384U, vha, 57375, "Command %p finished\n", cmd); } else if (cmd->state == 1) { rx_status = 0; cmd->state = 2; tmp___2 = ldv__builtin_expect(status != 1U, 0L); if (tmp___2 != 0L) { rx_status = -5; } else { cmd->write_data_transferred = 1U; } ql_dbg(16384U, vha, 57376, "Data received, context %x, rx_status %d\n", 0, rx_status); (*((ha->tgt.tgt_ops)->handle_data))(cmd); return; } else if (cmd->state == 4) { ql_dbg(8192U, vha, 61470, "Aborted command %p (tag %d) finished\n", cmd, cmd->tag); } else { ql_dbg(8192U, vha, 61532, "qla_target(%d): A command in state (%d) should not return a CTIO complete\n", (int )vha->vp_idx, cmd->state); } tmp___3 = ldv__builtin_expect(status != 1U, 0L); if (tmp___3 != 0L) { ql_dbg(8192U, vha, 61471, "Finishing failed CTIO\n"); dump_stack(); } else { } (*((ha->tgt.tgt_ops)->free_cmd))(cmd); return; } } __inline static int qlt_get_fcp_task_attr(struct scsi_qla_host *vha , 
uint8_t task_codes ) { int fcp_task_attr ; { switch ((int )task_codes) { case 0: fcp_task_attr = 32; goto ldv_61817; case 1: fcp_task_attr = 33; goto ldv_61817; case 2: fcp_task_attr = 34; goto ldv_61817; case 4: fcp_task_attr = 36; goto ldv_61817; case 5: fcp_task_attr = 32; goto ldv_61817; default: ql_dbg(8192U, vha, 61533, "qla_target: unknown task code %x, use ORDERED instead\n", (int )task_codes); fcp_task_attr = 34; goto ldv_61817; } ldv_61817: ; return (fcp_task_attr); } } static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha , uint8_t *s_id ) ; static void qlt_do_work(struct work_struct *work ) { struct qla_tgt_cmd *cmd ; struct work_struct const *__mptr ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_sess *sess ; struct atio_from_isp *atio ; unsigned char *cdb ; unsigned long flags ; uint32_t data_length ; int ret ; int fcp_task_attr ; int data_dir ; int bidi ; raw_spinlock_t *tmp ; uint8_t *s_id ; long tmp___0 ; int tmp___1 ; u32 tmp___2 ; __u32 tmp___3 ; raw_spinlock_t *tmp___4 ; raw_spinlock_t *tmp___5 ; { __mptr = (struct work_struct const *)work; cmd = (struct qla_tgt_cmd *)__mptr + 0xfffffffffffffbc8UL; vha = cmd->vha; ha = vha->hw; tgt = ha->tgt.qla_tgt; sess = (struct qla_tgt_sess *)0; atio = & cmd->atio; bidi = 0; if (tgt->tgt_stop != 0) { goto out_term; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); sess = (*((ha->tgt.tgt_ops)->find_sess_by_s_id))(vha, (uint8_t const *)(& atio->u.isp24.fcp_hdr.s_id)); if ((unsigned long )sess != (unsigned long )((struct qla_tgt_sess *)0)) { kref_get(& (sess->se_sess)->sess_kref); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___0 = ldv__builtin_expect((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0), 0L); if (tmp___0 != 0L) { s_id = (uint8_t *)(& atio->u.isp24.fcp_hdr.s_id); ql_dbg(8192U, vha, 61474, "qla_target(%d): Unable to find wwn login (s_id %x:%x:%x), trying to create it manually\n", (int )vha->vp_idx, (int )*s_id, (int )*(s_id + 1UL), (int )*(s_id + 2UL)); if ((unsigned int )atio->u.raw.entry_count > 1U) { ql_dbg(8192U, vha, 61475, "Dropping multy entry cmd %p\n", cmd); goto out_term; } else { } mutex_lock_nested(& ha->tgt.tgt_mutex, 0U); sess = qlt_make_local_sess(vha, s_id); mutex_unlock(& ha->tgt.tgt_mutex); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { goto out_term; } else { } } else { } cmd->sess = sess; cmd->loop_id = sess->loop_id; cmd->conf_compl_supported = sess->conf_compl_supported; cdb = (unsigned char *)(& atio->u.isp24.fcp_cmnd.cdb); cmd->tag = atio->u.isp24.exchange_addr; tmp___1 = scsilun_to_int((struct scsi_lun *)(& atio->u.isp24.fcp_cmnd.lun)); cmd->unpacked_lun = (uint32_t )tmp___1; if ((unsigned int )*((unsigned char *)atio + 43UL) != 0U && (unsigned int )*((unsigned char *)atio + 43UL) != 0U) { bidi = 1; data_dir = 1; } else if ((unsigned int )*((unsigned char *)atio + 43UL) != 0U) { data_dir = 2; } else if ((unsigned int )*((unsigned char *)atio + 43UL) != 0U) { data_dir = 1; } else { data_dir = 3; } fcp_task_attr = qlt_get_fcp_task_attr(vha, (int )atio->u.isp24.fcp_cmnd.task_attr); tmp___2 = get_unaligned_le32((void const *)(& atio->u.isp24.fcp_cmnd.add_cdb) + (unsigned long )atio->u.isp24.fcp_cmnd.add_cdb_len); tmp___3 = __fswab32(tmp___2); data_length = tmp___3; ql_dbg(16384U, vha, 57378, "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n", cmd, cmd->unpacked_lun, cmd->tag); ret = (*(((vha->hw)->tgt.tgt_ops)->handle_cmd))(vha, cmd, 
cdb, data_length, fcp_task_attr, data_dir, bidi); if (ret != 0) { goto out_term; } else { } tmp___4 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___4); (*((ha->tgt.tgt_ops)->put_sess))(sess); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; out_term: ql_dbg(8192U, vha, 61472, "Terminating work cmd %p", cmd); tmp___5 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___5); qlt_send_term_exchange(vha, (struct qla_tgt_cmd *)0, & cmd->atio, 1); kmem_cache_free(qla_tgt_cmd_cachep, (void *)cmd); if ((unsigned long )sess != (unsigned long )((struct qla_tgt_sess *)0)) { (*((ha->tgt.tgt_ops)->put_sess))(sess); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha , struct atio_from_isp *atio ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_cmd *cmd ; long tmp ; void *tmp___0 ; size_t __len ; void *__ret ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; { ha = vha->hw; tgt = ha->tgt.qla_tgt; tmp = ldv__builtin_expect(tgt->tgt_stop != 0, 0L); if (tmp != 0L) { ql_dbg(8192U, vha, 61473, "New command while device %p is shutting down\n", tgt); return (-14); } else { } tmp___0 = kmem_cache_zalloc(qla_tgt_cmd_cachep, 32U); cmd = (struct qla_tgt_cmd *)tmp___0; if ((unsigned long )cmd == (unsigned long )((struct qla_tgt_cmd *)0)) { ql_dbg(8192U, vha, 61534, "qla_target(%d): Allocation of cmd failed\n", (int )vha->vp_idx); return (-12); } else { } INIT_LIST_HEAD(& cmd->cmd_list); __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)(& cmd->atio), (void const *)atio, __len); } else { __ret = __builtin_memcpy((void *)(& cmd->atio), (void const *)atio, __len); } cmd->state = 0; cmd->tgt = ha->tgt.qla_tgt; cmd->vha = vha; __init_work(& cmd->work, 0); __constr_expr_0.counter = 137438953408L; cmd->work.data = __constr_expr_0; lockdep_init_map(& cmd->work.lockdep_map, "(&cmd->work)", & __key, 0); INIT_LIST_HEAD(& cmd->work.entry); cmd->work.func = & qlt_do_work; queue_work(qla_tgt_wq, & cmd->work); return (0); } } static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess , uint32_t lun , int fn , void *iocb , int flags ) { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct qla_tgt_mgmt_cmd *mcmd ; int res ; uint8_t tmr_func ; void *tmp ; size_t __len ; void *__ret ; { vha = sess->vha; ha = vha->hw; tmp = mempool_alloc(qla_tgt_mgmt_cmd_mempool, 32U); mcmd = (struct qla_tgt_mgmt_cmd *)tmp; if ((unsigned long )mcmd == (unsigned long )((struct qla_tgt_mgmt_cmd *)0)) { ql_dbg(4096U, vha, 65545, "qla_target(%d): Allocation of management command failed, some commands and their data could leak\n", (int )vha->vp_idx); return (-12); } else { } memset((void *)mcmd, 0, 1152UL); mcmd->sess = sess; if ((unsigned long )iocb != (unsigned long )((void *)0)) { __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)(& mcmd->orig_iocb.imm_ntfy), (void const *)iocb, __len); } else { __ret = __builtin_memcpy((void *)(& mcmd->orig_iocb.imm_ntfy), (void const *)iocb, __len); } } else { } mcmd->tmr_func = (uint8_t )fn; mcmd->flags = (unsigned int )flags; switch (fn) { case 64: ql_dbg(4096U, vha, 65536, "qla_target(%d): CLEAR_ACA received\n", (int )(sess->vha)->vp_idx); tmr_func = 3U; goto ldv_61883; case 32: ql_dbg(4096U, vha, 65537, "qla_target(%d): TARGET_RESET received\n", (int )(sess->vha)->vp_idx); tmr_func = 6U; goto ldv_61883; case 16: ql_dbg(4096U, vha, 65538, "qla_target(%d): LUN_RESET received\n", (int )(sess->vha)->vp_idx); tmr_func = 5U; 
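/* Orientation note added by the editor (not part of the generated source): the tmr_func codes assigned in this switch (CLEAR_ACA=3, TARGET_RESET=6, LUN_RESET=5, CLEAR_TS=4, ABORT_TS=2, as named in the ql_dbg messages) are forwarded unchanged to tgt_ops->handle_tmr() after the ldv_61883 label; an unrecognized fn value frees the mempool entry and returns -38 (presumably -ENOSYS). */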
goto ldv_61883; case 4: ql_dbg(4096U, vha, 65539, "qla_target(%d): CLEAR_TS received\n", (int )(sess->vha)->vp_idx); tmr_func = 4U; goto ldv_61883; case 2: ql_dbg(4096U, vha, 65540, "qla_target(%d): ABORT_TS received\n", (int )(sess->vha)->vp_idx); tmr_func = 2U; goto ldv_61883; default: ql_dbg(4096U, vha, 65546, "qla_target(%d): Unknown task mgmt fn 0x%x\n", (int )(sess->vha)->vp_idx, fn); mempool_free((void *)mcmd, qla_tgt_mgmt_cmd_mempool); return (-38); } ldv_61883: res = (*((ha->tgt.tgt_ops)->handle_tmr))(mcmd, lun, (int )tmr_func, 0U); if (res != 0) { ql_dbg(4096U, vha, 65547, "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", (int )(sess->vha)->vp_idx, res); mempool_free((void *)mcmd, qla_tgt_mgmt_cmd_mempool); return (-14); } else { } return (0); } } static int qlt_handle_task_mgmt(struct scsi_qla_host *vha , void *iocb ) { struct atio_from_isp *a ; struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_sess *sess ; uint32_t lun ; uint32_t unpacked_lun ; int lun_size ; int fn ; int tmp ; int tmp___0 ; int tmp___1 ; { a = (struct atio_from_isp *)iocb; ha = vha->hw; tgt = ha->tgt.qla_tgt; lun = (uint32_t )a->u.isp24.fcp_cmnd.lun; lun_size = 8; fn = (int )a->u.isp24.fcp_cmnd.task_mgmt_flags; sess = (*((ha->tgt.tgt_ops)->find_sess_by_s_id))(vha, (uint8_t const *)(& a->u.isp24.fcp_hdr.s_id)); tmp = scsilun_to_int((struct scsi_lun *)(& lun)); unpacked_lun = (uint32_t )tmp; if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { ql_dbg(8192U, vha, 61476, "qla_target(%d): task mgmt fn 0x%x for non-existant session\n", (int )vha->vp_idx, fn); tmp___0 = qlt_sched_sess_work(tgt, 2, (void const *)iocb, 64U); return (tmp___0); } else { } tmp___1 = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); return (tmp___1); } } static int __qlt_abort_task(struct scsi_qla_host *vha , struct imm_ntfy_from_isp *iocb , struct qla_tgt_sess *sess ) { struct atio_from_isp *a ; struct qla_hw_data *ha ; struct qla_tgt_mgmt_cmd *mcmd ; uint32_t lun ; uint32_t unpacked_lun ; int rc ; void *tmp ; size_t __len ; void *__ret ; int tmp___0 ; { a = (struct atio_from_isp *)iocb; ha = vha->hw; tmp = mempool_alloc(qla_tgt_mgmt_cmd_mempool, 32U); mcmd = (struct qla_tgt_mgmt_cmd *)tmp; if ((unsigned long )mcmd == (unsigned long )((struct qla_tgt_mgmt_cmd *)0)) { ql_dbg(8192U, vha, 61535, "qla_target(%d): %s: Allocation of ABORT cmd failed\n", (int )vha->vp_idx, "__qlt_abort_task"); return (-12); } else { } memset((void *)mcmd, 0, 1152UL); mcmd->sess = sess; __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)(& mcmd->orig_iocb.imm_ntfy), (void const *)iocb, __len); } else { __ret = __builtin_memcpy((void *)(& mcmd->orig_iocb.imm_ntfy), (void const *)iocb, __len); } lun = (uint32_t )a->u.isp24.fcp_cmnd.lun; tmp___0 = scsilun_to_int((struct scsi_lun *)(& lun)); unpacked_lun = (uint32_t )tmp___0; rc = (*((ha->tgt.tgt_ops)->handle_tmr))(mcmd, unpacked_lun, 1, (uint32_t )iocb->u.isp2x.seq_id); if (rc != 0) { ql_dbg(8192U, vha, 61536, "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", (int )vha->vp_idx, rc); mempool_free((void *)mcmd, qla_tgt_mgmt_cmd_mempool); return (-14); } else { } return (0); } } static int qlt_abort_task(struct scsi_qla_host *vha , struct imm_ntfy_from_isp *iocb ) { struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; int loop_id ; int tmp ; int tmp___0 ; { ha = vha->hw; loop_id = (int )ha->device_type < 0 ? 
(int )((struct atio_from_isp *)iocb)->u.isp2x.target.extended : (int )((struct atio_from_isp *)iocb)->u.isp2x.target.id.standard; sess = (*((ha->tgt.tgt_ops)->find_sess_by_loop_id))(vha, (int )((uint16_t const )loop_id)); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { ql_dbg(8192U, vha, 61477, "qla_target(%d): task abort for unexisting session\n", (int )vha->vp_idx); tmp = qlt_sched_sess_work(ha->tgt.qla_tgt, 1, (void const *)iocb, 64U); return (tmp); } else { } tmp___0 = __qlt_abort_task(vha, iocb, sess); return (tmp___0); } } static int qlt_24xx_handle_els(struct scsi_qla_host *vha , struct imm_ntfy_from_isp *iocb ) { struct qla_hw_data *ha ; int res ; struct qla_tgt *tgt ; { ha = vha->hw; res = 0; ql_dbg(8192U, vha, 61478, "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", (int )vha->vp_idx, (uint8_t *)(& iocb->u.isp24.port_id), (int )iocb->u.isp24.status_subcode); switch ((int )iocb->u.isp24.status_subcode) { case 3: ; case 4: ; case 32: ; case 5: ; case 33: res = qlt_reset(vha, (void *)iocb, 65533); goto ldv_61934; case 80: ; case 82: tgt = ha->tgt.qla_tgt; if ((unsigned int )*((unsigned char *)tgt + 32UL) != 0U) { qlt_send_notify_ack(vha, & tgt->link_reinit_iocb, 0U, 0, 0, 0, 0, 0); tgt->link_reinit_iocb_pending = 0U; } else { } res = 1; goto ldv_61934; default: ql_dbg(8192U, vha, 61537, "qla_target(%d): Unsupported ELS command %x received\n", (int )vha->vp_idx, (int )iocb->u.isp24.status_subcode); res = qlt_reset(vha, (void *)iocb, 65533); goto ldv_61934; } ldv_61934: ; return (res); } } static int qlt_set_data_offset(struct qla_tgt_cmd *cmd , uint32_t offset ) { struct scatterlist *sg ; struct scatterlist *sgp ; struct scatterlist *sg_srr ; struct scatterlist *sg_srr_start ; size_t first_offset ; size_t rem_offset ; size_t tmp ; int i ; int sg_srr_cnt ; int bufflen ; struct _ddebug descriptor ; long tmp___0 ; struct page *tmp___1 ; void *tmp___2 ; struct page *tmp___3 ; struct page *tmp___4 ; { sg_srr_start = (struct scatterlist *)0; first_offset = 0UL; rem_offset = (size_t )offset; tmp = 0UL; bufflen = 0; ql_dbg(16384U, cmd->vha, 57379, "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, cmd->sg_cnt: %u, direction: %d\n", cmd, cmd->sg, cmd->sg_cnt, (unsigned int )cmd->dma_data_direction); descriptor.modname = "qla2xxx"; descriptor.function = "qlt_set_data_offset"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"; descriptor.format = "Rejecting non zero SRR rel_offs: %u\n"; descriptor.lineno = 3165U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "Rejecting non zero SRR rel_offs: %u\n", offset); } else { } return (-1); if ((unsigned long )cmd->sg == (unsigned long )((struct scatterlist *)0) || cmd->sg_cnt == 0) { ql_dbg(16384U, cmd->vha, 57429, "Missing cmd->sg or zero cmd->sg_cnt in qla_tgt_set_data_offset\n"); return (-22); } else { } i = 0; sg = cmd->sg; goto ldv_61957; ldv_61956: tmp___1 = sg_page___3(sg); ql_dbg(16384U, cmd->vha, 57380, "sg[%d]: %p page: %p, length: %d, offset: %d\n", i, sg, tmp___1, sg->length, sg->offset); if ((size_t )sg->length + tmp > (size_t )offset) { first_offset = rem_offset; sg_srr_start = sg; ql_dbg(16384U, cmd->vha, 57381, "Found matching sg[%d], using %p as sg_srr_start, and using first_offset: 
%zu\n", i, sg, first_offset); goto ldv_61955; } else { } tmp = (size_t )sg->length + tmp; rem_offset = rem_offset - (size_t )sg->length; i = i + 1; sg = sg_next(sg); ldv_61957: ; if (cmd->sg_cnt > i) { goto ldv_61956; } else { } ldv_61955: ; if ((unsigned long )sg_srr_start == (unsigned long )((struct scatterlist *)0)) { ql_dbg(16384U, cmd->vha, 57430, "Unable to locate sg_srr_start for offset: %u\n", offset); return (-22); } else { } sg_srr_cnt = cmd->sg_cnt - i; tmp___2 = kzalloc((unsigned long )sg_srr_cnt * 40UL, 208U); sg_srr = (struct scatterlist *)tmp___2; if ((unsigned long )sg_srr == (unsigned long )((struct scatterlist *)0)) { ql_dbg(16384U, cmd->vha, 57431, "Unable to allocate sgp\n"); return (-12); } else { } sg_init_table(sg_srr, (unsigned int )sg_srr_cnt); sgp = sg_srr; i = 0; sg = sg_srr_start; goto ldv_61960; ldv_61959: ; if (first_offset != 0UL) { tmp___3 = sg_page___3(sg); sg_set_page(sgp, tmp___3, sg->length - (unsigned int )first_offset, (unsigned int )first_offset); first_offset = 0UL; } else { tmp___4 = sg_page___3(sg); sg_set_page(sgp, tmp___4, sg->length, 0U); } bufflen = (int )(sgp->length + (unsigned int )bufflen); sgp = sg_next(sgp); if ((unsigned long )sgp == (unsigned long )((struct scatterlist *)0)) { goto ldv_61958; } else { } i = i + 1; sg = sg_next(sg); ldv_61960: ; if (i < sg_srr_cnt) { goto ldv_61959; } else { } ldv_61958: cmd->sg = sg_srr; cmd->sg_cnt = sg_srr_cnt; cmd->bufflen = bufflen; cmd->offset = (int )((uint32_t )cmd->offset + offset); cmd->free_sg = 1U; ql_dbg(16384U, cmd->vha, 57382, "New cmd->sg: %p\n", cmd->sg); ql_dbg(16384U, cmd->vha, 57383, "New cmd->sg_cnt: %u\n", cmd->sg_cnt); ql_dbg(16384U, cmd->vha, 57384, "New cmd->bufflen: %u\n", cmd->bufflen); ql_dbg(16384U, cmd->vha, 57385, "New cmd->offset: %u\n", cmd->offset); if (cmd->sg_cnt < 0) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"), "i" (3244), "i" (12UL)); ldv_61961: ; goto ldv_61961; } else { } if (cmd->bufflen < 0) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"), "i" (3247), "i" (12UL)); ldv_61962: ; goto ldv_61962; } else { } return (0); } } __inline static int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd , uint32_t srr_rel_offs , int *xmit_type ) { int res ; int rel_offs ; { res = 0; rel_offs = (int )(srr_rel_offs - (uint32_t )cmd->offset); ql_dbg(8192U, cmd->vha, 61479, "srr_rel_offs=%d, rel_offs=%d", srr_rel_offs, rel_offs); *xmit_type = 3; if (rel_offs < 0) { ql_dbg(8192U, cmd->vha, 61538, "qla_target(%d): SRR rel_offs (%d) < 0", (int )(cmd->vha)->vp_idx, rel_offs); res = -1; } else if (cmd->bufflen == rel_offs) { *xmit_type = 2; } else if (rel_offs > 0) { res = qlt_set_data_offset(cmd, (uint32_t )rel_offs); } else { } return (res); } } static void qlt_handle_srr(struct scsi_qla_host *vha , struct qla_tgt_srr_ctio *sctio , struct qla_tgt_srr_imm *imm ) { struct imm_ntfy_from_isp *ntfy 
; struct qla_hw_data *ha ; struct qla_tgt_cmd *cmd ; struct se_cmd *se_cmd ; unsigned long flags ; int xmit_type ; int resp ; uint32_t offset ; uint16_t srr_ui ; raw_spinlock_t *tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; int tmp___2 ; int tmp___3 ; raw_spinlock_t *tmp___4 ; int tmp___5 ; raw_spinlock_t *tmp___6 ; { ntfy = & imm->imm_ntfy; ha = vha->hw; cmd = sctio->cmd; se_cmd = & cmd->se_cmd; xmit_type = 0; resp = 0; offset = ntfy->u.isp24.srr_rel_offs; srr_ui = ntfy->u.isp24.srr_ui; ql_dbg(8192U, vha, 61480, "SRR cmd %p, srr_ui %x\n", cmd, (int )srr_ui); switch ((int )srr_ui) { case 7: tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); qlt_send_notify_ack(vha, ntfy, 0U, 0, 0, 0, 0, 0); spin_unlock_irqrestore(& ha->hardware_lock, flags); xmit_type = 2; resp = 1; goto ldv_61988; case 1: ; if ((unsigned long )cmd->sg == (unsigned long )((struct scatterlist *)0) || cmd->sg_cnt == 0) { ql_dbg(8192U, vha, 61539, "Unable to process SRR_IU_DATA_IN due to missing cmd->sg, state: %d\n", cmd->state); dump_stack(); goto out_reject; } else { } if ((unsigned int )se_cmd->scsi_status != 0U) { ql_dbg(16384U, vha, 57386, "Rejecting SRR_IU_DATA_IN with non GOOD scsi_status\n"); goto out_reject; } else { } cmd->bufflen = (int )se_cmd->data_length; tmp___2 = qlt_has_data(cmd); if (tmp___2 != 0) { tmp___0 = qlt_srr_adjust_data(cmd, offset, & xmit_type); if (tmp___0 != 0) { goto out_reject; } else { } tmp___1 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___1); qlt_send_notify_ack(vha, ntfy, 0U, 0, 0, 0, 0, 0); spin_unlock_irqrestore(& ha->hardware_lock, flags); resp = 1; } else { ql_dbg(8192U, vha, 61540, "qla_target(%d): SRR for in data for cmd without them (tag %d, SCSI status %d), reject", (int )vha->vp_idx, cmd->tag, (int )cmd->se_cmd.scsi_status); goto out_reject; } goto ldv_61988; case 5: ; if ((unsigned long )cmd->sg == (unsigned long )((struct scatterlist *)0) || cmd->sg_cnt == 0) { ql_dbg(8192U, vha, 61541, "Unable to process SRR_IU_DATA_OUT due to missing cmd->sg\n"); dump_stack(); goto out_reject; } else { } if ((unsigned int )se_cmd->scsi_status != 0U) { ql_dbg(16384U, vha, 57387, "Rejecting SRR_IU_DATA_OUT with non GOOD scsi_status\n"); goto out_reject; } else { } cmd->bufflen = (int )se_cmd->data_length; tmp___5 = qlt_has_data(cmd); if (tmp___5 != 0) { tmp___3 = qlt_srr_adjust_data(cmd, offset, & xmit_type); if (tmp___3 != 0) { goto out_reject; } else { } tmp___4 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___4); qlt_send_notify_ack(vha, ntfy, 0U, 0, 0, 0, 0, 0); spin_unlock_irqrestore(& ha->hardware_lock, flags); if (xmit_type & 1) { qlt_rdy_to_xfer(cmd); } else { } } else { ql_dbg(8192U, vha, 61542, "qla_target(%d): SRR for out data for cmd without them (tag %d, SCSI status %d), reject", (int )vha->vp_idx, cmd->tag, (int )cmd->se_cmd.scsi_status); goto out_reject; } goto ldv_61988; default: ql_dbg(8192U, vha, 61543, "qla_target(%d): Unknown srr_ui value %x", (int )vha->vp_idx, (int )srr_ui); goto out_reject; } ldv_61988: ; if (resp != 0) { qlt_xmit_response(cmd, xmit_type, (int )se_cmd->scsi_status); } else { } return; out_reject: tmp___6 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___6); qlt_send_notify_ack(vha, ntfy, 0U, 0, 0, 1, 9, 0); if (cmd->state == 1) { cmd->state = 2; dump_stack(); } else { qlt_send_term_exchange(vha, cmd, & cmd->atio, 1); } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha , 
struct qla_tgt_srr_imm *imm , int ha_locked ) { struct qla_hw_data *ha ; unsigned long flags ; raw_spinlock_t *tmp ; { ha = vha->hw; flags = 0UL; if (ha_locked == 0) { tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); } else { } qlt_send_notify_ack(vha, & imm->imm_ntfy, 0U, 0, 0, 1, 9, 0); if (ha_locked == 0) { spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } kfree((void const *)imm); return; } } static void qlt_handle_srr_work(struct work_struct *work ) { struct qla_tgt *tgt ; struct work_struct const *__mptr ; struct scsi_qla_host *vha ; struct qla_tgt_srr_ctio *sctio ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr___0 ; struct qla_tgt_srr_imm *imm ; struct qla_tgt_srr_imm *i ; struct qla_tgt_srr_imm *ti ; struct qla_tgt_cmd *cmd ; struct se_cmd *se_cmd ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; { __mptr = (struct work_struct const *)work; tgt = (struct qla_tgt *)__mptr + 0xfffffffffffffd10UL; vha = tgt->vha; ql_dbg(8192U, vha, 61481, "Entering SRR work (tgt %p)\n", tgt); restart: tmp = spinlock_check(& tgt->srr_lock); flags = _raw_spin_lock_irqsave(tmp); __mptr___0 = (struct list_head const *)tgt->srr_ctio_list.next; sctio = (struct qla_tgt_srr_ctio *)__mptr___0; goto ldv_62045; ldv_62044: imm = (struct qla_tgt_srr_imm *)0; __mptr___1 = (struct list_head const *)tgt->srr_imm_list.next; i = (struct qla_tgt_srr_imm *)__mptr___1; __mptr___2 = (struct list_head const *)i->srr_list_entry.next; ti = (struct qla_tgt_srr_imm *)__mptr___2; goto ldv_62041; ldv_62040: ; if (i->srr_id == sctio->srr_id) { list_del(& i->srr_list_entry); if ((unsigned long )imm != (unsigned long )((struct qla_tgt_srr_imm *)0)) { ql_dbg(8192U, vha, 61544, "qla_target(%d): There must be only one IMM SRR per CTIO SRR (IMM SRR %p, id %d, CTIO %p\n", (int )vha->vp_idx, i, i->srr_id, sctio); qlt_reject_free_srr_imm(tgt->vha, i, 0); } else { imm = i; } } else { } i = ti; __mptr___3 = (struct list_head const *)ti->srr_list_entry.next; ti = (struct qla_tgt_srr_imm *)__mptr___3; ldv_62041: ; if ((unsigned long )(& i->srr_list_entry) != (unsigned long )(& tgt->srr_imm_list)) { goto ldv_62040; } else { } ql_dbg(8192U, vha, 61482, "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, sctio->srr_id); if ((unsigned long )imm == (unsigned long )((struct qla_tgt_srr_imm *)0)) { ql_dbg(8192U, vha, 61483, "Not found matching IMM for SRR CTIO (id %d)\n", sctio->srr_id); goto ldv_62043; } else { list_del(& sctio->srr_list_entry); } spin_unlock_irqrestore(& tgt->srr_lock, flags); cmd = sctio->cmd; cmd->offset = 0; if ((unsigned int )*((unsigned char *)cmd + 1256UL) != 0U) { kfree((void const *)cmd->sg); cmd->sg = (struct scatterlist *)0; cmd->free_sg = 0U; } else { } se_cmd = & cmd->se_cmd; cmd->sg_cnt = (int )se_cmd->t_data_nents; cmd->sg = se_cmd->t_data_sg; ql_dbg(8192U, vha, 61484, "SRR cmd %p (se_cmd %p, tag %d, op %x), sg_cnt=%d, offset=%d", cmd, & cmd->se_cmd, cmd->tag, (int )*(se_cmd->t_task_cdb), cmd->sg_cnt, cmd->offset); qlt_handle_srr(vha, sctio, imm); kfree((void const *)imm); kfree((void const *)sctio); goto restart; ldv_62043: __mptr___4 = (struct list_head const *)sctio->srr_list_entry.next; sctio = (struct qla_tgt_srr_ctio *)__mptr___4; ldv_62045: ; if ((unsigned long )(& sctio->srr_list_entry) != (unsigned long )(& tgt->srr_ctio_list)) { goto ldv_62044; } else { } spin_unlock_irqrestore(& tgt->srr_lock, flags); return; } } static void 
qlt_prepare_srr_imm(struct scsi_qla_host *vha , struct imm_ntfy_from_isp *iocb ) { struct qla_tgt_srr_imm *imm ; struct qla_hw_data *ha ; struct qla_tgt *tgt ; struct qla_tgt_srr_ctio *sctio ; void *tmp ; size_t __len ; void *__ret ; int found ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct qla_tgt_srr_ctio *ts ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; { ha = vha->hw; tgt = ha->tgt.qla_tgt; tgt->imm_srr_id = tgt->imm_srr_id + 1; ql_dbg(8192U, vha, 61485, "qla_target(%d): SRR received\n", (int )vha->vp_idx); tmp = kzalloc(88UL, 32U); imm = (struct qla_tgt_srr_imm *)tmp; if ((unsigned long )imm != (unsigned long )((struct qla_tgt_srr_imm *)0)) { __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)(& imm->imm_ntfy), (void const *)iocb, __len); } else { __ret = __builtin_memcpy((void *)(& imm->imm_ntfy), (void const *)iocb, __len); } spin_lock(& tgt->srr_lock); imm->srr_id = tgt->imm_srr_id; list_add_tail(& imm->srr_list_entry, & tgt->srr_imm_list); ql_dbg(8192U, vha, 61486, "IMM NTFY SRR %p added (id %d, ui %x)\n", imm, imm->srr_id, (int )iocb->u.isp24.srr_ui); if (tgt->imm_srr_id == tgt->ctio_srr_id) { found = 0; __mptr = (struct list_head const *)tgt->srr_ctio_list.next; sctio = (struct qla_tgt_srr_ctio *)__mptr; goto ldv_62065; ldv_62064: ; if (sctio->srr_id == imm->srr_id) { found = 1; goto ldv_62063; } else { } __mptr___0 = (struct list_head const *)sctio->srr_list_entry.next; sctio = (struct qla_tgt_srr_ctio *)__mptr___0; ldv_62065: ; if ((unsigned long )(& sctio->srr_list_entry) != (unsigned long )(& tgt->srr_ctio_list)) { goto ldv_62064; } else { } ldv_62063: ; if (found != 0) { ql_dbg(8192U, vha, 61487, "%s", (char *)"Scheduling srr work\n"); schedule_work(& tgt->srr_work); } else { ql_dbg(8192U, vha, 61488, "qla_target(%d): imm_srr_id == ctio_srr_id (%d), but there is no corresponding SRR CTIO, deleting IMM SRR %p\n", (int )vha->vp_idx, tgt->ctio_srr_id, imm); list_del(& imm->srr_list_entry); kfree((void const *)imm); spin_unlock(& tgt->srr_lock); goto out_reject; } } else { } spin_unlock(& tgt->srr_lock); } else { ql_dbg(8192U, vha, 61545, "qla_target(%d): Unable to allocate SRR IMM entry, SRR request will be rejected\n", (int )vha->vp_idx); spin_lock(& tgt->srr_lock); __mptr___1 = (struct list_head const *)tgt->srr_ctio_list.next; sctio = (struct qla_tgt_srr_ctio *)__mptr___1; __mptr___2 = (struct list_head const *)sctio->srr_list_entry.next; ts = (struct qla_tgt_srr_ctio *)__mptr___2; goto ldv_62075; ldv_62074: ; if (sctio->srr_id == tgt->imm_srr_id) { ql_dbg(8192U, vha, 61489, "CTIO SRR %p deleted (id %d)\n", sctio, sctio->srr_id); list_del(& sctio->srr_list_entry); qlt_send_term_exchange(vha, sctio->cmd, & (sctio->cmd)->atio, 1); kfree((void const *)sctio); } else { } sctio = ts; __mptr___3 = (struct list_head const *)ts->srr_list_entry.next; ts = (struct qla_tgt_srr_ctio *)__mptr___3; ldv_62075: ; if ((unsigned long )(& sctio->srr_list_entry) != (unsigned long )(& tgt->srr_ctio_list)) { goto ldv_62074; } else { } spin_unlock(& tgt->srr_lock); goto out_reject; } return; out_reject: qlt_send_notify_ack(vha, iocb, 0U, 0, 0, 1, 9, 0); return; } } static void qlt_handle_imm_notify(struct scsi_qla_host *vha , struct imm_ntfy_from_isp *iocb ) { struct qla_hw_data *ha ; uint32_t add_flags ; int send_notify_ack ; uint16_t status ; int tmp ; struct qla_tgt *tgt ; size_t __len ; void *__ret ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int 
tmp___6 ; { ha = vha->hw; add_flags = 0U; send_notify_ack = 1; status = iocb->u.isp2x.status; switch ((int )status) { case 14: ql_dbg(8192U, vha, 61490, "qla_target(%d): LIP reset (loop %#x), subcode %x\n", (int )vha->vp_idx, (int )iocb->u.isp24.nport_handle, (int )iocb->u.isp24.status_subcode); tmp = qlt_reset(vha, (void *)iocb, 65534); if (tmp == 0) { send_notify_ack = 0; } else { } goto ldv_62086; case 15: tgt = ha->tgt.qla_tgt; ql_dbg(8192U, vha, 61491, "qla_target(%d): LINK REINIT (loop %#x, subcode %x)\n", (int )vha->vp_idx, (int )iocb->u.isp24.nport_handle, (int )iocb->u.isp24.status_subcode); if ((unsigned int )*((unsigned char *)tgt + 32UL) != 0U) { qlt_send_notify_ack(vha, & tgt->link_reinit_iocb, 0U, 0, 0, 0, 0, 0); } else { } __len = 64UL; if (__len > 63UL) { __ret = __memcpy((void *)(& tgt->link_reinit_iocb), (void const *)iocb, __len); } else { __ret = __builtin_memcpy((void *)(& tgt->link_reinit_iocb), (void const *)iocb, __len); } tgt->link_reinit_iocb_pending = 1U; send_notify_ack = 0; goto ldv_62086; case 41: ql_dbg(8192U, vha, 61492, "qla_target(%d): Port logout (loop %#x, subcode %x)\n", (int )vha->vp_idx, (int )iocb->u.isp24.nport_handle, (int )iocb->u.isp24.status_subcode); tmp___0 = qlt_reset(vha, (void *)iocb, 65533); if (tmp___0 == 0) { send_notify_ack = 0; } else { } goto ldv_62086; case 45: ql_dbg(8192U, vha, 61493, "qla_target(%d): Global TPRLO (%x)\n", (int )vha->vp_idx, (int )status); tmp___1 = qlt_reset(vha, (void *)iocb, 65532); if (tmp___1 == 0) { send_notify_ack = 0; } else { } goto ldv_62086; case 42: ql_dbg(8192U, vha, 61494, "qla_target(%d): Port config changed (%x)\n", (int )vha->vp_idx, (int )status); tmp___2 = qlt_reset(vha, (void *)iocb, 65534); if (tmp___2 == 0) { send_notify_ack = 0; } else { } goto ldv_62086; case 46: ql_dbg(8192U, vha, 61546, "qla_target(%d): Link failure detected\n", (int )vha->vp_idx); tmp___3 = qlt_reset(vha, (void *)iocb, 65532); if (tmp___3 == 0) { send_notify_ack = 0; } else { } goto ldv_62086; case 22: ql_dbg(8192U, vha, 61547, "qla_target(%d): Cannot provide requested capability (IOCB overflowed the immediate notify resource count)\n", (int )vha->vp_idx); goto ldv_62086; case 32: ql_dbg(8192U, vha, 61495, "qla_target(%d): Abort Task (S %08x I %#x -> L %#x)\n", (int )vha->vp_idx, (int )iocb->u.isp2x.seq_id, (int )ha->device_type < 0 ? 
(int )((struct atio_from_isp *)iocb)->u.isp2x.target.extended : (int )((struct atio_from_isp *)iocb)->u.isp2x.target.id.standard, (int )iocb->u.isp2x.lun); tmp___4 = qlt_abort_task(vha, iocb); if (tmp___4 == 0) { send_notify_ack = 0; } else { } goto ldv_62086; case 52: ql_dbg(8192U, vha, 61548, "qla_target(%d): Out of resources, host %ld\n", (int )vha->vp_idx, vha->host_no); goto ldv_62086; case 54: ql_dbg(8192U, vha, 61496, "qla_target(%d): Immediate notify task %x\n", (int )vha->vp_idx, (int )iocb->u.isp2x.task_flags); tmp___5 = qlt_handle_task_mgmt(vha, (void *)iocb); if (tmp___5 == 0) { send_notify_ack = 0; } else { } goto ldv_62086; case 70: tmp___6 = qlt_24xx_handle_els(vha, iocb); if (tmp___6 == 0) { send_notify_ack = 0; } else { } goto ldv_62086; case 69: qlt_prepare_srr_imm(vha, iocb); send_notify_ack = 0; goto ldv_62086; default: ql_dbg(8192U, vha, 61549, "qla_target(%d): Received unknown immediate notify status %x\n", (int )vha->vp_idx, (int )status); goto ldv_62086; } ldv_62086: ; if (send_notify_ack != 0) { qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0); } else { } return; } } static void qlt_send_busy(struct scsi_qla_host *vha , struct atio_from_isp *atio , uint16_t status ) { struct ctio7_to_24xx *ctio24 ; struct qla_hw_data *ha ; request_t *pkt ; struct qla_tgt_sess *sess ; void *tmp ; __u16 tmp___0 ; { ha = vha->hw; sess = (struct qla_tgt_sess *)0; sess = (*((ha->tgt.tgt_ops)->find_sess_by_s_id))(vha, (uint8_t const *)(& atio->u.isp24.fcp_hdr.s_id)); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { qlt_send_term_exchange(vha, (struct qla_tgt_cmd *)0, atio, 1); return; } else { } tmp = qla2x00_alloc_iocbs(vha, (srb_t *)0); pkt = (request_t *)tmp; if ((unsigned long )pkt == (unsigned long )((request_t *)0)) { ql_dbg(8192U, vha, 61550, "qla_target(%d): %s failed: unable to allocate request packet", (int )vha->vp_idx, "qlt_send_busy"); return; } else { } pkt->entry_count = 1U; pkt->handle = 4294967295U; ctio24 = (struct ctio7_to_24xx *)pkt; ctio24->entry_type = 18U; ctio24->nport_handle = sess->loop_id; ctio24->timeout = 10U; ctio24->vp_index = (uint8_t )vha->vp_idx; ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; ctio24->exchange_addr = atio->u.isp24.exchange_addr; ctio24->u.status1.flags = (uint16_t )((int )((short )((int )atio->u.isp24.attr << 9)) | -32448); tmp___0 = __fswab16((int )atio->u.isp24.fcp_hdr.ox_id); ctio24->u.status1.ox_id = tmp___0; ctio24->u.status1.scsi_status = status; ctio24->u.status1.residual = get_unaligned_le32((void const *)(& atio->u.isp24.fcp_cmnd.add_cdb) + (unsigned long )atio->u.isp24.fcp_cmnd.add_cdb_len); if (ctio24->u.status1.residual != 0U) { ctio24->u.status1.scsi_status = (uint16_t )((unsigned int )ctio24->u.status1.scsi_status | 2048U); } else { } qla2x00_start_iocbs(vha, vha->req); return; } } static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha , struct atio_from_isp *atio ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; int rc ; long tmp ; u32 tmp___0 ; __u32 tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; { ha = vha->hw; tgt = ha->tgt.qla_tgt; tmp = ldv__builtin_expect((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0), 0L); if (tmp != 0L) { ql_dbg(8192U, vha, 61497, "ATIO pkt, but no tgt (ha %p)", ha); return; } else { } ql_dbg(16384U, vha, 57388, "qla_target(%d): ATIO pkt %p: type %02x count %02x", (int )vha->vp_idx, atio, (int 
)atio->u.raw.entry_type, (int )atio->u.raw.entry_count); tgt->irq_cmd_count = tgt->irq_cmd_count + 1; switch ((int )atio->u.raw.entry_type) { case 6: tmp___0 = get_unaligned_le32((void const *)(& atio->u.isp24.fcp_cmnd.add_cdb) + (unsigned long )atio->u.isp24.fcp_cmnd.add_cdb_len); tmp___1 = __fswab32(tmp___0); ql_dbg(16384U, vha, 57389, "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n", (int )vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, (int )atio->u.isp24.fcp_cmnd.rddata, (int )atio->u.isp24.fcp_cmnd.wrdata, (int )atio->u.isp24.fcp_cmnd.add_cdb_len, tmp___1, (int )atio->u.isp24.fcp_hdr.s_id[0], (int )atio->u.isp24.fcp_hdr.s_id[1], (int )atio->u.isp24.fcp_hdr.s_id[2]); tmp___2 = ldv__builtin_expect(atio->u.isp24.exchange_addr == 4294967295U, 0L); if (tmp___2 != 0L) { ql_dbg(16384U, vha, 57432, "qla_target(%d): ATIO_TYPE7 received with UNKNOWN exchange address, sending QUEUE_FULL\n", (int )vha->vp_idx); qlt_send_busy(vha, atio, 40); goto ldv_62121; } else { } tmp___3 = ldv__builtin_expect((unsigned int )atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0U, 1L); if (tmp___3 != 0L) { rc = qlt_handle_cmd_for_atio(vha, atio); } else { rc = qlt_handle_task_mgmt(vha, (void *)atio); } tmp___4 = ldv__builtin_expect(rc != 0, 0L); if (tmp___4 != 0L) { if (rc == -3) { qlt_send_busy(vha, atio, 8); } else if (tgt->tgt_stop != 0) { ql_dbg(16384U, vha, 57433, "qla_target: Unable to send command to target for req, ignoring.\n"); } else { ql_dbg(16384U, vha, 57434, "qla_target(%d): Unable to send command to target, sending BUSY status.\n", (int )vha->vp_idx); qlt_send_busy(vha, atio, 8); } } else { } goto ldv_62121; case 13: tmp___5 = ldv__builtin_expect((unsigned int )atio->u.isp2x.entry_status != 0U, 0L); if (tmp___5 != 0L) { ql_dbg(16384U, vha, 57435, "qla_target(%d): Received ATIO packet %x with error status %x\n", (int )vha->vp_idx, (int )atio->u.raw.entry_type, (int )atio->u.isp2x.entry_status); goto ldv_62121; } else { } ql_dbg(16384U, vha, 57390, "%s", (char *)"IMMED_NOTIFY ATIO"); qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); goto ldv_62121; default: ql_dbg(16384U, vha, 57436, "qla_target(%d): Received unknown ATIO atio type %x\n", (int )vha->vp_idx, (int )atio->u.raw.entry_type); goto ldv_62121; } ldv_62121: tgt->irq_cmd_count = tgt->irq_cmd_count - 1; return; } } static void qlt_response_pkt(struct scsi_qla_host *vha , response_t *pkt ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; long tmp ; struct ctio7_from_24xx *entry ; struct atio_from_isp *atio ; int rc ; long tmp___0 ; struct ctio_to_2xxx *entry___0 ; struct ctio_to_2xxx *entry___1 ; struct nack_to_isp *entry___2 ; struct abts_resp_from_24xx_fw *entry___3 ; { ha = vha->hw; tgt = ha->tgt.qla_tgt; tmp = ldv__builtin_expect((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0), 0L); if (tmp != 0L) { ql_dbg(16384U, vha, 57437, "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n", (int )vha->vp_idx, (int )pkt->entry_type, ha); return; } else { } ql_dbg(16384U, vha, 57391, "qla_target(%d): response pkt %p: T %02x C %02x S %02x handle %#x\n", (int )vha->vp_idx, pkt, (int )pkt->entry_type, (int )pkt->entry_count, (int )pkt->entry_status, pkt->handle); tgt->irq_cmd_count = tgt->irq_cmd_count + 1; switch ((int )pkt->entry_type) { case 18: entry = (struct ctio7_from_24xx *)pkt; ql_dbg(16384U, vha, 57392, "CTIO_TYPE7: instance %d\n", (int )vha->vp_idx); qlt_do_ctio_completion(vha, entry->handle, (uint32_t )((int )entry->status | ((int )pkt->entry_status << 16)), (void 
*)entry); goto ldv_62132; case 22: atio = (struct atio_from_isp *)pkt; ql_dbg(16384U, vha, 57393, "ACCEPT_TGT_IO instance %d status %04x lun %04x read/write %d data_length %04x target_id %02x rx_id %04x\n ", (int )vha->vp_idx, (int )atio->u.isp2x.status, (int )atio->u.isp2x.lun, (int )atio->u.isp2x.execution_codes, atio->u.isp2x.data_length, (int )ha->device_type < 0 ? (int )atio->u.isp2x.target.extended : (int )atio->u.isp2x.target.id.standard, (int )atio->u.isp2x.rx_id); if ((unsigned int )atio->u.isp2x.status != 61U) { ql_dbg(16384U, vha, 57438, "qla_target(%d): ATIO with error status %x received\n", (int )vha->vp_idx, (int )atio->u.isp2x.status); goto ldv_62132; } else { } ql_dbg(16384U, vha, 57394, "FCP CDB: 0x%02x, sizeof(cdb): %lu", (int )atio->u.isp2x.cdb[0], 16UL); rc = qlt_handle_cmd_for_atio(vha, atio); tmp___0 = ldv__builtin_expect(rc != 0, 0L); if (tmp___0 != 0L) { if (rc == -3) { qlt_send_busy(vha, atio, 0); } else if (tgt->tgt_stop != 0) { ql_dbg(16384U, vha, 57439, "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); qlt_send_term_exchange(vha, (struct qla_tgt_cmd *)0, atio, 1); } else { ql_dbg(16384U, vha, 57440, "qla_target(%d): Unable to send command to target, sending BUSY status\n", (int )vha->vp_idx); qlt_send_busy(vha, atio, 0); } } else { } goto ldv_62132; case 23: entry___0 = (struct ctio_to_2xxx *)pkt; ql_dbg(16384U, vha, 57395, "CONTINUE_TGT_IO: instance %d\n", (int )vha->vp_idx); qlt_do_ctio_completion(vha, entry___0->handle, (uint32_t )((int )entry___0->status | ((int )pkt->entry_status << 16)), (void *)entry___0); goto ldv_62132; case 31: entry___1 = (struct ctio_to_2xxx *)pkt; ql_dbg(16384U, vha, 57396, "CTIO_A64: instance %d\n", (int )vha->vp_idx); qlt_do_ctio_completion(vha, entry___1->handle, (uint32_t )((int )entry___1->status | ((int )pkt->entry_status << 16)), (void *)entry___1); goto ldv_62132; case 13: ql_dbg(16384U, vha, 57397, "%s", (char *)"IMMED_NOTIFY\n"); qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); goto ldv_62132; case 14: ; if (tgt->notify_ack_expected > 0) { entry___2 = (struct nack_to_isp *)pkt; ql_dbg(16384U, vha, 57398, "NOTIFY_ACK seq %08x status %x\n", (int )entry___2->u.isp2x.seq_id, (int )entry___2->u.isp2x.status); tgt->notify_ack_expected = tgt->notify_ack_expected - 1; if ((unsigned int )entry___2->u.isp2x.status != 1U) { ql_dbg(16384U, vha, 57441, "qla_target(%d): NOTIFY_ACK failed %x\n", (int )vha->vp_idx, (int )entry___2->u.isp2x.status); } else { } } else { ql_dbg(16384U, vha, 57442, "qla_target(%d): Unexpected NOTIFY_ACK received\n", (int )vha->vp_idx); } goto ldv_62132; case 84: ql_dbg(16384U, vha, 57399, "ABTS_RECV_24XX: instance %d\n", (int )vha->vp_idx); qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); goto ldv_62132; case 85: ; if (tgt->abts_resp_expected > 0) { entry___3 = (struct abts_resp_from_24xx_fw *)pkt; ql_dbg(16384U, vha, 57400, "ABTS_RESP_24XX: compl_status %x\n", (int )entry___3->compl_status); tgt->abts_resp_expected = tgt->abts_resp_expected - 1; if ((unsigned int )entry___3->compl_status != 0U) { if (entry___3->error_subcode1 == 30U && entry___3->error_subcode2 == 0U) { qlt_24xx_retry_term_exchange(vha, entry___3); } else { ql_dbg(16384U, vha, 57443, "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)", (int )vha->vp_idx, (int )entry___3->compl_status, entry___3->error_subcode1, entry___3->error_subcode2); } } else { } } else { ql_dbg(16384U, vha, 57444, "qla_target(%d): Unexpected ABTS_RESP_24XX received\n", (int )vha->vp_idx); } goto ldv_62132; 
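/* Orientation note added by the editor (not part of the generated source): the cases above dispatch CTIO completions (entry types 18, 23, 31), the 2xxx ACCEPT_TGT_IO path (22), IMMED_NOTIFY (13), NOTIFY_ACK (14) and the 24xx ABTS receive/response entries (84, 85); any other response entry type reaches the default branch below, which only logs it as unknown. */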
default: ql_dbg(16384U, vha, 57445, "qla_target(%d): Received unknown response pkt type %x\n", (int )vha->vp_idx, (int )pkt->entry_type); goto ldv_62132; } ldv_62132: tgt->irq_cmd_count = tgt->irq_cmd_count - 1; return; } } void qlt_async_event(uint16_t code , struct scsi_qla_host *vha , uint16_t *mailbox ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; int login_code ; int tmp ; long tmp___0 ; { ha = vha->hw; tgt = ha->tgt.qla_tgt; tmp = atomic_read((atomic_t const *)(& vha->loop_state)); ql_dbg(16384U, vha, 57401, "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n", vha->host_no, tmp, (int )vha->flags.init_done, (int )ha->operating_mode, (int )ha->current_topology); if ((unsigned long )ha->tgt.tgt_ops == (unsigned long )((struct qla_tgt_func_tmpl *)0)) { return; } else { } tmp___0 = ldv__builtin_expect((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0), 0L); if (tmp___0 != 0L) { ql_dbg(16384U, vha, 57402, "ASYNC EVENT %#x, but no tgt (ha %p)\n", (int )code, ha); return; } else { } if (((unsigned int )code == 32816U || (unsigned int )code == 32822U) && (int )ha->device_type & 1) { return; } else { } tgt->irq_cmd_count = tgt->irq_cmd_count + 1; switch ((int )code) { case 32769: ; case 32770: ; case 32771: ; case 32772: ql_dbg(8192U, vha, 61498, "qla_target(%d): System error async event %#x occurred", (int )vha->vp_idx, (int )code); goto ldv_62159; case 32773: set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); goto ldv_62159; case 32785: ql_dbg(8192U, vha, 61499, "qla_target(%d): Async LOOP_UP occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", (int )vha->vp_idx, (int )*mailbox, (int )*(mailbox + 1UL), (int )*(mailbox + 2UL), (int )*(mailbox + 3UL)); if ((unsigned int )*((unsigned char *)tgt + 32UL) != 0U) { qlt_send_notify_ack(vha, & tgt->link_reinit_iocb, 0U, 0, 0, 0, 0, 0); tgt->link_reinit_iocb_pending = 0U; } else { } goto ldv_62159; case 32784: ; case 32786: ; case 32787: ; case 32789: ql_dbg(8192U, vha, 61500, "qla_target(%d): Async event %#x occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", (int )vha->vp_idx, (int )code, (int )*mailbox, (int )*(mailbox + 1UL), (int )*(mailbox + 2UL), (int )*(mailbox + 3UL)); goto ldv_62159; case 32788: ql_dbg(8192U, vha, 61501, "qla_target(%d): Port update async event %#x occurred: updating the ports database (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", (int )vha->vp_idx, (int )code, (int )*mailbox, (int )*(mailbox + 1UL), (int )*(mailbox + 2UL), (int )*(mailbox + 3UL)); login_code = (int )*(mailbox + 2UL); if (login_code == 4) { ql_dbg(8192U, vha, 61502, "Async MB 2: Got PLOGI Complete\n"); } else if (login_code == 7) { ql_dbg(8192U, vha, 61503, "Async MB 2: Port Logged Out\n"); } else { } goto ldv_62159; default: ql_dbg(8192U, vha, 61504, "qla_target(%d): Async event %#x occurred: ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", (int )vha->vp_idx, (int )code, (int )*mailbox, (int )*(mailbox + 1UL), (int )*(mailbox + 2UL), (int )*(mailbox + 3UL)); goto ldv_62159; } ldv_62159: tgt->irq_cmd_count = tgt->irq_cmd_count - 1; return; } } static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha , uint16_t loop_id ) { fc_port_t *fcport ; int rc ; void *tmp ; { tmp = kzalloc(128UL, 208U); fcport = (fc_port_t *)tmp; if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { ql_dbg(8192U, vha, 61551, "qla_target(%d): Allocation of tmp FC port failed", (int )vha->vp_idx); return ((fc_port_t *)0); } else { } ql_dbg(8192U, vha, 61505, "loop_id %d", (int )loop_id); fcport->loop_id = loop_id; rc = qla2x00_get_port_database(vha, 
fcport, 0); if (rc != 0) { ql_dbg(8192U, vha, 61552, "qla_target(%d): Failed to retrieve fcport information -- get_port_database() returned %x (loop_id=0x%04x)", (int )vha->vp_idx, rc, (int )loop_id); kfree((void const *)fcport); return ((fc_port_t *)0); } else { } return (fcport); } } static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha , uint8_t *s_id ) { struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; fc_port_t *fcport ; int rc ; int global_resets ; uint16_t loop_id ; int tmp ; int tmp___0 ; { ha = vha->hw; sess = (struct qla_tgt_sess *)0; fcport = (fc_port_t *)0; loop_id = 0U; retry: global_resets = atomic_read((atomic_t const *)(& (ha->tgt.qla_tgt)->tgt_global_resets_count)); rc = qla24xx_get_loop_id(vha, (uint8_t const *)s_id, & loop_id); if (rc != 0) { if ((unsigned int )*s_id == 255U && (unsigned int )*(s_id + 1UL) == 252U) { ql_dbg(8192U, vha, 61506, "Unable to find initiator with S_ID %x:%x:%x", (int )*s_id, (int )*(s_id + 1UL), (int )*(s_id + 2UL)); } else { ql_dbg(8192U, vha, 61553, "qla_target(%d): Unable to find initiator with S_ID %x:%x:%x", (int )vha->vp_idx, (int )*s_id, (int )*(s_id + 1UL), (int )*(s_id + 2UL)); } return ((struct qla_tgt_sess *)0); } else { } fcport = qlt_get_port_database(vha, (int )loop_id); if ((unsigned long )fcport == (unsigned long )((fc_port_t *)0)) { return ((struct qla_tgt_sess *)0); } else { } tmp___0 = atomic_read((atomic_t const *)(& (ha->tgt.qla_tgt)->tgt_global_resets_count)); if (tmp___0 != global_resets) { tmp = atomic_read((atomic_t const *)(& (ha->tgt.qla_tgt)->tgt_global_resets_count)); ql_dbg(8192U, vha, 61507, "qla_target(%d): global reset during session discovery (counter was %d, new %d), retrying", (int )vha->vp_idx, global_resets, tmp); goto retry; } else { } sess = qlt_create_sess(vha, fcport, 1); kfree((void const *)fcport); return (sess); } } static void qlt_abort_work(struct qla_tgt *tgt , struct qla_tgt_sess_work_param *prm ) { struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; unsigned long flags ; uint32_t be_s_id ; uint8_t s_id[3U] ; int rc ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; { vha = tgt->vha; ha = vha->hw; sess = (struct qla_tgt_sess *)0; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if (tgt->tgt_stop != 0) { goto out_term; } else { } s_id[0] = prm->ldv_60976.abts.fcp_hdr_le.s_id[2]; s_id[1] = prm->ldv_60976.abts.fcp_hdr_le.s_id[1]; s_id[2] = prm->ldv_60976.abts.fcp_hdr_le.s_id[0]; sess = (*((ha->tgt.tgt_ops)->find_sess_by_s_id))(vha, (uint8_t const *)(& be_s_id)); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { spin_unlock_irqrestore(& ha->hardware_lock, flags); mutex_lock_nested(& ha->tgt.tgt_mutex, 0U); sess = qlt_make_local_sess(vha, (uint8_t *)(& s_id)); mutex_unlock(& ha->tgt.tgt_mutex); tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { goto out_term; } else { } } else { kref_get(& (sess->se_sess)->sess_kref); } if (tgt->tgt_stop != 0) { goto out_term; } else { } rc = __qlt_24xx_handle_abts(vha, & prm->ldv_60976.abts, sess); if (rc != 0) { goto out_term; } else { } (*((ha->tgt.tgt_ops)->put_sess))(sess); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; out_term: qlt_24xx_send_abts_resp(vha, & prm->ldv_60976.abts, 4U, 0); if ((unsigned long )sess != (unsigned long )((struct qla_tgt_sess *)0)) { (*((ha->tgt.tgt_ops)->put_sess))(sess); } else { } spin_unlock_irqrestore(& 
ha->hardware_lock, flags); return; } } static void qlt_tmr_work(struct qla_tgt *tgt , struct qla_tgt_sess_work_param *prm ) { struct atio_from_isp *a ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct qla_tgt_sess *sess ; unsigned long flags ; uint8_t *s_id ; int rc ; uint32_t lun ; uint32_t unpacked_lun ; int lun_size ; int fn ; void *iocb ; raw_spinlock_t *tmp ; raw_spinlock_t *tmp___0 ; int tmp___1 ; { a = & prm->ldv_60976.tm_iocb2; vha = tgt->vha; ha = vha->hw; sess = (struct qla_tgt_sess *)0; s_id = (uint8_t *)0U; tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if (tgt->tgt_stop != 0) { goto out_term; } else { } s_id = (uint8_t *)(& prm->ldv_60976.tm_iocb2.u.isp24.fcp_hdr.s_id); sess = (*((ha->tgt.tgt_ops)->find_sess_by_s_id))(vha, (uint8_t const *)s_id); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { spin_unlock_irqrestore(& ha->hardware_lock, flags); mutex_lock_nested(& ha->tgt.tgt_mutex, 0U); sess = qlt_make_local_sess(vha, s_id); mutex_unlock(& ha->tgt.tgt_mutex); tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); if ((unsigned long )sess == (unsigned long )((struct qla_tgt_sess *)0)) { goto out_term; } else { } } else { kref_get(& (sess->se_sess)->sess_kref); } iocb = (void *)a; lun = (uint32_t )a->u.isp24.fcp_cmnd.lun; lun_size = 4; fn = (int )a->u.isp24.fcp_cmnd.task_mgmt_flags; tmp___1 = scsilun_to_int((struct scsi_lun *)(& lun)); unpacked_lun = (uint32_t )tmp___1; rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); if (rc != 0) { goto out_term; } else { } (*((ha->tgt.tgt_ops)->put_sess))(sess); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; out_term: qlt_send_term_exchange(vha, (struct qla_tgt_cmd *)0, & prm->ldv_60976.tm_iocb2, 1); if ((unsigned long )sess != (unsigned long )((struct qla_tgt_sess *)0)) { (*((ha->tgt.tgt_ops)->put_sess))(sess); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } static void qlt_sess_work_fn(struct work_struct *work ) { struct qla_tgt *tgt ; struct work_struct const *__mptr ; struct scsi_qla_host *vha ; unsigned long flags ; raw_spinlock_t *tmp ; struct qla_tgt_sess_work_param *prm ; struct list_head const *__mptr___0 ; raw_spinlock_t *tmp___0 ; int tmp___1 ; { __mptr = (struct work_struct const *)work; tgt = (struct qla_tgt *)__mptr + 0xfffffffffffffe78UL; vha = tgt->vha; ql_dbg(8192U, vha, 61440, "Sess work (tgt %p)", tgt); tmp = spinlock_check(& tgt->sess_work_lock); flags = _raw_spin_lock_irqsave(tmp); goto ldv_62249; ldv_62248: __mptr___0 = (struct list_head const *)tgt->sess_works_list.next; prm = (struct qla_tgt_sess_work_param *)__mptr___0; list_del(& prm->sess_works_list_entry); spin_unlock_irqrestore(& tgt->sess_work_lock, flags); switch (prm->type) { case 1: qlt_abort_work(tgt, prm); goto ldv_62241; case 2: qlt_tmr_work(tgt, prm); goto ldv_62241; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"), "i" (4416), "i" (12UL)); ldv_62244: ; goto ldv_62244; goto ldv_62241; } ldv_62241: tmp___0 = spinlock_check(& tgt->sess_work_lock); flags = _raw_spin_lock_irqsave(tmp___0); kfree((void const *)prm); ldv_62249: tmp___1 = list_empty((struct 
list_head const *)(& tgt->sess_works_list)); if (tmp___1 == 0) { goto ldv_62248; } else { } spin_unlock_irqrestore(& tgt->sess_work_lock, flags); return; } } int qlt_add_target(struct qla_hw_data *ha , struct scsi_qla_host *base_vha ) { struct qla_tgt *tgt ; long tmp ; long tmp___0 ; void *tmp___1 ; struct lock_class_key __key ; struct lock_class_key __key___0 ; atomic_long_t __constr_expr_0 ; struct lock_class_key __key___1 ; struct lock_class_key __key___2 ; struct lock_class_key __key___3 ; atomic_long_t __constr_expr_1 ; struct lock_class_key __key___4 ; struct lock_class_key __key___5 ; atomic_long_t __constr_expr_2 ; int _min1 ; int _min2 ; { if (ql2x_ini_mode == 2) { return (0); } else { } if ((unsigned int )ha->tgt.atio_q_length == 0U) { ql_log(1U, base_vha, 57456, "This adapter does not support target mode.\n"); return (0); } else { } ql_dbg(16384U, base_vha, 57403, "Registering target for host %ld(%p)", base_vha->host_no, ha); tmp = ldv__builtin_expect((unsigned long )ha->tgt.qla_tgt != (unsigned long )((struct qla_tgt *)0), 0L); if (tmp != 0L) { goto _L; } else { tmp___0 = ldv__builtin_expect((unsigned long )ha->tgt.tgt_ops != (unsigned long )((struct qla_tgt_func_tmpl *)0), 0L); if (tmp___0 != 0L) { _L: /* CIL Label */ __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"), "i" (4444), "i" (12UL)); ldv_62256: ; goto ldv_62256; } else { } } tmp___1 = kzalloc(856UL, 208U); tgt = (struct qla_tgt *)tmp___1; if ((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0)) { ql_dbg(16384U, base_vha, 57446, "Unable to allocate struct qla_tgt\n"); return (-12); } else { } if (((int )((base_vha->host)->hostt)->supported_mode & 2) == 0) { ((base_vha->host)->hostt)->supported_mode = (unsigned char )((unsigned int )((base_vha->host)->hostt)->supported_mode | 2U); } else { } tgt->ha = ha; tgt->vha = base_vha; __init_waitqueue_head(& tgt->waitQ, "&tgt->waitQ", & __key); INIT_LIST_HEAD(& tgt->sess_list); INIT_LIST_HEAD(& tgt->del_sess_list); __init_work(& tgt->sess_del_work.work, 0); __constr_expr_0.counter = 137438953408L; tgt->sess_del_work.work.data = __constr_expr_0; lockdep_init_map(& tgt->sess_del_work.work.lockdep_map, "(&(&tgt->sess_del_work)->work)", & __key___0, 0); INIT_LIST_HEAD(& tgt->sess_del_work.work.entry); tgt->sess_del_work.work.func = (void (*)(struct work_struct * ))(& qlt_del_sess_work_fn); init_timer_key(& tgt->sess_del_work.timer, 2U, "(&(&tgt->sess_del_work)->timer)", & __key___1); tgt->sess_del_work.timer.function = & delayed_work_timer_fn; tgt->sess_del_work.timer.data = (unsigned long )(& tgt->sess_del_work); spinlock_check(& tgt->sess_work_lock); __raw_spin_lock_init(& tgt->sess_work_lock.ldv_6105.rlock, "&(&tgt->sess_work_lock)->rlock", & __key___2); __init_work(& tgt->sess_work, 0); __constr_expr_1.counter = 137438953408L; tgt->sess_work.data = __constr_expr_1; lockdep_init_map(& tgt->sess_work.lockdep_map, "(&tgt->sess_work)", & __key___3, 0); INIT_LIST_HEAD(& tgt->sess_work.entry); tgt->sess_work.func = & qlt_sess_work_fn; INIT_LIST_HEAD(& tgt->sess_works_list); spinlock_check(& tgt->srr_lock); __raw_spin_lock_init(& tgt->srr_lock.ldv_6105.rlock, "&(&tgt->srr_lock)->rlock", & __key___4); INIT_LIST_HEAD(& 
tgt->srr_ctio_list); INIT_LIST_HEAD(& tgt->srr_imm_list); __init_work(& tgt->srr_work, 0); __constr_expr_2.counter = 137438953408L; tgt->srr_work.data = __constr_expr_2; lockdep_init_map(& tgt->srr_work.lockdep_map, "(&tgt->srr_work)", & __key___5, 0); INIT_LIST_HEAD(& tgt->srr_work.entry); tgt->srr_work.func = & qlt_handle_srr_work; atomic_set(& tgt->tgt_global_resets_count, 0); ha->tgt.qla_tgt = tgt; ql_dbg(16384U, base_vha, 57447, "qla_target(%d): using 64 Bit PCI addressing", (int )base_vha->vp_idx); tgt->tgt_enable_64bit_addr = 1U; _min1 = 1270; _min2 = (int )(base_vha->req)->length + -3 > 0 ? (int )(base_vha->req)->length * 5 + -19 : 0; tgt->sg_tablesize = _min1 < _min2 ? _min1 : _min2; tgt->datasegs_per_cmd = 1; tgt->datasegs_per_cont = 5; mutex_lock_nested(& qla_tgt_mutex, 0U); list_add_tail(& tgt->tgt_list_entry, & qla_tgt_glist); mutex_unlock(& qla_tgt_mutex); return (0); } } int qlt_remove_target(struct qla_hw_data *ha , struct scsi_qla_host *vha ) { { if ((unsigned long )ha->tgt.qla_tgt == (unsigned long )((struct qla_tgt *)0)) { return (0); } else { } mutex_lock_nested(& qla_tgt_mutex, 0U); list_del(& (ha->tgt.qla_tgt)->tgt_list_entry); mutex_unlock(& qla_tgt_mutex); ql_dbg(16384U, vha, 57404, "Unregistering target for host %ld(%p)", vha->host_no, ha); qlt_release(ha->tgt.qla_tgt); return (0); } } static void qlt_lport_dump(struct scsi_qla_host *vha , u64 wwpn , unsigned char *b ) { int i ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; struct _ddebug descriptor___2 ; long tmp___2 ; struct _ddebug descriptor___3 ; long tmp___3 ; struct _ddebug descriptor___4 ; long tmp___4 ; struct _ddebug descriptor___5 ; long tmp___5 ; struct _ddebug descriptor___6 ; long tmp___6 ; struct _ddebug descriptor___7 ; long tmp___7 ; { descriptor.modname = "qla2xxx"; descriptor.function = "qlt_lport_dump"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"; descriptor.format = "qla2xxx HW vha->node_name: "; descriptor.lineno = 4512U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "qla2xxx HW vha->node_name: "); } else { } i = 0; goto ldv_62285; ldv_62284: descriptor___0.modname = "qla2xxx"; descriptor___0.function = "qlt_lport_dump"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"; descriptor___0.format = "%02x "; descriptor___0.lineno = 4514U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor___0, "%02x ", (int )vha->node_name[i]); } else { } i = i + 1; ldv_62285: ; if (i <= 7) { goto ldv_62284; } else { } descriptor___1.modname = "qla2xxx"; descriptor___1.function = "qlt_lport_dump"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"; descriptor___1.format = "\n"; 
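/*
 * The descriptor / descriptor___N variables and __dynamic_pr_debug() calls
 * in qlt_lport_dump() are the expanded form of the kernel's pr_debug()
 * macro with dynamic debug support: each call site carries its own struct
 * _ddebug (module name, function, file, format string, line number, flags)
 * and only prints when that site's flags have been enabled at run time,
 * which is why every print is guarded by an ldv__builtin_expect() test on
 * descriptor.flags & 1.
 */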
descriptor___1.lineno = 4515U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___1, "\n"); } else { } descriptor___2.modname = "qla2xxx"; descriptor___2.function = "qlt_lport_dump"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"; descriptor___2.format = "qla2xxx HW vha->port_name: "; descriptor___2.lineno = 4516U; descriptor___2.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor___2, "qla2xxx HW vha->port_name: "); } else { } i = 0; goto ldv_62291; ldv_62290: descriptor___3.modname = "qla2xxx"; descriptor___3.function = "qlt_lport_dump"; descriptor___3.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"; descriptor___3.format = "%02x "; descriptor___3.lineno = 4518U; descriptor___3.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_pr_debug(& descriptor___3, "%02x ", (int )vha->port_name[i]); } else { } i = i + 1; ldv_62291: ; if (i <= 7) { goto ldv_62290; } else { } descriptor___4.modname = "qla2xxx"; descriptor___4.function = "qlt_lport_dump"; descriptor___4.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"; descriptor___4.format = "\n"; descriptor___4.lineno = 4519U; descriptor___4.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___4.flags & 1L, 0L); if (tmp___4 != 0L) { __dynamic_pr_debug(& descriptor___4, "\n"); } else { } descriptor___5.modname = "qla2xxx"; descriptor___5.function = "qlt_lport_dump"; descriptor___5.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"; descriptor___5.format = "qla2xxx passed configfs WWPN: "; descriptor___5.lineno = 4521U; descriptor___5.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___5.flags & 1L, 0L); if (tmp___5 != 0L) { __dynamic_pr_debug(& descriptor___5, "qla2xxx passed configfs WWPN: "); } else { } put_unaligned_be64(wwpn, (void *)b); i = 0; goto ldv_62297; ldv_62296: descriptor___6.modname = "qla2xxx"; descriptor___6.function = "qlt_lport_dump"; descriptor___6.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"; descriptor___6.format = "%02x "; descriptor___6.lineno = 4524U; descriptor___6.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___6.flags & 1L, 0L); if (tmp___6 != 0L) { __dynamic_pr_debug(& descriptor___6, "%02x ", (int )*(b + (unsigned long )i)); } else { } i = i + 1; ldv_62297: ; if (i <= 7) { goto ldv_62296; } 
else { } descriptor___7.modname = "qla2xxx"; descriptor___7.function = "qlt_lport_dump"; descriptor___7.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"; descriptor___7.format = "\n"; descriptor___7.lineno = 4525U; descriptor___7.flags = 0U; tmp___7 = ldv__builtin_expect((long )descriptor___7.flags & 1L, 0L); if (tmp___7 != 0L) { __dynamic_pr_debug(& descriptor___7, "\n"); } else { } return; } } int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops , u64 wwpn , int (*callback)(struct scsi_qla_host * ) , void *target_lport_ptr ) { struct qla_tgt *tgt ; struct scsi_qla_host *vha ; struct qla_hw_data *ha ; struct Scsi_Host *host ; unsigned long flags ; int rc ; u8 b[8U] ; struct list_head const *__mptr ; raw_spinlock_t *tmp ; struct _ddebug descriptor ; long tmp___0 ; struct Scsi_Host *tmp___1 ; int tmp___2 ; struct list_head const *__mptr___0 ; { mutex_lock_nested(& qla_tgt_mutex, 0U); __mptr = (struct list_head const *)qla_tgt_glist.next; tgt = (struct qla_tgt *)__mptr + 0xfffffffffffffcb8UL; goto ldv_62325; ldv_62324: vha = tgt->vha; ha = vha->hw; host = vha->host; if ((unsigned long )host == (unsigned long )((struct Scsi_Host *)0)) { goto ldv_62318; } else { } if ((unsigned long )ha->tgt.tgt_ops != (unsigned long )((struct qla_tgt_func_tmpl *)0)) { goto ldv_62318; } else { } if (((int )(host->hostt)->supported_mode & 2) == 0) { goto ldv_62318; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); if (((int )host->active_mode & 2) != 0) { descriptor.modname = "qla2xxx"; descriptor.function = "qlt_lport_register"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/scsi/qla2xxx/qla2xxx.ko--X--defaultlinux-3.12-rc1.tar.xz--X--118_1a--X--cpachecker/linux-3.12-rc1.tar.xz/csd_deg_dscv/27/dscv_tempdir/dscv/ri/118_1a/drivers/scsi/qla2xxx/qla_target.o.c.prepared"; descriptor.format = "MODE_TARGET already active on qla2xxx(%d)\n"; descriptor.lineno = 4565U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "MODE_TARGET already active on qla2xxx(%d)\n", host->host_no); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); goto ldv_62318; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___1 = scsi_host_get(host); if ((unsigned long )tmp___1 == (unsigned long )((struct Scsi_Host *)0)) { ql_dbg(16384U, vha, 57448, "Unable to scsi_host_get() for qla2xxx scsi_host\n"); goto ldv_62318; } else { } qlt_lport_dump(vha, wwpn, (unsigned char *)(& b)); tmp___2 = memcmp((void const *)(& vha->port_name), (void const *)(& b), 8UL); if (tmp___2 != 0) { scsi_host_put(host); goto ldv_62318; } else { } ha->tgt.tgt_ops = qla_tgt_ops; ha->tgt.target_lport_ptr = target_lport_ptr; rc = (*callback)(vha); if (rc != 0) { ha->tgt.tgt_ops = (struct qla_tgt_func_tmpl *)0; ha->tgt.target_lport_ptr = (void *)0; } else { } mutex_unlock(& qla_tgt_mutex); return (rc); ldv_62318: __mptr___0 = (struct list_head const *)tgt->tgt_list_entry.next; tgt = (struct qla_tgt *)__mptr___0 + 0xfffffffffffffcb8UL; ldv_62325: ; if ((unsigned long )(& tgt->tgt_list_entry) != (unsigned long )(& qla_tgt_glist)) { goto ldv_62324; } else { } mutex_unlock(& qla_tgt_mutex); return (-19); } } void qlt_lport_deregister(struct scsi_qla_host *vha ) { struct 
qla_hw_data *ha ; struct Scsi_Host *sh ; { ha = vha->hw; sh = vha->host; ha->tgt.target_lport_ptr = (void *)0; ha->tgt.tgt_ops = (struct qla_tgt_func_tmpl *)0; scsi_host_put(sh); return; } } void qlt_set_mode(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; switch (ql2x_ini_mode) { case 1: ; case 0: (vha->host)->active_mode = 2U; goto ldv_62353; case 2: (vha->host)->active_mode = (unsigned char )((unsigned int )(vha->host)->active_mode | 2U); goto ldv_62353; default: ; goto ldv_62353; } ldv_62353: ; if ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U) { qla_reverse_ini_mode(vha); } else { } return; } } void qlt_clear_mode(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; { ha = vha->hw; switch (ql2x_ini_mode) { case 1: (vha->host)->active_mode = 0U; goto ldv_62361; case 0: (vha->host)->active_mode = 1U; goto ldv_62361; case 2: (vha->host)->active_mode = (unsigned int )(vha->host)->active_mode & 1U; goto ldv_62361; default: ; goto ldv_62361; } ldv_62361: ; if ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U) { qla_reverse_ini_mode(vha); } else { } return; } } void qlt_enable_vha(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; unsigned long flags ; raw_spinlock_t *tmp ; { ha = vha->hw; tgt = ha->tgt.qla_tgt; if ((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0)) { ql_dbg(16384U, vha, 57449, "Unable to locate qla_tgt pointer from struct qla_hw_data\n"); dump_stack(); return; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); tgt->tgt_stopped = 0; qlt_set_mode(vha); spin_unlock_irqrestore(& ha->hardware_lock, flags); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); qla2x00_wait_for_hba_online(vha); return; } } void qlt_disable_vha(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct qla_tgt *tgt ; unsigned long flags ; raw_spinlock_t *tmp ; { ha = vha->hw; tgt = ha->tgt.qla_tgt; if ((unsigned long )tgt == (unsigned long )((struct qla_tgt *)0)) { ql_dbg(16384U, vha, 57450, "Unable to locate qla_tgt pointer from struct qla_hw_data\n"); dump_stack(); return; } else { } tmp = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp); qlt_clear_mode(vha); spin_unlock_irqrestore(& ha->hardware_lock, flags); set_bit(2L, (unsigned long volatile *)(& vha->dpc_flags)); qla2xxx_wake_dpc(vha); qla2x00_wait_for_hba_online(vha); return; } } void qlt_vport_create(struct scsi_qla_host *vha , struct qla_hw_data *ha ) { bool tmp ; int tmp___0 ; struct lock_class_key __key ; struct lock_class_key __key___0 ; { tmp = qla_tgt_mode_enabled(vha); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } __mutex_init(& ha->tgt.tgt_mutex, "&ha->tgt.tgt_mutex", & __key); __mutex_init(& ha->tgt.tgt_host_action_mutex, "&ha->tgt.tgt_host_action_mutex", & __key___0); qlt_clear_mode(vha); ha->tgt.atio_q_length = 4096U; return; } } void qlt_rff_id(struct scsi_qla_host *vha , struct ct_sns_req *ct_req ) { bool tmp ; bool tmp___0 ; bool tmp___1 ; { tmp___1 = qla_tgt_mode_enabled(vha); if ((int )tmp___1) { tmp = qla_ini_mode_enabled(vha); if ((int )tmp) { ct_req->req.rff_id.fc4_feature = 3U; } else { ct_req->req.rff_id.fc4_feature = 1U; } } else { tmp___0 = qla_ini_mode_enabled(vha); if ((int )tmp___0) { ct_req->req.rff_id.fc4_feature = 2U; } else { } } return; } } void qlt_init_atio_q_entries(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; uint16_t cnt ; struct atio_from_isp *pkt ; bool tmp ; int tmp___0 ; { ha = vha->hw; pkt 
= (struct atio_from_isp *)ha->tgt.atio_ring; tmp = qla_tgt_mode_enabled(vha); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } cnt = 0U; goto ldv_62406; ldv_62405: pkt->u.raw.signature = 3735936685U; pkt = pkt + 1; cnt = (uint16_t )((int )cnt + 1); ldv_62406: ; if ((int )ha->tgt.atio_q_length > (int )cnt) { goto ldv_62405; } else { } return; } } void qlt_24xx_process_atio_queue(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct atio_from_isp *pkt ; int cnt ; int i ; { ha = vha->hw; if (*((unsigned long *)vha + 19UL) == 0UL) { return; } else { } goto ldv_62419; ldv_62418: pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; cnt = (int )pkt->u.raw.entry_count; qlt_24xx_atio_pkt_all_vps(vha, pkt); i = 0; goto ldv_62416; ldv_62415: ha->tgt.atio_ring_index = (uint16_t )((int )ha->tgt.atio_ring_index + 1); if ((int )ha->tgt.atio_ring_index == (int )ha->tgt.atio_q_length) { ha->tgt.atio_ring_index = 0U; ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; } else { ha->tgt.atio_ring_ptr = ha->tgt.atio_ring_ptr + 1; } pkt->u.raw.signature = 3735936685U; pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; i = i + 1; ldv_62416: ; if (i < cnt) { goto ldv_62415; } else { } __asm__ volatile ("sfence": : : "memory"); ldv_62419: ; if ((ha->tgt.atio_ring_ptr)->signature != 3735936685U) { goto ldv_62418; } else { } writel((unsigned int )ha->tgt.atio_ring_index, (void volatile *)(vha->hw)->tgt.atio_q_out); return; } } void qlt_24xx_config_rings(struct scsi_qla_host *vha ) { struct qla_hw_data *ha ; struct qla_msix_entry *msix ; struct init_cb_24xx *icb ; { ha = vha->hw; if (ql2x_ini_mode == 2) { return; } else { } writel(0U, (void volatile *)(vha->hw)->tgt.atio_q_in); writel(0U, (void volatile *)(vha->hw)->tgt.atio_q_out); readl((void const volatile *)(vha->hw)->tgt.atio_q_out); if ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U) { msix = ha->msix_entries + 2UL; icb = (struct init_cb_24xx *)ha->init_cb; icb->msix_atio = msix->entry; ql_dbg(1073741824U, vha, 61554, "Registering ICB vector 0x%x for atio que.\n", (int )msix->entry); } else { } return; } } void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha , struct nvram_24xx *nv ) { struct qla_hw_data *ha ; bool tmp ; int tmp___0 ; bool tmp___1 ; { ha = vha->hw; tmp___1 = qla_tgt_mode_enabled(vha); if ((int )tmp___1) { if (ha->tgt.saved_set == 0) { ha->tgt.saved_exchange_count = nv->exchange_count; ha->tgt.saved_firmware_options_1 = nv->firmware_options_1; ha->tgt.saved_firmware_options_2 = nv->firmware_options_2; ha->tgt.saved_firmware_options_3 = nv->firmware_options_3; ha->tgt.saved_set = 1; } else { } nv->exchange_count = 65535U; nv->firmware_options_1 = nv->firmware_options_1 | 16U; tmp = qla_ini_mode_enabled(vha); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { nv->firmware_options_1 = nv->firmware_options_1 | 32U; } else { } nv->firmware_options_1 = nv->firmware_options_1 & 4294959103U; nv->firmware_options_1 = nv->firmware_options_1 & 4294966783U; nv->firmware_options_2 = nv->firmware_options_2 | 4096U; nv->host_p = nv->host_p & 4294966271U; nv->firmware_options_2 = nv->firmware_options_2 | 16384U; } else { if (ha->tgt.saved_set != 0) { nv->exchange_count = ha->tgt.saved_exchange_count; nv->firmware_options_1 = ha->tgt.saved_firmware_options_1; nv->firmware_options_2 = ha->tgt.saved_firmware_options_2; nv->firmware_options_3 = ha->tgt.saved_firmware_options_3; } else { } return; } nv->firmware_options_3 = nv->firmware_options_3 | 576U; if ((unsigned int )*((unsigned char 
*)ha + 3808UL) != 0U) { if (*((unsigned long *)vha + 19UL) != 0UL) { ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_classes = 12U; } else { } nv->firmware_options_2 = nv->firmware_options_2 | 256U; } else { if (*((unsigned long *)vha + 19UL) != 0UL) { ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_classes = 8U; } else { } nv->firmware_options_2 = nv->firmware_options_2 & 4294967039U; } return; } } void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha , struct init_cb_24xx *icb ) { struct qla_hw_data *ha ; size_t __len ; void *__ret ; { ha = vha->hw; if ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U) { __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& icb->node_name), (void const *)(& ha->tgt.tgt_node_name), __len); } else { __ret = __builtin_memcpy((void *)(& icb->node_name), (void const *)(& ha->tgt.tgt_node_name), __len); } icb->firmware_options_1 = icb->firmware_options_1 | 16384U; } else { } return; } } void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha , struct nvram_81xx *nv ) { struct qla_hw_data *ha ; bool tmp ; int tmp___0 ; bool tmp___1 ; { ha = vha->hw; if (ql2x_ini_mode == 2) { return; } else { } tmp___1 = qla_tgt_mode_enabled(vha); if ((int )tmp___1) { if (ha->tgt.saved_set == 0) { ha->tgt.saved_exchange_count = nv->exchange_count; ha->tgt.saved_firmware_options_1 = nv->firmware_options_1; ha->tgt.saved_firmware_options_2 = nv->firmware_options_2; ha->tgt.saved_firmware_options_3 = nv->firmware_options_3; ha->tgt.saved_set = 1; } else { } nv->exchange_count = 65535U; nv->firmware_options_1 = nv->firmware_options_1 | 16U; tmp = qla_ini_mode_enabled(vha); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { nv->firmware_options_1 = nv->firmware_options_1 | 32U; } else { } nv->firmware_options_1 = nv->firmware_options_1 & 4294959103U; nv->firmware_options_1 = nv->firmware_options_1 & 4294966783U; nv->firmware_options_2 = nv->firmware_options_2 | 4096U; nv->host_p = nv->host_p & 4294966271U; nv->firmware_options_2 = nv->firmware_options_2 | 16384U; } else { if (ha->tgt.saved_set != 0) { nv->exchange_count = ha->tgt.saved_exchange_count; nv->firmware_options_1 = ha->tgt.saved_firmware_options_1; nv->firmware_options_2 = ha->tgt.saved_firmware_options_2; nv->firmware_options_3 = ha->tgt.saved_firmware_options_3; } else { } return; } nv->firmware_options_3 = nv->firmware_options_3 | 576U; if ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U) { if (*((unsigned long *)vha + 19UL) != 0UL) { ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_classes = 12U; } else { } nv->firmware_options_2 = nv->firmware_options_2 | 256U; } else { if (*((unsigned long *)vha + 19UL) != 0UL) { ((struct fc_host_attrs *)(vha->host)->shost_data)->supported_classes = 8U; } else { } nv->firmware_options_2 = nv->firmware_options_2 & 4294967039U; } return; } } void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha , struct init_cb_81xx *icb ) { struct qla_hw_data *ha ; size_t __len ; void *__ret ; { ha = vha->hw; if (ql2x_ini_mode == 2) { return; } else { } if ((unsigned int )*((unsigned char *)ha + 3808UL) != 0U) { __len = 8UL; if (__len > 63UL) { __ret = __memcpy((void *)(& icb->node_name), (void const *)(& ha->tgt.tgt_node_name), __len); } else { __ret = __builtin_memcpy((void *)(& icb->node_name), (void const *)(& ha->tgt.tgt_node_name), __len); } icb->firmware_options_1 = icb->firmware_options_1 | 16384U; } else { } return; } } void qlt_83xx_iospace_config(struct qla_hw_data *ha ) { { if (ql2x_ini_mode == 2) { return; } 
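/*
 * qlt_83xx_iospace_config(): unless target mode is globally disabled
 * (ql2x_ini_mode == 2), one extra MSI-X vector is reserved so the ATIO
 * queue can get its own interrupt; qla83xx_msix_atio_q() below drains the
 * ATIO queue and then the response queue under hardware_lock.
 */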
else { } ha->msix_count = (unsigned int )ha->msix_count + 1U; return; } } int qlt_24xx_process_response_error(struct scsi_qla_host *vha , struct sts_entry_24xx *pkt ) { { switch ((int )pkt->entry_type) { case 84: ; case 85: ; case 18: ; case 14: ; return (1); default: ; return (0); } } } void qlt_modify_vp_config(struct scsi_qla_host *vha , struct vp_config_entry_24xx *vpmod ) { bool tmp ; bool tmp___0 ; int tmp___1 ; { tmp = qla_tgt_mode_enabled(vha); if ((int )tmp) { vpmod->options_idx1 = (unsigned int )vpmod->options_idx1 & 223U; } else { } tmp___0 = qla_ini_mode_enabled(vha); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { vpmod->options_idx1 = (unsigned int )vpmod->options_idx1 & 239U; } else { } return; } } void qlt_probe_one_stage1(struct scsi_qla_host *base_vha , struct qla_hw_data *ha ) { struct lock_class_key __key ; struct lock_class_key __key___0 ; { if (ql2x_ini_mode == 2) { return; } else { } if ((unsigned int )ha->mqenable != 0U || ((ha->device_type & 32768U) != 0U || (ha->device_type & 65536U) != 0U)) { (base_vha->hw)->tgt.atio_q_in = & (ha->mqiobase)->isp25mq.atio_q_in; (base_vha->hw)->tgt.atio_q_out = & (ha->mqiobase)->isp25mq.atio_q_out; } else { (base_vha->hw)->tgt.atio_q_in = & (ha->iobase)->isp24.atio_q_in; (base_vha->hw)->tgt.atio_q_out = & (ha->iobase)->isp24.atio_q_out; } __mutex_init(& ha->tgt.tgt_mutex, "&ha->tgt.tgt_mutex", & __key); __mutex_init(& ha->tgt.tgt_host_action_mutex, "&ha->tgt.tgt_host_action_mutex", & __key___0); qlt_clear_mode(base_vha); return; } } irqreturn_t qla83xx_msix_atio_q(int irq , void *dev_id ) { struct rsp_que *rsp ; scsi_qla_host_t *vha ; struct qla_hw_data *ha ; unsigned long flags ; void *tmp ; raw_spinlock_t *tmp___0 ; { rsp = (struct rsp_que *)dev_id; ha = rsp->hw; tmp = pci_get_drvdata(ha->pdev); vha = (scsi_qla_host_t *)tmp; tmp___0 = spinlock_check(& ha->hardware_lock); flags = _raw_spin_lock_irqsave(tmp___0); qlt_24xx_process_atio_queue(vha); qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } int qlt_mem_alloc(struct qla_hw_data *ha ) { void *tmp ; void *tmp___0 ; { if (ql2x_ini_mode == 2) { return (0); } else { } tmp = kzalloc(4096UL, 208U); ha->tgt.tgt_vp_map = (struct qla_tgt_vp_map *)tmp; if ((unsigned long )ha->tgt.tgt_vp_map == (unsigned long )((struct qla_tgt_vp_map *)0)) { return (-12); } else { } tmp___0 = dma_alloc_attrs(& (ha->pdev)->dev, (unsigned long )((int )ha->tgt.atio_q_length + 1) * 64UL, & ha->tgt.atio_dma, 208U, (struct dma_attrs *)0); ha->tgt.atio_ring = (struct atio *)tmp___0; if ((unsigned long )ha->tgt.atio_ring == (unsigned long )((struct atio *)0)) { kfree((void const *)ha->tgt.tgt_vp_map); return (-12); } else { } return (0); } } void qlt_mem_free(struct qla_hw_data *ha ) { { if (ql2x_ini_mode == 2) { return; } else { } if ((unsigned long )ha->tgt.atio_ring != (unsigned long )((struct atio *)0)) { dma_free_attrs(& (ha->pdev)->dev, (unsigned long )((int )ha->tgt.atio_q_length + 1) * 64UL, (void *)ha->tgt.atio_ring, ha->tgt.atio_dma, (struct dma_attrs *)0); } else { } kfree((void const *)ha->tgt.tgt_vp_map); return; } } void qlt_update_vp_map(struct scsi_qla_host *vha , int cmd ) { { if (ql2x_ini_mode == 2) { return; } else { } switch (cmd) { case 1: ((vha->hw)->tgt.tgt_vp_map + (unsigned long )vha->vp_idx)->vha = vha; goto ldv_62497; case 2: ((vha->hw)->tgt.tgt_vp_map + (unsigned long )vha->d_id.b.al_pa)->idx = (uint8_t )vha->vp_idx; goto ldv_62497; case 3: ((vha->hw)->tgt.tgt_vp_map + (unsigned long )vha->vp_idx)->vha = 
(scsi_qla_host_t *)0; goto ldv_62497; case 4: ((vha->hw)->tgt.tgt_vp_map + (unsigned long )vha->d_id.b.al_pa)->idx = 0U; goto ldv_62497; } ldv_62497: ; return; } } static int qlt_parse_ini_mode(void) { int tmp ; int tmp___0 ; int tmp___1 ; { tmp___1 = strcasecmp((char const *)qlini_mode, "exclusive"); if (tmp___1 == 0) { ql2x_ini_mode = 0; } else { tmp___0 = strcasecmp((char const *)qlini_mode, "disabled"); if (tmp___0 == 0) { ql2x_ini_mode = 1; } else { tmp = strcasecmp((char const *)qlini_mode, "enabled"); if (tmp == 0) { ql2x_ini_mode = 2; } else { return (0); } } } return (1); } } int qlt_init(void) { int ret ; int tmp ; struct lock_class_key __key ; char const *__lock_name ; struct workqueue_struct *tmp___0 ; { tmp = qlt_parse_ini_mode(); if (tmp == 0) { ql_log(0U, (scsi_qla_host_t *)0, 57451, "qlt_parse_ini_mode() failed\n"); return (-22); } else { } if (ql2x_ini_mode == 2) { return (0); } else { } qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep", 1400UL, 8UL, 0UL, (void (*)(void * ))0); if ((unsigned long )qla_tgt_cmd_cachep == (unsigned long )((struct kmem_cache *)0)) { ql_log(0U, (scsi_qla_host_t *)0, 57452, "kmem_cache_create for qla_tgt_cmd_cachep failed\n"); return (-12); } else { } qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", 1152UL, 8UL, 0UL, (void (*)(void * ))0); if ((unsigned long )qla_tgt_mgmt_cmd_cachep == (unsigned long )((struct kmem_cache *)0)) { ql_log(0U, (scsi_qla_host_t *)0, 57453, "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); ret = -12; goto out; } else { } qla_tgt_mgmt_cmd_mempool = mempool_create(25, & mempool_alloc_slab, & mempool_free_slab, (void *)qla_tgt_mgmt_cmd_cachep); if ((unsigned long )qla_tgt_mgmt_cmd_mempool == (unsigned long )((mempool_t *)0)) { ql_log(0U, (scsi_qla_host_t *)0, 57454, "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); ret = -12; goto out_mgmt_cmd_cachep; } else { } __lock_name = "qla_tgt_wq"; tmp___0 = __alloc_workqueue_key("qla_tgt_wq", 0U, 0, & __key, __lock_name); qla_tgt_wq = tmp___0; if ((unsigned long )qla_tgt_wq == (unsigned long )((struct workqueue_struct *)0)) { ql_log(0U, (scsi_qla_host_t *)0, 57455, "alloc_workqueue for qla_tgt_wq failed\n"); ret = -12; goto out_cmd_mempool; } else { } return (ql2x_ini_mode == 1); out_cmd_mempool: mempool_destroy(qla_tgt_mgmt_cmd_mempool); out_mgmt_cmd_cachep: kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); out: kmem_cache_destroy(qla_tgt_cmd_cachep); return (ret); } } void qlt_exit(void) { { if (ql2x_ini_mode == 2) { return; } else { } destroy_workqueue(qla_tgt_wq); mempool_destroy(qla_tgt_mgmt_cmd_mempool); kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); kmem_cache_destroy(qla_tgt_cmd_cachep); return; } } void disable_suitable_timer_17(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_17) { ldv_timer_state_17 = 0; return; } else { } return; } } void choose_timer_17(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_17 = 2; return; } } int reg_timer_17(struct timer_list *timer ) { { ldv_timer_list_17 = timer; ldv_timer_state_17 = 1; return (0); } } void activate_pending_timer_17(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_17 == (unsigned long )timer) { if (ldv_timer_state_17 == 2 || pending_flag != 0) { ldv_timer_list_17 = timer; ldv_timer_list_17->data = data; ldv_timer_state_17 = 1; } else { } return; } else { } reg_timer_17(timer); ldv_timer_list_17->data = 
data; return; } } int ldv_del_timer_81(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type ldv_func_res ; int tmp ; { tmp = del_timer(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_7(ldv_func_arg1); return (ldv_func_res); } } int ldv_scsi_add_host_with_dma_82(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___0 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_72 = 1; ldv_initialize_scsi_host_template_72(); } else { } return (ldv_func_res); } } __inline static void ldv_error(void); int ldv_rlock = 1; int ldv_wlock = 1; void ldv_read_lock_irqsave(rwlock_t *lock ) { { if (ldv_wlock == 1) { } else { ldv_error(); } ldv_rlock = ldv_rlock + 1; return; } } void ldv_read_unlock_irqrestore(rwlock_t *lock ) { { if (ldv_rlock > 1) { } else { ldv_error(); } ldv_rlock = ldv_rlock - 1; return; } } void ldv_write_lock_irqsave(rwlock_t *lock ) { { if (ldv_wlock == 1) { } else { ldv_error(); } ldv_wlock = 2; return; } } void ldv_write_unlock_irqrestore(rwlock_t *lock ) { { if (ldv_wlock != 1) { } else { ldv_error(); } ldv_wlock = 1; return; } } void ldv_read_lock(rwlock_t *lock ) { { if (ldv_wlock == 1) { } else { ldv_error(); } ldv_rlock = ldv_rlock + 1; return; } } void ldv_read_unlock(rwlock_t *lock ) { { if (ldv_rlock > 1) { } else { ldv_error(); } ldv_rlock = ldv_rlock - 1; return; } } void ldv_write_lock(rwlock_t *lock ) { { if (ldv_wlock == 1) { } else { ldv_error(); } ldv_wlock = 2; return; } } void ldv_write_unlock(rwlock_t *lock ) { { if (ldv_wlock != 1) { } else { ldv_error(); } ldv_wlock = 1; return; } } int ldv_read_trylock(rwlock_t *lock ) { int tmp ; { if (ldv_wlock == 1) { tmp = ldv_undef_int(); if (tmp) { ldv_rlock = ldv_rlock + 1; return (1); } else { return (0); } } else { return (0); } } } int ldv_write_trylock(rwlock_t *lock ) { int tmp ; { if (ldv_wlock == 1) { tmp = ldv_undef_int(); if (tmp) { ldv_wlock = 2; return (1); } else { return (0); } } else { return (0); } } } void ldv_read_lock_irq(rwlock_t *lock ) { { if (ldv_wlock == 1) { } else { ldv_error(); } ldv_rlock = ldv_rlock + 1; return; } } void ldv_read_unlock_irq(rwlock_t *lock ) { { if (ldv_rlock > 1) { } else { ldv_error(); } ldv_rlock = ldv_rlock - 1; return; } } void ldv_write_lock_irq(rwlock_t *lock ) { { if (ldv_wlock == 1) { } else { ldv_error(); } ldv_wlock = 2; return; } } void ldv_write_unlock_irq(rwlock_t *lock ) { { if (ldv_wlock != 1) { } else { ldv_error(); } ldv_wlock = 1; return; } } void ldv_read_lock_bh(rwlock_t *lock ) { { if (ldv_wlock == 1) { } else { ldv_error(); } ldv_rlock = ldv_rlock + 1; return; } } void ldv_read_unlock_bh(rwlock_t *lock ) { { if (ldv_rlock > 1) { } else { ldv_error(); } ldv_rlock = ldv_rlock - 1; return; } } void ldv_write_lock_bh(rwlock_t *lock ) { { if (ldv_wlock == 1) { } else { ldv_error(); } ldv_wlock = 2; return; } } void ldv_write_unlock_bh(rwlock_t *lock ) { { if (ldv_wlock != 1) { } else { ldv_error(); } ldv_wlock = 1; return; } } void ldv_check_final_state(void) { { if (ldv_rlock == 1) { } else { ldv_error(); } if (ldv_wlock == 1) { } else { ldv_error(); } return; } }
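/*
 * The ldv_* definitions above are the LDV (Linux Driver Verification)
 * environment model rather than driver code.  ldv_rlock counts outstanding
 * read locks (offset by one, so 1 means "none held") and ldv_wlock records
 * whether a write lock is held (1 = free, 2 = held).  Each ldv_read_* and
 * ldv_write_* wrapper checks the expected state and calls ldv_error() on a
 * violation, and ldv_check_final_state() requires both counters to be back
 * at 1 once all entry points have returned, i.e. no read or write lock may
 * still be held.  The *_timer_17 helpers and the ldv_del_timer_81 /
 * ldv_scsi_add_host_with_dma_82 wrappers play the same role for the timer
 * and SCSI host registration models.
 *
 * Illustrative sketch only (not part of the generated driver; the function
 * name is made up): a lock sequence the model accepts.  Leaving any of
 * these calls unbalanced would steer the verifier into ldv_error().
 */
static void ldv_rwlock_model_sketch(rwlock_t *lock ) __attribute__((__unused__)) ;
static void ldv_rwlock_model_sketch(rwlock_t *lock )
{
  ldv_read_lock(lock);     /* ok: no write lock held, ldv_rlock 1 -> 2 */
  ldv_read_unlock(lock);   /* balanced release, ldv_rlock 2 -> 1 */
  ldv_write_lock(lock);    /* ok: lock free, ldv_wlock 1 -> 2 */
  ldv_write_unlock(lock);  /* balanced release, ldv_wlock 2 -> 1 */
  return;
}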