extern void __VERIFIER_error() __attribute__ ((__noreturn__)); /* Generated by CIL v. 1.5.1 */ /* print_CIL_Input is false */ typedef unsigned char __u8; typedef short __s16; typedef unsigned short __u16; typedef int __s32; typedef unsigned int __u32; typedef long long __s64; typedef unsigned long long __u64; typedef signed char s8; typedef unsigned char u8; typedef unsigned short u16; typedef int s32; typedef unsigned int u32; typedef long long s64; typedef unsigned long long u64; typedef long __kernel_long_t; typedef unsigned long __kernel_ulong_t; typedef int __kernel_pid_t; typedef unsigned int __kernel_uid32_t; typedef unsigned int __kernel_gid32_t; typedef __kernel_ulong_t __kernel_size_t; typedef __kernel_long_t __kernel_ssize_t; typedef long long __kernel_loff_t; typedef __kernel_long_t __kernel_time_t; typedef __kernel_long_t __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef __u16 __le16; typedef __u16 __be16; typedef __u32 __le32; typedef __u32 __be32; typedef __u64 __le64; typedef __u16 __sum16; typedef __u32 __wsum; struct kernel_symbol { unsigned long value ; char const *name ; }; struct module; typedef __u32 __kernel_dev_t; typedef __kernel_dev_t dev_t; typedef unsigned short umode_t; typedef __kernel_pid_t pid_t; typedef __kernel_clockid_t clockid_t; typedef _Bool bool; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_loff_t loff_t; typedef __kernel_size_t size_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_time_t time_t; typedef __s32 int32_t; typedef __u8 uint8_t; typedef __u32 uint32_t; typedef __u64 uint64_t; typedef unsigned long sector_t; typedef unsigned long blkcnt_t; typedef u64 dma_addr_t; typedef unsigned int gfp_t; typedef unsigned int fmode_t; typedef unsigned int oom_flags_t; typedef u64 phys_addr_t; typedef phys_addr_t resource_size_t; struct __anonstruct_atomic_t_6 { int counter ; }; typedef struct __anonstruct_atomic_t_6 atomic_t; struct 
__anonstruct_atomic64_t_7 { long counter ; }; typedef struct __anonstruct_atomic64_t_7 atomic64_t; struct list_head { struct list_head *next ; struct list_head *prev ; }; struct hlist_node; struct hlist_head { struct hlist_node *first ; }; struct hlist_node { struct hlist_node *next ; struct hlist_node **pprev ; }; struct callback_head { struct callback_head *next ; void (*func)(struct callback_head * ) ; }; typedef u64 cycle_t; struct pt_regs { unsigned long r15 ; unsigned long r14 ; unsigned long r13 ; unsigned long r12 ; unsigned long bp ; unsigned long bx ; unsigned long r11 ; unsigned long r10 ; unsigned long r9 ; unsigned long r8 ; unsigned long ax ; unsigned long cx ; unsigned long dx ; unsigned long si ; unsigned long di ; unsigned long orig_ax ; unsigned long ip ; unsigned long cs ; unsigned long flags ; unsigned long sp ; unsigned long ss ; }; struct __anonstruct____missing_field_name_9 { unsigned int a ; unsigned int b ; }; struct __anonstruct____missing_field_name_10 { u16 limit0 ; u16 base0 ; unsigned char base1 ; unsigned char type : 4 ; unsigned char s : 1 ; unsigned char dpl : 2 ; unsigned char p : 1 ; unsigned char limit : 4 ; unsigned char avl : 1 ; unsigned char l : 1 ; unsigned char d : 1 ; unsigned char g : 1 ; unsigned char base2 ; }; union __anonunion____missing_field_name_8 { struct __anonstruct____missing_field_name_9 __annonCompField4 ; struct __anonstruct____missing_field_name_10 __annonCompField5 ; }; struct desc_struct { union __anonunion____missing_field_name_8 __annonCompField6 ; }; typedef unsigned long pteval_t; typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; struct __anonstruct_pte_t_11 { pteval_t pte ; }; typedef struct __anonstruct_pte_t_11 pte_t; struct pgprot { pgprotval_t pgprot ; }; typedef struct pgprot pgprot_t; struct __anonstruct_pgd_t_12 { pgdval_t pgd ; }; typedef struct __anonstruct_pgd_t_12 pgd_t; struct page; typedef struct page *pgtable_t; struct file; struct seq_file; struct thread_struct; struct 
mm_struct; struct task_struct; struct cpumask; struct qspinlock { atomic_t val ; }; typedef struct qspinlock arch_spinlock_t; struct qrwlock { atomic_t cnts ; arch_spinlock_t lock ; }; typedef struct qrwlock arch_rwlock_t; typedef void (*ctor_fn_t)(void); struct _ddebug { char const *modname ; char const *function ; char const *filename ; char const *format ; unsigned int lineno : 18 ; unsigned char flags ; }; struct device; struct net_device; struct file_operations; struct completion; enum system_states { SYSTEM_BOOTING = 0, SYSTEM_RUNNING = 1, SYSTEM_HALT = 2, SYSTEM_POWER_OFF = 3, SYSTEM_RESTART = 4 } ; struct bug_entry { int bug_addr_disp ; int file_disp ; unsigned short line ; unsigned short flags ; }; struct timespec; struct compat_timespec; struct __anonstruct_futex_16 { u32 *uaddr ; u32 val ; u32 flags ; u32 bitset ; u64 time ; u32 *uaddr2 ; }; struct __anonstruct_nanosleep_17 { clockid_t clockid ; struct timespec *rmtp ; struct compat_timespec *compat_rmtp ; u64 expires ; }; struct pollfd; struct __anonstruct_poll_18 { struct pollfd *ufds ; int nfds ; int has_timeout ; unsigned long tv_sec ; unsigned long tv_nsec ; }; union __anonunion____missing_field_name_15 { struct __anonstruct_futex_16 futex ; struct __anonstruct_nanosleep_17 nanosleep ; struct __anonstruct_poll_18 poll ; }; struct restart_block { long (*fn)(struct restart_block * ) ; union __anonunion____missing_field_name_15 __annonCompField7 ; }; struct kernel_vm86_regs { struct pt_regs pt ; unsigned short es ; unsigned short __esh ; unsigned short ds ; unsigned short __dsh ; unsigned short fs ; unsigned short __fsh ; unsigned short gs ; unsigned short __gsh ; }; union __anonunion____missing_field_name_19 { struct pt_regs *regs ; struct kernel_vm86_regs *vm86 ; }; struct math_emu_info { long ___orig_eip ; union __anonunion____missing_field_name_19 __annonCompField8 ; }; struct cpumask { unsigned long bits[128U] ; }; typedef struct cpumask cpumask_t; typedef struct cpumask *cpumask_var_t; struct 
fregs_state { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u32 status ; }; struct __anonstruct____missing_field_name_29 { u64 rip ; u64 rdp ; }; struct __anonstruct____missing_field_name_30 { u32 fip ; u32 fcs ; u32 foo ; u32 fos ; }; union __anonunion____missing_field_name_28 { struct __anonstruct____missing_field_name_29 __annonCompField12 ; struct __anonstruct____missing_field_name_30 __annonCompField13 ; }; union __anonunion____missing_field_name_31 { u32 padding1[12U] ; u32 sw_reserved[12U] ; }; struct fxregs_state { u16 cwd ; u16 swd ; u16 twd ; u16 fop ; union __anonunion____missing_field_name_28 __annonCompField14 ; u32 mxcsr ; u32 mxcsr_mask ; u32 st_space[32U] ; u32 xmm_space[64U] ; u32 padding[12U] ; union __anonunion____missing_field_name_31 __annonCompField15 ; }; struct swregs_state { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u8 ftop ; u8 changed ; u8 lookahead ; u8 no_update ; u8 rm ; u8 alimit ; struct math_emu_info *info ; u32 entry_eip ; }; struct xstate_header { u64 xfeatures ; u64 xcomp_bv ; u64 reserved[6U] ; }; struct xregs_state { struct fxregs_state i387 ; struct xstate_header header ; u8 __reserved[464U] ; }; union fpregs_state { struct fregs_state fsave ; struct fxregs_state fxsave ; struct swregs_state soft ; struct xregs_state xsave ; }; struct fpu { union fpregs_state state ; unsigned int last_cpu ; unsigned char fpstate_active ; unsigned char fpregs_active ; unsigned char counter ; }; struct seq_operations; struct perf_event; struct thread_struct { struct desc_struct tls_array[3U] ; unsigned long sp0 ; unsigned long sp ; unsigned short es ; unsigned short ds ; unsigned short fsindex ; unsigned short gsindex ; unsigned long fs ; unsigned long gs ; struct fpu fpu ; struct perf_event *ptrace_bps[4U] ; unsigned long debugreg6 ; unsigned long ptrace_dr7 ; unsigned long cr2 ; unsigned long trap_nr ; unsigned long error_code ; unsigned long 
*io_bitmap_ptr ; unsigned long iopl ; unsigned int io_bitmap_max ; }; typedef atomic64_t atomic_long_t; typedef int pao_T__; typedef int pao_T_____0; struct lockdep_map; struct stack_trace { unsigned int nr_entries ; unsigned int max_entries ; unsigned long *entries ; int skip ; }; struct lockdep_subclass_key { char __one_byte ; }; struct lock_class_key { struct lockdep_subclass_key subkeys[8U] ; }; struct lock_class { struct list_head hash_entry ; struct list_head lock_entry ; struct lockdep_subclass_key *key ; unsigned int subclass ; unsigned int dep_gen_id ; unsigned long usage_mask ; struct stack_trace usage_traces[13U] ; struct list_head locks_after ; struct list_head locks_before ; unsigned int version ; unsigned long ops ; char const *name ; int name_version ; unsigned long contention_point[4U] ; unsigned long contending_point[4U] ; }; struct lockdep_map { struct lock_class_key *key ; struct lock_class *class_cache[2U] ; char const *name ; int cpu ; unsigned long ip ; }; struct held_lock { u64 prev_chain_key ; unsigned long acquire_ip ; struct lockdep_map *instance ; struct lockdep_map *nest_lock ; u64 waittime_stamp ; u64 holdtime_stamp ; unsigned short class_idx : 13 ; unsigned char irq_context : 2 ; unsigned char trylock : 1 ; unsigned char read : 2 ; unsigned char check : 1 ; unsigned char hardirqs_off : 1 ; unsigned short references : 12 ; unsigned int pin_count ; }; struct raw_spinlock { arch_spinlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct raw_spinlock raw_spinlock_t; struct __anonstruct____missing_field_name_35 { u8 __padding[24U] ; struct lockdep_map dep_map ; }; union __anonunion____missing_field_name_34 { struct raw_spinlock rlock ; struct __anonstruct____missing_field_name_35 __annonCompField17 ; }; struct spinlock { union __anonunion____missing_field_name_34 __annonCompField18 ; }; typedef struct spinlock spinlock_t; struct __anonstruct_rwlock_t_36 { arch_rwlock_t 
raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_rwlock_t_36 rwlock_t; struct seqcount { unsigned int sequence ; struct lockdep_map dep_map ; }; typedef struct seqcount seqcount_t; struct __anonstruct_seqlock_t_45 { struct seqcount seqcount ; spinlock_t lock ; }; typedef struct __anonstruct_seqlock_t_45 seqlock_t; struct timespec { __kernel_time_t tv_sec ; long tv_nsec ; }; struct user_namespace; struct __anonstruct_kuid_t_46 { uid_t val ; }; typedef struct __anonstruct_kuid_t_46 kuid_t; struct __anonstruct_kgid_t_47 { gid_t val ; }; typedef struct __anonstruct_kgid_t_47 kgid_t; struct kstat { u64 ino ; dev_t dev ; umode_t mode ; unsigned int nlink ; kuid_t uid ; kgid_t gid ; dev_t rdev ; loff_t size ; struct timespec atime ; struct timespec mtime ; struct timespec ctime ; unsigned long blksize ; unsigned long long blocks ; }; struct vm_area_struct; struct __wait_queue_head { spinlock_t lock ; struct list_head task_list ; }; typedef struct __wait_queue_head wait_queue_head_t; struct __anonstruct_nodemask_t_48 { unsigned long bits[16U] ; }; typedef struct __anonstruct_nodemask_t_48 nodemask_t; struct free_area { struct list_head free_list[6U] ; unsigned long nr_free ; }; struct pglist_data; struct zone_padding { char x[0U] ; }; struct zone_reclaim_stat { unsigned long recent_rotated[2U] ; unsigned long recent_scanned[2U] ; }; struct zone; struct lruvec { struct list_head lists[5U] ; struct zone_reclaim_stat reclaim_stat ; struct zone *zone ; }; struct per_cpu_pages { int count ; int high ; int batch ; struct list_head lists[3U] ; }; struct per_cpu_pageset { struct per_cpu_pages pcp ; s8 expire ; s8 stat_threshold ; s8 vm_stat_diff[39U] ; }; enum zone_type { ZONE_DMA = 0, ZONE_DMA32 = 1, ZONE_NORMAL = 2, ZONE_MOVABLE = 3, __MAX_NR_ZONES = 4 } ; struct zone { unsigned long watermark[3U] ; long lowmem_reserve[4U] ; int node ; unsigned int inactive_ratio ; struct pglist_data *zone_pgdat ; 
struct per_cpu_pageset *pageset ; unsigned long dirty_balance_reserve ; unsigned long min_unmapped_pages ; unsigned long min_slab_pages ; unsigned long zone_start_pfn ; unsigned long managed_pages ; unsigned long spanned_pages ; unsigned long present_pages ; char const *name ; int nr_migrate_reserve_block ; unsigned long nr_isolate_pageblock ; seqlock_t span_seqlock ; wait_queue_head_t *wait_table ; unsigned long wait_table_hash_nr_entries ; unsigned long wait_table_bits ; struct zone_padding _pad1_ ; struct free_area free_area[11U] ; unsigned long flags ; spinlock_t lock ; struct zone_padding _pad2_ ; spinlock_t lru_lock ; struct lruvec lruvec ; atomic_long_t inactive_age ; unsigned long percpu_drift_mark ; unsigned long compact_cached_free_pfn ; unsigned long compact_cached_migrate_pfn[2U] ; unsigned int compact_considered ; unsigned int compact_defer_shift ; int compact_order_failed ; bool compact_blockskip_flush ; struct zone_padding _pad3_ ; atomic_long_t vm_stat[39U] ; }; struct zonelist_cache { unsigned short z_to_n[4096U] ; unsigned long fullzones[64U] ; unsigned long last_full_zap ; }; struct zoneref { struct zone *zone ; int zone_idx ; }; struct zonelist { struct zonelist_cache *zlcache_ptr ; struct zoneref _zonerefs[4097U] ; struct zonelist_cache zlcache ; }; struct pglist_data { struct zone node_zones[4U] ; struct zonelist node_zonelists[2U] ; int nr_zones ; spinlock_t node_size_lock ; unsigned long node_start_pfn ; unsigned long node_present_pages ; unsigned long node_spanned_pages ; int node_id ; wait_queue_head_t kswapd_wait ; wait_queue_head_t pfmemalloc_wait ; struct task_struct *kswapd ; int kswapd_max_order ; enum zone_type classzone_idx ; spinlock_t numabalancing_migrate_lock ; unsigned long numabalancing_migrate_next_window ; unsigned long numabalancing_migrate_nr_pages ; unsigned long first_deferred_pfn ; }; typedef struct pglist_data pg_data_t; struct optimistic_spin_queue { atomic_t tail ; }; struct mutex { atomic_t count ; spinlock_t 
wait_lock ; struct list_head wait_list ; struct task_struct *owner ; void *magic ; struct lockdep_map dep_map ; }; struct mutex_waiter { struct list_head list ; struct task_struct *task ; void *magic ; }; struct rw_semaphore; struct rw_semaphore { long count ; struct list_head wait_list ; raw_spinlock_t wait_lock ; struct optimistic_spin_queue osq ; struct task_struct *owner ; struct lockdep_map dep_map ; }; struct completion { unsigned int done ; wait_queue_head_t wait ; }; union ktime { s64 tv64 ; }; typedef union ktime ktime_t; struct notifier_block; struct timer_list { struct hlist_node entry ; unsigned long expires ; void (*function)(unsigned long ) ; unsigned long data ; u32 flags ; int slack ; int start_pid ; void *start_site ; char start_comm[16U] ; struct lockdep_map lockdep_map ; }; struct hrtimer; enum hrtimer_restart; struct rb_node { unsigned long __rb_parent_color ; struct rb_node *rb_right ; struct rb_node *rb_left ; }; struct rb_root { struct rb_node *rb_node ; }; struct ctl_table; struct nsproxy; struct ctl_table_root; struct ctl_table_header; struct ctl_dir; typedef int proc_handler(struct ctl_table * , int , void * , size_t * , loff_t * ); struct ctl_table_poll { atomic_t event ; wait_queue_head_t wait ; }; struct ctl_table { char const *procname ; void *data ; int maxlen ; umode_t mode ; struct ctl_table *child ; proc_handler *proc_handler ; struct ctl_table_poll *poll ; void *extra1 ; void *extra2 ; }; struct ctl_node { struct rb_node node ; struct ctl_table_header *header ; }; struct __anonstruct____missing_field_name_50 { struct ctl_table *ctl_table ; int used ; int count ; int nreg ; }; union __anonunion____missing_field_name_49 { struct __anonstruct____missing_field_name_50 __annonCompField19 ; struct callback_head rcu ; }; struct ctl_table_set; struct ctl_table_header { union __anonunion____missing_field_name_49 __annonCompField20 ; struct completion *unregistering ; struct ctl_table *ctl_table_arg ; struct ctl_table_root *root ; struct 
ctl_table_set *set ; struct ctl_dir *parent ; struct ctl_node *node ; }; struct ctl_dir { struct ctl_table_header header ; struct rb_root root ; }; struct ctl_table_set { int (*is_seen)(struct ctl_table_set * ) ; struct ctl_dir dir ; }; struct ctl_table_root { struct ctl_table_set default_set ; struct ctl_table_set *(*lookup)(struct ctl_table_root * , struct nsproxy * ) ; int (*permissions)(struct ctl_table_header * , struct ctl_table * ) ; }; struct workqueue_struct; struct work_struct; struct work_struct { atomic_long_t data ; struct list_head entry ; void (*func)(struct work_struct * ) ; struct lockdep_map lockdep_map ; }; struct delayed_work { struct work_struct work ; struct timer_list timer ; struct workqueue_struct *wq ; int cpu ; }; struct notifier_block { int (*notifier_call)(struct notifier_block * , unsigned long , void * ) ; struct notifier_block *next ; int priority ; }; struct resource { resource_size_t start ; resource_size_t end ; char const *name ; unsigned long flags ; struct resource *parent ; struct resource *sibling ; struct resource *child ; }; struct pci_dev; struct pm_message { int event ; }; typedef struct pm_message pm_message_t; struct dev_pm_ops { int (*prepare)(struct device * ) ; void (*complete)(struct device * ) ; int (*suspend)(struct device * ) ; int (*resume)(struct device * ) ; int (*freeze)(struct device * ) ; int (*thaw)(struct device * ) ; int (*poweroff)(struct device * ) ; int (*restore)(struct device * ) ; int (*suspend_late)(struct device * ) ; int (*resume_early)(struct device * ) ; int (*freeze_late)(struct device * ) ; int (*thaw_early)(struct device * ) ; int (*poweroff_late)(struct device * ) ; int (*restore_early)(struct device * ) ; int (*suspend_noirq)(struct device * ) ; int (*resume_noirq)(struct device * ) ; int (*freeze_noirq)(struct device * ) ; int (*thaw_noirq)(struct device * ) ; int (*poweroff_noirq)(struct device * ) ; int (*restore_noirq)(struct device * ) ; int (*runtime_suspend)(struct device * ) ; int 
(*runtime_resume)(struct device * ) ; int (*runtime_idle)(struct device * ) ; }; enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; struct wakeup_source; struct wake_irq; struct pm_subsys_data { spinlock_t lock ; unsigned int refcount ; struct list_head clock_list ; }; struct dev_pm_qos; struct dev_pm_info { pm_message_t power_state ; unsigned char can_wakeup : 1 ; unsigned char async_suspend : 1 ; bool is_prepared ; bool is_suspended ; bool is_noirq_suspended ; bool is_late_suspended ; bool ignore_children ; bool early_init ; bool direct_complete ; spinlock_t lock ; struct list_head entry ; struct completion completion ; struct wakeup_source *wakeup ; bool wakeup_path ; bool syscore ; struct timer_list suspend_timer ; unsigned long timer_expires ; struct work_struct work ; wait_queue_head_t wait_queue ; struct wake_irq *wakeirq ; atomic_t usage_count ; atomic_t child_count ; unsigned char disable_depth : 3 ; unsigned char idle_notification : 1 ; unsigned char request_pending : 1 ; unsigned char deferred_resume : 1 ; unsigned char run_wake : 1 ; unsigned char runtime_auto : 1 ; unsigned char no_callbacks : 1 ; unsigned char irq_safe : 1 ; unsigned char use_autosuspend : 1 ; unsigned char timer_autosuspends : 1 ; unsigned char memalloc_noio : 1 ; enum rpm_request request ; enum rpm_status runtime_status ; int runtime_error ; int autosuspend_delay ; unsigned long last_busy ; unsigned long active_jiffies ; unsigned long suspended_jiffies ; unsigned long accounting_timestamp ; struct pm_subsys_data *subsys_data ; void (*set_latency_tolerance)(struct device * , s32 ) ; struct dev_pm_qos *qos ; }; struct dev_pm_domain { struct dev_pm_ops ops ; void (*detach)(struct device * , bool ) ; int (*activate)(struct device * ) ; void (*sync)(struct device * ) ; void (*dismiss)(struct device * ) ; }; struct pci_bus; 
struct __anonstruct_mm_context_t_115 { void *ldt ; int size ; unsigned short ia32_compat ; struct mutex lock ; void *vdso ; atomic_t perf_rdpmc_allowed ; }; typedef struct __anonstruct_mm_context_t_115 mm_context_t; struct bio_vec; struct llist_node; struct llist_node { struct llist_node *next ; }; struct cred; struct inode; struct arch_uprobe_task { unsigned long saved_scratch_register ; unsigned int saved_trap_nr ; unsigned int saved_tf ; }; enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; struct __anonstruct____missing_field_name_148 { struct arch_uprobe_task autask ; unsigned long vaddr ; }; struct __anonstruct____missing_field_name_149 { struct callback_head dup_xol_work ; unsigned long dup_xol_addr ; }; union __anonunion____missing_field_name_147 { struct __anonstruct____missing_field_name_148 __annonCompField33 ; struct __anonstruct____missing_field_name_149 __annonCompField34 ; }; struct uprobe; struct return_instance; struct uprobe_task { enum uprobe_task_state state ; union __anonunion____missing_field_name_147 __annonCompField35 ; struct uprobe *active_uprobe ; unsigned long xol_vaddr ; struct return_instance *return_instances ; unsigned int depth ; }; struct xol_area; struct uprobes_state { struct xol_area *xol_area ; }; struct address_space; struct mem_cgroup; typedef void compound_page_dtor(struct page * ); union __anonunion____missing_field_name_150 { struct address_space *mapping ; void *s_mem ; }; union __anonunion____missing_field_name_152 { unsigned long index ; void *freelist ; bool pfmemalloc ; }; struct __anonstruct____missing_field_name_156 { unsigned short inuse ; unsigned short objects : 15 ; unsigned char frozen : 1 ; }; union __anonunion____missing_field_name_155 { atomic_t _mapcount ; struct __anonstruct____missing_field_name_156 __annonCompField38 ; int units ; }; struct __anonstruct____missing_field_name_154 { union __anonunion____missing_field_name_155 __annonCompField39 ; 
atomic_t _count ; }; union __anonunion____missing_field_name_153 { unsigned long counters ; struct __anonstruct____missing_field_name_154 __annonCompField40 ; unsigned int active ; }; struct __anonstruct____missing_field_name_151 { union __anonunion____missing_field_name_152 __annonCompField37 ; union __anonunion____missing_field_name_153 __annonCompField41 ; }; struct __anonstruct____missing_field_name_158 { struct page *next ; int pages ; int pobjects ; }; struct slab; struct __anonstruct____missing_field_name_159 { compound_page_dtor *compound_dtor ; unsigned long compound_order ; }; union __anonunion____missing_field_name_157 { struct list_head lru ; struct __anonstruct____missing_field_name_158 __annonCompField43 ; struct slab *slab_page ; struct callback_head callback_head ; struct __anonstruct____missing_field_name_159 __annonCompField44 ; pgtable_t pmd_huge_pte ; }; struct kmem_cache; union __anonunion____missing_field_name_160 { unsigned long private ; spinlock_t *ptl ; struct kmem_cache *slab_cache ; struct page *first_page ; }; struct page { unsigned long flags ; union __anonunion____missing_field_name_150 __annonCompField36 ; struct __anonstruct____missing_field_name_151 __annonCompField42 ; union __anonunion____missing_field_name_157 __annonCompField45 ; union __anonunion____missing_field_name_160 __annonCompField46 ; struct mem_cgroup *mem_cgroup ; }; struct page_frag { struct page *page ; __u32 offset ; __u32 size ; }; struct __anonstruct_shared_161 { struct rb_node rb ; unsigned long rb_subtree_last ; }; struct anon_vma; struct vm_operations_struct; struct mempolicy; struct vm_area_struct { unsigned long vm_start ; unsigned long vm_end ; struct vm_area_struct *vm_next ; struct vm_area_struct *vm_prev ; struct rb_node vm_rb ; unsigned long rb_subtree_gap ; struct mm_struct *vm_mm ; pgprot_t vm_page_prot ; unsigned long vm_flags ; struct __anonstruct_shared_161 shared ; struct list_head anon_vma_chain ; struct anon_vma *anon_vma ; struct 
vm_operations_struct const *vm_ops ; unsigned long vm_pgoff ; struct file *vm_file ; void *vm_private_data ; struct mempolicy *vm_policy ; }; struct core_thread { struct task_struct *task ; struct core_thread *next ; }; struct core_state { atomic_t nr_threads ; struct core_thread dumper ; struct completion startup ; }; struct task_rss_stat { int events ; int count[3U] ; }; struct mm_rss_stat { atomic_long_t count[3U] ; }; struct kioctx_table; struct linux_binfmt; struct mmu_notifier_mm; struct mm_struct { struct vm_area_struct *mmap ; struct rb_root mm_rb ; u32 vmacache_seqnum ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; unsigned long mmap_base ; unsigned long mmap_legacy_base ; unsigned long task_size ; unsigned long highest_vm_end ; pgd_t *pgd ; atomic_t mm_users ; atomic_t mm_count ; atomic_long_t nr_ptes ; atomic_long_t nr_pmds ; int map_count ; spinlock_t page_table_lock ; struct rw_semaphore mmap_sem ; struct list_head mmlist ; unsigned long hiwater_rss ; unsigned long hiwater_vm ; unsigned long total_vm ; unsigned long locked_vm ; unsigned long pinned_vm ; unsigned long shared_vm ; unsigned long exec_vm ; unsigned long stack_vm ; unsigned long def_flags ; unsigned long start_code ; unsigned long end_code ; unsigned long start_data ; unsigned long end_data ; unsigned long start_brk ; unsigned long brk ; unsigned long start_stack ; unsigned long arg_start ; unsigned long arg_end ; unsigned long env_start ; unsigned long env_end ; unsigned long saved_auxv[46U] ; struct mm_rss_stat rss_stat ; struct linux_binfmt *binfmt ; cpumask_var_t cpu_vm_mask_var ; mm_context_t context ; unsigned long flags ; struct core_state *core_state ; spinlock_t ioctx_lock ; struct kioctx_table *ioctx_table ; struct task_struct *owner ; struct file *exe_file ; struct mmu_notifier_mm *mmu_notifier_mm ; struct cpumask cpumask_allocation ; unsigned long numa_next_scan ; unsigned long numa_scan_offset ; int 
numa_scan_seq ; bool tlb_flush_pending ; struct uprobes_state uprobes_state ; void *bd_addr ; }; typedef __u64 Elf64_Addr; typedef __u16 Elf64_Half; typedef __u32 Elf64_Word; typedef __u64 Elf64_Xword; struct elf64_sym { Elf64_Word st_name ; unsigned char st_info ; unsigned char st_other ; Elf64_Half st_shndx ; Elf64_Addr st_value ; Elf64_Xword st_size ; }; typedef struct elf64_sym Elf64_Sym; union __anonunion____missing_field_name_166 { unsigned long bitmap[4U] ; struct callback_head callback_head ; }; struct idr_layer { int prefix ; int layer ; struct idr_layer *ary[256U] ; int count ; union __anonunion____missing_field_name_166 __annonCompField47 ; }; struct idr { struct idr_layer *hint ; struct idr_layer *top ; int layers ; int cur ; spinlock_t lock ; int id_free_cnt ; struct idr_layer *id_free ; }; struct ida_bitmap { long nr_busy ; unsigned long bitmap[15U] ; }; struct ida { struct idr idr ; struct ida_bitmap *free_bitmap ; }; struct dentry; struct iattr; struct super_block; struct file_system_type; struct kernfs_open_node; struct kernfs_iattrs; struct kernfs_root; struct kernfs_elem_dir { unsigned long subdirs ; struct rb_root children ; struct kernfs_root *root ; }; struct kernfs_node; struct kernfs_elem_symlink { struct kernfs_node *target_kn ; }; struct kernfs_ops; struct kernfs_elem_attr { struct kernfs_ops const *ops ; struct kernfs_open_node *open ; loff_t size ; struct kernfs_node *notify_next ; }; union __anonunion____missing_field_name_171 { struct kernfs_elem_dir dir ; struct kernfs_elem_symlink symlink ; struct kernfs_elem_attr attr ; }; struct kernfs_node { atomic_t count ; atomic_t active ; struct lockdep_map dep_map ; struct kernfs_node *parent ; char const *name ; struct rb_node rb ; void const *ns ; unsigned int hash ; union __anonunion____missing_field_name_171 __annonCompField48 ; void *priv ; unsigned short flags ; umode_t mode ; unsigned int ino ; struct kernfs_iattrs *iattr ; }; struct kernfs_syscall_ops { int (*remount_fs)(struct 
kernfs_root * , int * , char * ) ; int (*show_options)(struct seq_file * , struct kernfs_root * ) ; int (*mkdir)(struct kernfs_node * , char const * , umode_t ) ; int (*rmdir)(struct kernfs_node * ) ; int (*rename)(struct kernfs_node * , struct kernfs_node * , char const * ) ; }; struct kernfs_root { struct kernfs_node *kn ; unsigned int flags ; struct ida ino_ida ; struct kernfs_syscall_ops *syscall_ops ; struct list_head supers ; wait_queue_head_t deactivate_waitq ; }; struct kernfs_open_file { struct kernfs_node *kn ; struct file *file ; void *priv ; struct mutex mutex ; int event ; struct list_head list ; char *prealloc_buf ; size_t atomic_write_len ; bool mmapped ; struct vm_operations_struct const *vm_ops ; }; struct kernfs_ops { int (*seq_show)(struct seq_file * , void * ) ; void *(*seq_start)(struct seq_file * , loff_t * ) ; void *(*seq_next)(struct seq_file * , void * , loff_t * ) ; void (*seq_stop)(struct seq_file * , void * ) ; ssize_t (*read)(struct kernfs_open_file * , char * , size_t , loff_t ) ; size_t atomic_write_len ; bool prealloc ; ssize_t (*write)(struct kernfs_open_file * , char * , size_t , loff_t ) ; int (*mmap)(struct kernfs_open_file * , struct vm_area_struct * ) ; struct lock_class_key lockdep_key ; }; struct sock; struct kobject; enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; struct kobj_ns_type_operations { enum kobj_ns_type type ; bool (*current_may_mount)(void) ; void *(*grab_current_ns)(void) ; void const *(*netlink_ns)(struct sock * ) ; void const *(*initial_ns)(void) ; void (*drop_ns)(void * ) ; }; struct bin_attribute; struct attribute { char const *name ; umode_t mode ; bool ignore_lockdep ; struct lock_class_key *key ; struct lock_class_key skey ; }; struct attribute_group { char const *name ; umode_t (*is_visible)(struct kobject * , struct attribute * , int ) ; struct attribute **attrs ; struct bin_attribute **bin_attrs ; }; struct bin_attribute { struct attribute attr ; size_t size ; 
/* Continuation: bin_attribute tail (binary sysfs file hooks), sysfs_ops,
 * kref refcount, the core kobject / kobj_type object model, uevent env and
 * kset containers, and the kernel_param_ops module-parameter callback table. */
void *private ; ssize_t (*read)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; ssize_t (*write)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; int (*mmap)(struct file * , struct kobject * , struct bin_attribute * , struct vm_area_struct * ) ; }; struct sysfs_ops { ssize_t (*show)(struct kobject * , struct attribute * , char * ) ; ssize_t (*store)(struct kobject * , struct attribute * , char const * , size_t ) ; }; struct kref { atomic_t refcount ; }; struct kset; struct kobj_type; struct kobject { char const *name ; struct list_head entry ; struct kobject *parent ; struct kset *kset ; struct kobj_type *ktype ; struct kernfs_node *sd ; struct kref kref ; struct delayed_work release ; unsigned char state_initialized : 1 ; unsigned char state_in_sysfs : 1 ; unsigned char state_add_uevent_sent : 1 ; unsigned char state_remove_uevent_sent : 1 ; unsigned char uevent_suppress : 1 ; }; struct kobj_type { void (*release)(struct kobject * ) ; struct sysfs_ops const *sysfs_ops ; struct attribute **default_attrs ; struct kobj_ns_type_operations const *(*child_ns_type)(struct kobject * ) ; void const *(*namespace)(struct kobject * ) ; }; struct kobj_uevent_env { char *argv[3U] ; char *envp[32U] ; int envp_idx ; char buf[2048U] ; int buflen ; }; struct kset_uevent_ops { int (* const filter)(struct kset * , struct kobject * ) ; char const *(* const name)(struct kset * , struct kobject * ) ; int (* const uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ; }; struct kset { struct list_head list ; spinlock_t list_lock ; struct kobject kobj ; struct kset_uevent_ops const *uevent_ops ; }; struct kernel_param; struct kernel_param_ops { unsigned int flags ; int (*set)(char const * , struct kernel_param const * ) ; int (*get)(char * , struct kernel_param const * ) ; void (*free)(void * ) ; }; struct kparam_string; struct kparam_array; union __anonunion____missing_field_name_172 { void 
/* Continuation: kernel_param and its string/array payloads, latch_tree_node,
 * module kobject/attribute plumbing, the module_state lifecycle enum, and the
 * beginning of struct module (symbol tables, params). Cut mid-struct at EOL. */
*arg ; struct kparam_string const *str ; struct kparam_array const *arr ; }; struct kernel_param { char const *name ; struct module *mod ; struct kernel_param_ops const *ops ; u16 const perm ; s8 level ; u8 flags ; union __anonunion____missing_field_name_172 __annonCompField49 ; }; struct kparam_string { unsigned int maxlen ; char *string ; }; struct kparam_array { unsigned int max ; unsigned int elemsize ; unsigned int *num ; struct kernel_param_ops const *ops ; void *elem ; }; struct latch_tree_node { struct rb_node node[2U] ; }; struct mod_arch_specific { }; struct module_param_attrs; struct module_kobject { struct kobject kobj ; struct module *mod ; struct kobject *drivers_dir ; struct module_param_attrs *mp ; struct completion *kobj_completion ; }; struct module_attribute { struct attribute attr ; ssize_t (*show)(struct module_attribute * , struct module_kobject * , char * ) ; ssize_t (*store)(struct module_attribute * , struct module_kobject * , char const * , size_t ) ; void (*setup)(struct module * , char const * ) ; int (*test)(struct module * ) ; void (*free)(struct module * ) ; }; struct exception_table_entry; enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; struct mod_tree_node { struct module *mod ; struct latch_tree_node node ; }; struct module_sect_attrs; struct module_notes_attrs; struct tracepoint; struct trace_event_call; struct trace_enum_map; struct module { enum module_state state ; struct list_head list ; char name[56U] ; struct module_kobject mkobj ; struct module_attribute *modinfo_attrs ; char const *version ; char const *srcversion ; struct kobject *holders_dir ; struct kernel_symbol const *syms ; unsigned long const *crcs ; unsigned int num_syms ; struct mutex param_lock ; struct kernel_param *kp ; unsigned int num_kp ; unsigned int num_gpl_syms ; struct kernel_symbol const *gpl_syms ; unsigned long const *gpl_crcs ; struct kernel_symbol const *unused_syms ; unsigned 
/* Continuation: remainder of struct module (init/core layout, bug tables,
 * symtab, tracepoints, ftrace callsites, refcount/exit), then kernel_cap_t,
 * plist_node, cputime_t, and SysV IPC per-task bookkeeping structs. */
long const *unused_crcs ; unsigned int num_unused_syms ; unsigned int num_unused_gpl_syms ; struct kernel_symbol const *unused_gpl_syms ; unsigned long const *unused_gpl_crcs ; bool sig_ok ; bool async_probe_requested ; struct kernel_symbol const *gpl_future_syms ; unsigned long const *gpl_future_crcs ; unsigned int num_gpl_future_syms ; unsigned int num_exentries ; struct exception_table_entry *extable ; int (*init)(void) ; void *module_init ; void *module_core ; unsigned int init_size ; unsigned int core_size ; unsigned int init_text_size ; unsigned int core_text_size ; struct mod_tree_node mtn_core ; struct mod_tree_node mtn_init ; unsigned int init_ro_size ; unsigned int core_ro_size ; struct mod_arch_specific arch ; unsigned int taints ; unsigned int num_bugs ; struct list_head bug_list ; struct bug_entry *bug_table ; Elf64_Sym *symtab ; Elf64_Sym *core_symtab ; unsigned int num_symtab ; unsigned int core_num_syms ; char *strtab ; char *core_strtab ; struct module_sect_attrs *sect_attrs ; struct module_notes_attrs *notes_attrs ; char *args ; void *percpu ; unsigned int percpu_size ; unsigned int num_tracepoints ; struct tracepoint * const *tracepoints_ptrs ; unsigned int num_trace_bprintk_fmt ; char const **trace_bprintk_fmt_start ; struct trace_event_call **trace_events ; unsigned int num_trace_events ; struct trace_enum_map **trace_enums ; unsigned int num_trace_enums ; unsigned int num_ftrace_callsites ; unsigned long *ftrace_callsites ; bool klp_alive ; struct list_head source_list ; struct list_head target_list ; void (*exit)(void) ; atomic_t refcnt ; ctor_fn_t (**ctors)(void) ; unsigned int num_ctors ; }; struct kernel_cap_struct { __u32 cap[2U] ; }; typedef struct kernel_cap_struct kernel_cap_t; struct plist_node { int prio ; struct list_head prio_list ; struct list_head node_list ; }; typedef unsigned long cputime_t; struct sem_undo_list; struct sysv_sem { struct sem_undo_list *undo_list ; }; struct user_struct; struct sysv_shm { struct list_head 
/* Continuation: signal-delivery types — sigset_t, signal/restore handler
 * typedefs, sigval, the per-cause siginfo payload structs (_kill, _timer,
 * _rt, _sigchld, _sigfault, _sigpoll, _sigsys) and siginfo_t itself, plus
 * sigpending, sigaction/k_sigaction and the pid_type enum. */
shm_clist ; }; struct __anonstruct_sigset_t_180 { unsigned long sig[1U] ; }; typedef struct __anonstruct_sigset_t_180 sigset_t; struct siginfo; typedef void __signalfn_t(int ); typedef __signalfn_t *__sighandler_t; typedef void __restorefn_t(void); typedef __restorefn_t *__sigrestore_t; union sigval { int sival_int ; void *sival_ptr ; }; typedef union sigval sigval_t; struct __anonstruct__kill_182 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; }; struct __anonstruct__timer_183 { __kernel_timer_t _tid ; int _overrun ; char _pad[0U] ; sigval_t _sigval ; int _sys_private ; }; struct __anonstruct__rt_184 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; sigval_t _sigval ; }; struct __anonstruct__sigchld_185 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; int _status ; __kernel_clock_t _utime ; __kernel_clock_t _stime ; }; struct __anonstruct__addr_bnd_187 { void *_lower ; void *_upper ; }; struct __anonstruct__sigfault_186 { void *_addr ; short _addr_lsb ; struct __anonstruct__addr_bnd_187 _addr_bnd ; }; struct __anonstruct__sigpoll_188 { long _band ; int _fd ; }; struct __anonstruct__sigsys_189 { void *_call_addr ; int _syscall ; unsigned int _arch ; }; union __anonunion__sifields_181 { int _pad[28U] ; struct __anonstruct__kill_182 _kill ; struct __anonstruct__timer_183 _timer ; struct __anonstruct__rt_184 _rt ; struct __anonstruct__sigchld_185 _sigchld ; struct __anonstruct__sigfault_186 _sigfault ; struct __anonstruct__sigpoll_188 _sigpoll ; struct __anonstruct__sigsys_189 _sigsys ; }; struct siginfo { int si_signo ; int si_errno ; int si_code ; union __anonunion__sifields_181 _sifields ; }; typedef struct siginfo siginfo_t; struct sigpending { struct list_head list ; sigset_t signal ; }; struct sigaction { __sighandler_t sa_handler ; unsigned long sa_flags ; __sigrestore_t sa_restorer ; sigset_t sa_mask ; }; struct k_sigaction { struct sigaction sa ; }; enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; struct pid_namespace; 
/* Continuation: PID bookkeeping (upid/pid/pid_link), percpu_counter, seccomp,
 * rlimit, the timerqueue and hrtimer clock/CPU-base structs, per-task I/O
 * accounting, and the start of latency_record (cut at EOL). */
struct upid { int nr ; struct pid_namespace *ns ; struct hlist_node pid_chain ; }; struct pid { atomic_t count ; unsigned int level ; struct hlist_head tasks[3U] ; struct callback_head rcu ; struct upid numbers[1U] ; }; struct pid_link { struct hlist_node node ; struct pid *pid ; }; struct percpu_counter { raw_spinlock_t lock ; s64 count ; struct list_head list ; s32 *counters ; }; struct seccomp_filter; struct seccomp { int mode ; struct seccomp_filter *filter ; }; struct rt_mutex_waiter; struct rlimit { __kernel_ulong_t rlim_cur ; __kernel_ulong_t rlim_max ; }; struct timerqueue_node { struct rb_node node ; ktime_t expires ; }; struct timerqueue_head { struct rb_root head ; struct timerqueue_node *next ; }; struct hrtimer_clock_base; struct hrtimer_cpu_base; enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; struct hrtimer { struct timerqueue_node node ; ktime_t _softexpires ; enum hrtimer_restart (*function)(struct hrtimer * ) ; struct hrtimer_clock_base *base ; unsigned long state ; int start_pid ; void *start_site ; char start_comm[16U] ; }; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base ; int index ; clockid_t clockid ; struct timerqueue_head active ; ktime_t (*get_time)(void) ; ktime_t offset ; }; struct hrtimer_cpu_base { raw_spinlock_t lock ; seqcount_t seq ; struct hrtimer *running ; unsigned int cpu ; unsigned int active_bases ; unsigned int clock_was_set_seq ; bool migration_enabled ; bool nohz_active ; unsigned char in_hrtirq : 1 ; unsigned char hres_active : 1 ; unsigned char hang_detected : 1 ; ktime_t expires_next ; struct hrtimer *next_timer ; unsigned int nr_events ; unsigned int nr_retries ; unsigned int nr_hangs ; unsigned int max_hang_time ; struct hrtimer_clock_base clock_base[4U] ; }; struct task_io_accounting { u64 rchar ; u64 wchar ; u64 syscr ; u64 syscw ; u64 read_bytes ; u64 write_bytes ; u64 cancelled_write_bytes ; }; struct latency_record { unsigned long backtrace[12U] ; unsigned int count ; unsigned 
/* Continuation: latency_record tail, assoc_array, and the in-kernel key
 * service types (keyring_index_key, struct key with its payload unions);
 * group_info and the start of struct cred (cut at EOL). */
long time ; unsigned long max ; }; struct assoc_array_ptr; struct assoc_array { struct assoc_array_ptr *root ; unsigned long nr_leaves_on_tree ; }; typedef int32_t key_serial_t; typedef uint32_t key_perm_t; struct key; struct signal_struct; struct key_type; struct keyring_index_key { struct key_type *type ; char const *description ; size_t desc_len ; }; union __anonunion____missing_field_name_196 { struct list_head graveyard_link ; struct rb_node serial_node ; }; struct key_user; union __anonunion____missing_field_name_197 { time_t expiry ; time_t revoked_at ; }; struct __anonstruct____missing_field_name_199 { struct key_type *type ; char *description ; }; union __anonunion____missing_field_name_198 { struct keyring_index_key index_key ; struct __anonstruct____missing_field_name_199 __annonCompField52 ; }; union __anonunion_type_data_200 { struct list_head link ; unsigned long x[2U] ; void *p[2U] ; int reject_error ; }; union __anonunion_payload_202 { unsigned long value ; void *rcudata ; void *data ; void *data2[2U] ; }; union __anonunion____missing_field_name_201 { union __anonunion_payload_202 payload ; struct assoc_array keys ; }; struct key { atomic_t usage ; key_serial_t serial ; union __anonunion____missing_field_name_196 __annonCompField50 ; struct rw_semaphore sem ; struct key_user *user ; void *security ; union __anonunion____missing_field_name_197 __annonCompField51 ; time_t last_used_at ; kuid_t uid ; kgid_t gid ; key_perm_t perm ; unsigned short quotalen ; unsigned short datalen ; unsigned long flags ; union __anonunion____missing_field_name_198 __annonCompField53 ; union __anonunion_type_data_200 type_data ; union __anonunion____missing_field_name_201 __annonCompField54 ; }; struct audit_context; struct group_info { atomic_t usage ; int ngroups ; int nblocks ; kgid_t small_block[32U] ; kgid_t *blocks[0U] ; }; struct cred { atomic_t usage ; atomic_t subscribers ; void *put_addr ; unsigned int magic ; kuid_t uid ; kgid_t gid ; kuid_t suid ; kgid_t sgid 
/* Continuation: struct cred tail (effective/fs IDs, capability sets,
 * keyrings), percpu_ref, then the cgroup core: cgroup_subsys_state,
 * css_set, and the start of struct cgroup (cut at EOL). */
; kuid_t euid ; kgid_t egid ; kuid_t fsuid ; kgid_t fsgid ; unsigned int securebits ; kernel_cap_t cap_inheritable ; kernel_cap_t cap_permitted ; kernel_cap_t cap_effective ; kernel_cap_t cap_bset ; unsigned char jit_keyring ; struct key *session_keyring ; struct key *process_keyring ; struct key *thread_keyring ; struct key *request_key_auth ; void *security ; struct user_struct *user ; struct user_namespace *user_ns ; struct group_info *group_info ; struct callback_head rcu ; }; struct percpu_ref; typedef void percpu_ref_func_t(struct percpu_ref * ); struct percpu_ref { atomic_long_t count ; unsigned long percpu_count_ptr ; percpu_ref_func_t *release ; percpu_ref_func_t *confirm_switch ; bool force_atomic ; struct callback_head rcu ; }; struct cgroup; struct cgroup_root; struct cgroup_subsys; struct cgroup_taskset; struct cgroup_subsys_state { struct cgroup *cgroup ; struct cgroup_subsys *ss ; struct percpu_ref refcnt ; struct cgroup_subsys_state *parent ; struct list_head sibling ; struct list_head children ; int id ; unsigned int flags ; u64 serial_nr ; struct callback_head callback_head ; struct work_struct destroy_work ; }; struct css_set { atomic_t refcount ; struct hlist_node hlist ; struct list_head tasks ; struct list_head mg_tasks ; struct list_head cgrp_links ; struct cgroup *dfl_cgrp ; struct cgroup_subsys_state *subsys[12U] ; struct list_head mg_preload_node ; struct list_head mg_node ; struct cgroup *mg_src_cgrp ; struct css_set *mg_dst_cset ; struct list_head e_cset_node[12U] ; struct callback_head callback_head ; }; struct cgroup { struct cgroup_subsys_state self ; unsigned long flags ; int id ; int populated_cnt ; struct kernfs_node *kn ; struct kernfs_node *procs_kn ; struct kernfs_node *populated_kn ; unsigned int subtree_control ; unsigned int child_subsys_mask ; struct cgroup_subsys_state *subsys[12U] ; struct cgroup_root *root ; struct list_head cset_links ; struct list_head e_csets[12U] ; struct list_head pidlists ; struct mutex 
/* Continuation: struct cgroup tail, cgroup_root (hierarchy root), the cftype
 * control-file descriptor (seq_file/read/write hooks), and the start of the
 * cgroup_subsys callback table (cut at EOL). */
pidlist_mutex ; wait_queue_head_t offline_waitq ; struct work_struct release_agent_work ; }; struct cgroup_root { struct kernfs_root *kf_root ; unsigned int subsys_mask ; int hierarchy_id ; struct cgroup cgrp ; atomic_t nr_cgrps ; struct list_head root_list ; unsigned int flags ; struct idr cgroup_idr ; char release_agent_path[4096U] ; char name[64U] ; }; struct cftype { char name[64U] ; int private ; umode_t mode ; size_t max_write_len ; unsigned int flags ; struct cgroup_subsys *ss ; struct list_head node ; struct kernfs_ops *kf_ops ; u64 (*read_u64)(struct cgroup_subsys_state * , struct cftype * ) ; s64 (*read_s64)(struct cgroup_subsys_state * , struct cftype * ) ; int (*seq_show)(struct seq_file * , void * ) ; void *(*seq_start)(struct seq_file * , loff_t * ) ; void *(*seq_next)(struct seq_file * , void * , loff_t * ) ; void (*seq_stop)(struct seq_file * , void * ) ; int (*write_u64)(struct cgroup_subsys_state * , struct cftype * , u64 ) ; int (*write_s64)(struct cgroup_subsys_state * , struct cftype * , s64 ) ; ssize_t (*write)(struct kernfs_open_file * , char * , size_t , loff_t ) ; struct lock_class_key lockdep_key ; }; struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state * ) ; int (*css_online)(struct cgroup_subsys_state * ) ; void (*css_offline)(struct cgroup_subsys_state * ) ; void (*css_released)(struct cgroup_subsys_state * ) ; void (*css_free)(struct cgroup_subsys_state * ) ; void (*css_reset)(struct cgroup_subsys_state * ) ; void (*css_e_css_changed)(struct cgroup_subsys_state * ) ; int (*can_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*cancel_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*fork)(struct task_struct * ) ; void (*exit)(struct cgroup_subsys_state * , struct cgroup_subsys_state * , struct task_struct * ) ; void (*bind)(struct cgroup_subsys_state * ) ; int disabled ; int 
/* Continuation: cgroup_subsys tail, then per-process signal/accounting
 * structs: sighand_struct, pacct_struct, cpu_itimer, cputime aggregates,
 * thread_group_cputimer, and the start of signal_struct (cut at EOL). */
early_init ; bool broken_hierarchy ; bool warned_broken_hierarchy ; int id ; char const *name ; struct cgroup_root *root ; struct idr css_idr ; struct list_head cfts ; struct cftype *dfl_cftypes ; struct cftype *legacy_cftypes ; unsigned int depends_on ; }; struct futex_pi_state; struct robust_list_head; struct bio_list; struct fs_struct; struct perf_event_context; struct blk_plug; struct nameidata; struct cfs_rq; struct task_group; struct sighand_struct { atomic_t count ; struct k_sigaction action[64U] ; spinlock_t siglock ; wait_queue_head_t signalfd_wqh ; }; struct pacct_struct { int ac_flag ; long ac_exitcode ; unsigned long ac_mem ; cputime_t ac_utime ; cputime_t ac_stime ; unsigned long ac_minflt ; unsigned long ac_majflt ; }; struct cpu_itimer { cputime_t expires ; cputime_t incr ; u32 error ; u32 incr_error ; }; struct cputime { cputime_t utime ; cputime_t stime ; }; struct task_cputime { cputime_t utime ; cputime_t stime ; unsigned long long sum_exec_runtime ; }; struct task_cputime_atomic { atomic64_t utime ; atomic64_t stime ; atomic64_t sum_exec_runtime ; }; struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic ; int running ; }; struct autogroup; struct tty_struct; struct taskstats; struct tty_audit_buf; struct signal_struct { atomic_t sigcnt ; atomic_t live ; int nr_threads ; struct list_head thread_head ; wait_queue_head_t wait_chldexit ; struct task_struct *curr_target ; struct sigpending shared_pending ; int group_exit_code ; int notify_count ; struct task_struct *group_exit_task ; int group_stop_count ; unsigned int flags ; unsigned char is_child_subreaper : 1 ; unsigned char has_child_subreaper : 1 ; int posix_timer_id ; struct list_head posix_timers ; struct hrtimer real_timer ; struct pid *leader_pid ; ktime_t it_real_incr ; struct cpu_itimer it[2U] ; struct thread_group_cputimer cputimer ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct pid *tty_old_pgrp ; int leader ; struct tty_struct *tty ; 
/* Continuation: signal_struct tail (cumulative time/fault/rss accounting,
 * rlimits, OOM tuning), per-user resource counters (user_struct), scheduler
 * run-delay stats, per-task delay accounting, and the start of the
 * load_weight/sched_avg scheduler bookkeeping (cut at EOL). */
struct autogroup *autogroup ; seqlock_t stats_lock ; cputime_t utime ; cputime_t stime ; cputime_t cutime ; cputime_t cstime ; cputime_t gtime ; cputime_t cgtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; unsigned long cnvcsw ; unsigned long cnivcsw ; unsigned long min_flt ; unsigned long maj_flt ; unsigned long cmin_flt ; unsigned long cmaj_flt ; unsigned long inblock ; unsigned long oublock ; unsigned long cinblock ; unsigned long coublock ; unsigned long maxrss ; unsigned long cmaxrss ; struct task_io_accounting ioac ; unsigned long long sum_sched_runtime ; struct rlimit rlim[16U] ; struct pacct_struct pacct ; struct taskstats *stats ; unsigned int audit_tty ; unsigned int audit_tty_log_passwd ; struct tty_audit_buf *tty_audit_buf ; oom_flags_t oom_flags ; short oom_score_adj ; short oom_score_adj_min ; struct mutex cred_guard_mutex ; }; struct user_struct { atomic_t __count ; atomic_t processes ; atomic_t sigpending ; atomic_t inotify_watches ; atomic_t inotify_devs ; atomic_t fanotify_listeners ; atomic_long_t epoll_watches ; unsigned long mq_bytes ; unsigned long locked_shm ; struct key *uid_keyring ; struct key *session_keyring ; struct hlist_node uidhash_node ; kuid_t uid ; atomic_long_t locked_vm ; }; struct backing_dev_info; struct reclaim_state; struct sched_info { unsigned long pcount ; unsigned long long run_delay ; unsigned long long last_arrival ; unsigned long long last_queued ; }; struct task_delay_info { spinlock_t lock ; unsigned int flags ; u64 blkio_start ; u64 blkio_delay ; u64 swapin_delay ; u32 blkio_count ; u32 swapin_count ; u64 freepages_start ; u64 freepages_delay ; u32 freepages_count ; }; struct wake_q_node { struct wake_q_node *next ; }; struct io_context; struct pipe_inode_info; struct uts_namespace; struct load_weight { unsigned long weight ; u32 inv_weight ; }; struct sched_avg { u64 last_runnable_update ; s64 decay_count ; unsigned long load_avg_contrib ; unsigned long utilization_avg_contrib ; 
/* Continuation: sched_avg tail, sched_statistics, and the three scheduler
 * entity types (CFS sched_entity, sched_rt_entity, sched_dl_entity), plus
 * memcg_oom_info; the very large struct task_struct begins at EOL. */
u32 runnable_avg_sum ; u32 avg_period ; u32 running_avg_sum ; }; struct sched_statistics { u64 wait_start ; u64 wait_max ; u64 wait_count ; u64 wait_sum ; u64 iowait_count ; u64 iowait_sum ; u64 sleep_start ; u64 sleep_max ; s64 sum_sleep_runtime ; u64 block_start ; u64 block_max ; u64 exec_max ; u64 slice_max ; u64 nr_migrations_cold ; u64 nr_failed_migrations_affine ; u64 nr_failed_migrations_running ; u64 nr_failed_migrations_hot ; u64 nr_forced_migrations ; u64 nr_wakeups ; u64 nr_wakeups_sync ; u64 nr_wakeups_migrate ; u64 nr_wakeups_local ; u64 nr_wakeups_remote ; u64 nr_wakeups_affine ; u64 nr_wakeups_affine_attempts ; u64 nr_wakeups_passive ; u64 nr_wakeups_idle ; }; struct sched_entity { struct load_weight load ; struct rb_node run_node ; struct list_head group_node ; unsigned int on_rq ; u64 exec_start ; u64 sum_exec_runtime ; u64 vruntime ; u64 prev_sum_exec_runtime ; u64 nr_migrations ; struct sched_statistics statistics ; int depth ; struct sched_entity *parent ; struct cfs_rq *cfs_rq ; struct cfs_rq *my_q ; struct sched_avg avg ; }; struct rt_rq; struct sched_rt_entity { struct list_head run_list ; unsigned long timeout ; unsigned long watchdog_stamp ; unsigned int time_slice ; struct sched_rt_entity *back ; struct sched_rt_entity *parent ; struct rt_rq *rt_rq ; struct rt_rq *my_q ; }; struct sched_dl_entity { struct rb_node rb_node ; u64 dl_runtime ; u64 dl_deadline ; u64 dl_period ; u64 dl_bw ; s64 runtime ; u64 deadline ; unsigned int flags ; int dl_throttled ; int dl_new ; int dl_boosted ; int dl_yielded ; struct hrtimer dl_timer ; }; struct memcg_oom_info { struct mem_cgroup *memcg ; gfp_t gfp_mask ; int order ; unsigned char may_oom : 1 ; }; struct sched_class; struct files_struct; struct compat_robust_list_head; struct numa_group; struct ftrace_ret_stack; struct task_struct { long volatile state ; void *stack ; atomic_t usage ; unsigned int flags ; unsigned int ptrace ; struct llist_node wake_entry ; int on_cpu ; struct task_struct *last_wakee 
/* struct task_struct (continued): scheduling entities and policy, RCU-tasks
 * bookkeeping, mm pointers and VMA cache, exit state, execve/iowait bitflags,
 * PIDs and process-tree links, and per-thread time accounting. */
; unsigned long wakee_flips ; unsigned long wakee_flip_decay_ts ; int wake_cpu ; int on_rq ; int prio ; int static_prio ; int normal_prio ; unsigned int rt_priority ; struct sched_class const *sched_class ; struct sched_entity se ; struct sched_rt_entity rt ; struct task_group *sched_task_group ; struct sched_dl_entity dl ; struct hlist_head preempt_notifiers ; unsigned int btrace_seq ; unsigned int policy ; int nr_cpus_allowed ; cpumask_t cpus_allowed ; unsigned long rcu_tasks_nvcsw ; bool rcu_tasks_holdout ; struct list_head rcu_tasks_holdout_list ; int rcu_tasks_idle_cpu ; struct sched_info sched_info ; struct list_head tasks ; struct plist_node pushable_tasks ; struct rb_node pushable_dl_tasks ; struct mm_struct *mm ; struct mm_struct *active_mm ; u32 vmacache_seqnum ; struct vm_area_struct *vmacache[4U] ; struct task_rss_stat rss_stat ; int exit_state ; int exit_code ; int exit_signal ; int pdeath_signal ; unsigned long jobctl ; unsigned int personality ; unsigned char in_execve : 1 ; unsigned char in_iowait : 1 ; unsigned char sched_reset_on_fork : 1 ; unsigned char sched_contributes_to_load : 1 ; unsigned char sched_migrated : 1 ; unsigned char memcg_kmem_skip_account : 1 ; unsigned char brk_randomized : 1 ; unsigned long atomic_flags ; struct restart_block restart_block ; pid_t pid ; pid_t tgid ; struct task_struct *real_parent ; struct task_struct *parent ; struct list_head children ; struct list_head sibling ; struct task_struct *group_leader ; struct list_head ptraced ; struct list_head ptrace_entry ; struct pid_link pids[3U] ; struct list_head thread_group ; struct list_head thread_node ; struct completion *vfork_done ; int *set_child_tid ; int *clear_child_tid ; cputime_t utime ; cputime_t stime ; cputime_t utimescaled ; cputime_t stimescaled ; cputime_t gtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; u64 start_time ; u64 real_start_time ; unsigned long min_flt ; unsigned long maj_flt ; struct task_cputime 
/* struct task_struct (continued): credentials, filesystem/namespace/signal
 * state, seccomp and audit, PI/rt-mutex and lockdep fields, block-I/O
 * plugging, I/O accounting, and cpuset memory-spread rotors. */
cputime_expires ; struct list_head cpu_timers[3U] ; struct cred const *real_cred ; struct cred const *cred ; char comm[16U] ; struct nameidata *nameidata ; struct sysv_sem sysvsem ; struct sysv_shm sysvshm ; unsigned long last_switch_count ; struct thread_struct thread ; struct fs_struct *fs ; struct files_struct *files ; struct nsproxy *nsproxy ; struct signal_struct *signal ; struct sighand_struct *sighand ; sigset_t blocked ; sigset_t real_blocked ; sigset_t saved_sigmask ; struct sigpending pending ; unsigned long sas_ss_sp ; size_t sas_ss_size ; int (*notifier)(void * ) ; void *notifier_data ; sigset_t *notifier_mask ; struct callback_head *task_works ; struct audit_context *audit_context ; kuid_t loginuid ; unsigned int sessionid ; struct seccomp seccomp ; u32 parent_exec_id ; u32 self_exec_id ; spinlock_t alloc_lock ; raw_spinlock_t pi_lock ; struct wake_q_node wake_q ; struct rb_root pi_waiters ; struct rb_node *pi_waiters_leftmost ; struct rt_mutex_waiter *pi_blocked_on ; struct mutex_waiter *blocked_on ; unsigned int irq_events ; unsigned long hardirq_enable_ip ; unsigned long hardirq_disable_ip ; unsigned int hardirq_enable_event ; unsigned int hardirq_disable_event ; int hardirqs_enabled ; int hardirq_context ; unsigned long softirq_disable_ip ; unsigned long softirq_enable_ip ; unsigned int softirq_disable_event ; unsigned int softirq_enable_event ; int softirqs_enabled ; int softirq_context ; u64 curr_chain_key ; int lockdep_depth ; unsigned int lockdep_recursion ; struct held_lock held_locks[48U] ; gfp_t lockdep_reclaim_gfp ; void *journal_info ; struct bio_list *bio_list ; struct blk_plug *plug ; struct reclaim_state *reclaim_state ; struct backing_dev_info *backing_dev_info ; struct io_context *io_context ; unsigned long ptrace_message ; siginfo_t *last_siginfo ; struct task_io_accounting ioac ; u64 acct_rss_mem1 ; u64 acct_vm_mem1 ; cputime_t acct_timexpd ; nodemask_t mems_allowed ; seqcount_t mems_allowed_seq ; int cpuset_mem_spread_rotor ; int 
/* struct task_struct tail: cgroups/futex/perf links, NUMA balancing state,
 * writeback dirtying counters, latency records, ftrace graph fields, and
 * memcg OOM info. Then the irqreturn enum and forward declarations for the
 * ixgbe/ethtool driver types the rest of this harness uses. */
cpuset_slab_spread_rotor ; struct css_set *cgroups ; struct list_head cg_list ; struct robust_list_head *robust_list ; struct compat_robust_list_head *compat_robust_list ; struct list_head pi_state_list ; struct futex_pi_state *pi_state_cache ; struct perf_event_context *perf_event_ctxp[2U] ; struct mutex perf_event_mutex ; struct list_head perf_event_list ; struct mempolicy *mempolicy ; short il_next ; short pref_node_fork ; int numa_scan_seq ; unsigned int numa_scan_period ; unsigned int numa_scan_period_max ; int numa_preferred_nid ; unsigned long numa_migrate_retry ; u64 node_stamp ; u64 last_task_numa_placement ; u64 last_sum_exec_runtime ; struct callback_head numa_work ; struct list_head numa_entry ; struct numa_group *numa_group ; unsigned long *numa_faults ; unsigned long total_numa_faults ; unsigned long numa_faults_locality[3U] ; unsigned long numa_pages_migrated ; struct callback_head rcu ; struct pipe_inode_info *splice_pipe ; struct page_frag task_frag ; struct task_delay_info *delays ; int make_it_fail ; int nr_dirtied ; int nr_dirtied_pause ; unsigned long dirty_paused_when ; int latency_record_count ; struct latency_record latency_record[32U] ; unsigned long timer_slack_ns ; unsigned long default_timer_slack_ns ; unsigned int kasan_depth ; int curr_ret_stack ; struct ftrace_ret_stack *ret_stack ; unsigned long long ftrace_timestamp ; atomic_t trace_overrun ; atomic_t tracing_graph_pause ; unsigned long trace ; unsigned long trace_recursion ; struct memcg_oom_info memcg_oom ; struct uprobe_task *utask ; unsigned int sequential_io ; unsigned int sequential_io_avg ; unsigned long task_state_change ; int pagefault_disabled ; }; enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; typedef enum irqreturn irqreturn_t; struct ixgbe_hw; struct ethtool_eeprom; struct ethtool_cmd; struct ethtool_rxnfc; struct ethtool_pauseparam; struct ieee_ets; struct ethtool_wolinfo; struct ethtool_channels; struct dcb_app; struct ethtool_coalesce; struct 
/* Continuation: driver-model device-ID tables (pci/acpi/of), klist_node,
 * seq_file and its seq_operations, pinctrl per-device state, dev_archdata,
 * and the start of struct bus_type (cut at EOL). */
ethtool_ringparam; struct ieee_pfc; typedef unsigned long kernel_ulong_t; struct pci_device_id { __u32 vendor ; __u32 device ; __u32 subvendor ; __u32 subdevice ; __u32 class ; __u32 class_mask ; kernel_ulong_t driver_data ; }; struct acpi_device_id { __u8 id[9U] ; kernel_ulong_t driver_data ; }; struct of_device_id { char name[32U] ; char type[32U] ; char compatible[128U] ; void const *data ; }; struct klist_node; struct klist_node { void *n_klist ; struct list_head n_node ; struct kref n_ref ; }; struct path; struct seq_file { char *buf ; size_t size ; size_t from ; size_t count ; size_t pad_until ; loff_t index ; loff_t read_pos ; u64 version ; struct mutex lock ; struct seq_operations const *op ; int poll_event ; struct user_namespace *user_ns ; void *private ; }; struct seq_operations { void *(*start)(struct seq_file * , loff_t * ) ; void (*stop)(struct seq_file * , void * ) ; void *(*next)(struct seq_file * , void * , loff_t * ) ; int (*show)(struct seq_file * , void * ) ; }; struct pinctrl; struct pinctrl_state; struct dev_pin_info { struct pinctrl *p ; struct pinctrl_state *default_state ; struct pinctrl_state *sleep_state ; struct pinctrl_state *idle_state ; }; struct dma_map_ops; struct dev_archdata { struct dma_map_ops *dma_ops ; void *iommu ; }; struct device_private; struct device_driver; struct driver_private; struct class; struct subsys_private; struct bus_type; struct device_node; struct fwnode_handle; struct iommu_ops; struct iommu_group; struct device_attribute; struct bus_type { char const *name ; char const *dev_name ; struct device *dev_root ; struct device_attribute *dev_attrs ; struct attribute_group const **bus_groups ; struct attribute_group const **dev_groups ; struct attribute_group const **drv_groups ; int (*match)(struct device * , struct device_driver * ) ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int 
/* Continuation: bus_type tail (PM hooks), the probe_type enum,
 * device_driver, struct class and class_attribute, and the start of
 * device_type (cut at EOL). */
(*online)(struct device * ) ; int (*offline)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct dev_pm_ops const *pm ; struct iommu_ops const *iommu_ops ; struct subsys_private *p ; struct lock_class_key lock_key ; }; struct device_type; enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; struct device_driver { char const *name ; struct bus_type *bus ; struct module *owner ; char const *mod_name ; bool suppress_bind_attrs ; enum probe_type probe_type ; struct of_device_id const *of_match_table ; struct acpi_device_id const *acpi_match_table ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct attribute_group const **groups ; struct dev_pm_ops const *pm ; struct driver_private *p ; }; struct class_attribute; struct class { char const *name ; struct module *owner ; struct class_attribute *class_attrs ; struct attribute_group const **dev_groups ; struct kobject *dev_kobj ; int (*dev_uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * ) ; void (*class_release)(struct class * ) ; void (*dev_release)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct kobj_ns_type_operations const *ns_type ; void const *(*namespace)(struct device * ) ; struct dev_pm_ops const *pm ; struct subsys_private *p ; }; struct class_attribute { struct attribute attr ; ssize_t (*show)(struct class * , struct class_attribute * , char * ) ; ssize_t (*store)(struct class * , struct class_attribute * , char const * , size_t ) ; }; struct device_type { char const *name ; struct attribute_group const **groups ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * , kuid_t * , kgid_t * ) ; 
void (*release)(struct device * ) ; struct dev_pm_ops const *pm ; }; struct device_attribute { struct attribute attr ; ssize_t (*show)(struct device * , struct device_attribute * , char * ) ; ssize_t (*store)(struct device * , struct device_attribute * , char const * , size_t ) ; }; struct device_dma_parameters { unsigned int max_segment_size ; unsigned long segment_boundary_mask ; }; struct dma_coherent_mem; struct cma; struct device { struct device *parent ; struct device_private *p ; struct kobject kobj ; char const *init_name ; struct device_type const *type ; struct mutex mutex ; struct bus_type *bus ; struct device_driver *driver ; void *platform_data ; void *driver_data ; struct dev_pm_info power ; struct dev_pm_domain *pm_domain ; struct dev_pin_info *pins ; int numa_node ; u64 *dma_mask ; u64 coherent_dma_mask ; unsigned long dma_pfn_offset ; struct device_dma_parameters *dma_parms ; struct list_head dma_pools ; struct dma_coherent_mem *dma_mem ; struct cma *cma_area ; struct dev_archdata archdata ; struct device_node *of_node ; struct fwnode_handle *fwnode ; dev_t devt ; u32 id ; spinlock_t devres_lock ; struct list_head devres_head ; struct klist_node knode_class ; struct class *class ; struct attribute_group const **groups ; void (*release)(struct device * ) ; struct iommu_group *iommu_group ; bool offline_disabled ; bool offline ; }; struct wakeup_source { char const *name ; struct list_head entry ; spinlock_t lock ; struct wake_irq *wakeirq ; struct timer_list timer ; unsigned long timer_expires ; ktime_t total_time ; ktime_t max_time ; ktime_t last_time ; ktime_t start_prevent_time ; ktime_t prevent_sleep_time ; unsigned long event_count ; unsigned long active_count ; unsigned long relax_count ; unsigned long expire_count ; unsigned long wakeup_count ; bool active ; bool autosleep_enabled ; }; struct hotplug_slot; struct pci_slot { struct pci_bus *bus ; struct list_head list ; struct hotplug_slot *hotplug ; unsigned char number ; struct kobject kobj 
; }; typedef int pci_power_t; typedef unsigned int pci_channel_state_t; enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ; typedef unsigned short pci_dev_flags_t; typedef unsigned short pci_bus_flags_t; enum pcie_link_width { PCIE_LNK_WIDTH_RESRV = 0, PCIE_LNK_X1 = 1, PCIE_LNK_X2 = 2, PCIE_LNK_X4 = 4, PCIE_LNK_X8 = 8, PCIE_LNK_X12 = 12, PCIE_LNK_X16 = 16, PCIE_LNK_X32 = 32, PCIE_LNK_WIDTH_UNKNOWN = 255 } ; enum pci_bus_speed { PCI_SPEED_33MHz = 0, PCI_SPEED_66MHz = 1, PCI_SPEED_66MHz_PCIX = 2, PCI_SPEED_100MHz_PCIX = 3, PCI_SPEED_133MHz_PCIX = 4, PCI_SPEED_66MHz_PCIX_ECC = 5, PCI_SPEED_100MHz_PCIX_ECC = 6, PCI_SPEED_133MHz_PCIX_ECC = 7, PCI_SPEED_66MHz_PCIX_266 = 9, PCI_SPEED_100MHz_PCIX_266 = 10, PCI_SPEED_133MHz_PCIX_266 = 11, AGP_UNKNOWN = 12, AGP_1X = 13, AGP_2X = 14, AGP_4X = 15, AGP_8X = 16, PCI_SPEED_66MHz_PCIX_533 = 17, PCI_SPEED_100MHz_PCIX_533 = 18, PCI_SPEED_133MHz_PCIX_533 = 19, PCIE_SPEED_2_5GT = 20, PCIE_SPEED_5_0GT = 21, PCIE_SPEED_8_0GT = 22, PCI_SPEED_UNKNOWN = 255 } ; struct pcie_link_state; struct pci_vpd; struct pci_sriov; struct pci_ats; struct proc_dir_entry; struct pci_driver; union __anonunion____missing_field_name_220 { struct pci_sriov *sriov ; struct pci_dev *physfn ; }; struct pci_dev { struct list_head bus_list ; struct pci_bus *bus ; struct pci_bus *subordinate ; void *sysdata ; struct proc_dir_entry *procent ; struct pci_slot *slot ; unsigned int devfn ; unsigned short vendor ; unsigned short device ; unsigned short subsystem_vendor ; unsigned short subsystem_device ; unsigned int class ; u8 revision ; u8 hdr_type ; u8 pcie_cap ; u8 msi_cap ; u8 msix_cap ; unsigned char pcie_mpss : 3 ; u8 rom_base_reg ; u8 pin ; u16 pcie_flags_reg ; u8 dma_alias_devfn ; struct pci_driver *driver ; u64 dma_mask ; struct device_dma_parameters dma_parms ; pci_power_t current_state ; u8 pm_cap ; unsigned char pme_support : 5 ; unsigned char pme_interrupt : 1 ; unsigned char pme_poll : 1 ; 
unsigned char d1_support : 1 ; unsigned char d2_support : 1 ; unsigned char no_d1d2 : 1 ; unsigned char no_d3cold : 1 ; unsigned char d3cold_allowed : 1 ; unsigned char mmio_always_on : 1 ; unsigned char wakeup_prepared : 1 ; unsigned char runtime_d3cold : 1 ; unsigned char ignore_hotplug : 1 ; unsigned int d3_delay ; unsigned int d3cold_delay ; struct pcie_link_state *link_state ; pci_channel_state_t error_state ; struct device dev ; int cfg_size ; unsigned int irq ; struct resource resource[17U] ; bool match_driver ; unsigned char transparent : 1 ; unsigned char multifunction : 1 ; unsigned char is_added : 1 ; unsigned char is_busmaster : 1 ; unsigned char no_msi : 1 ; unsigned char no_64bit_msi : 1 ; unsigned char block_cfg_access : 1 ; unsigned char broken_parity_status : 1 ; unsigned char irq_reroute_variant : 2 ; unsigned char msi_enabled : 1 ; unsigned char msix_enabled : 1 ; unsigned char ari_enabled : 1 ; unsigned char is_managed : 1 ; unsigned char needs_freset : 1 ; unsigned char state_saved : 1 ; unsigned char is_physfn : 1 ; unsigned char is_virtfn : 1 ; unsigned char reset_fn : 1 ; unsigned char is_hotplug_bridge : 1 ; unsigned char __aer_firmware_first_valid : 1 ; unsigned char __aer_firmware_first : 1 ; unsigned char broken_intx_masking : 1 ; unsigned char io_window_1k : 1 ; unsigned char irq_managed : 1 ; unsigned char has_secondary_link : 1 ; pci_dev_flags_t dev_flags ; atomic_t enable_cnt ; u32 saved_config_space[16U] ; struct hlist_head saved_cap_space ; struct bin_attribute *rom_attr ; int rom_attr_enabled ; struct bin_attribute *res_attr[17U] ; struct bin_attribute *res_attr_wc[17U] ; struct list_head msi_list ; struct attribute_group const **msi_irq_groups ; struct pci_vpd *vpd ; union __anonunion____missing_field_name_220 __annonCompField58 ; struct pci_ats *ats ; phys_addr_t rom ; size_t romlen ; char *driver_override ; }; struct pci_ops; struct msi_controller; struct pci_bus { struct list_head node ; struct pci_bus *parent ; struct 
list_head children ; struct list_head devices ; struct pci_dev *self ; struct list_head slots ; struct resource *resource[4U] ; struct list_head resources ; struct resource busn_res ; struct pci_ops *ops ; struct msi_controller *msi ; void *sysdata ; struct proc_dir_entry *procdir ; unsigned char number ; unsigned char primary ; unsigned char max_bus_speed ; unsigned char cur_bus_speed ; char name[48U] ; unsigned short bridge_ctl ; pci_bus_flags_t bus_flags ; struct device *bridge ; struct device dev ; struct bin_attribute *legacy_io ; struct bin_attribute *legacy_mem ; unsigned char is_added : 1 ; }; struct pci_ops { void *(*map_bus)(struct pci_bus * , unsigned int , int ) ; int (*read)(struct pci_bus * , unsigned int , int , int , u32 * ) ; int (*write)(struct pci_bus * , unsigned int , int , int , u32 ) ; }; struct pci_dynids { spinlock_t lock ; struct list_head list ; }; typedef unsigned int pci_ers_result_t; struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev * , enum pci_channel_state ) ; pci_ers_result_t (*mmio_enabled)(struct pci_dev * ) ; pci_ers_result_t (*link_reset)(struct pci_dev * ) ; pci_ers_result_t (*slot_reset)(struct pci_dev * ) ; void (*reset_notify)(struct pci_dev * , bool ) ; void (*resume)(struct pci_dev * ) ; }; struct pci_driver { struct list_head node ; char const *name ; struct pci_device_id const *id_table ; int (*probe)(struct pci_dev * , struct pci_device_id const * ) ; void (*remove)(struct pci_dev * ) ; int (*suspend)(struct pci_dev * , pm_message_t ) ; int (*suspend_late)(struct pci_dev * , pm_message_t ) ; int (*resume_early)(struct pci_dev * ) ; int (*resume)(struct pci_dev * ) ; void (*shutdown)(struct pci_dev * ) ; int (*sriov_configure)(struct pci_dev * , int ) ; struct pci_error_handlers const *err_handler ; struct device_driver driver ; struct pci_dynids dynids ; }; struct shrink_control { gfp_t gfp_mask ; unsigned long nr_to_scan ; int nid ; struct mem_cgroup *memcg ; }; struct shrinker { unsigned 
long (*count_objects)(struct shrinker * , struct shrink_control * ) ; unsigned long (*scan_objects)(struct shrinker * , struct shrink_control * ) ; int seeks ; long batch ; unsigned long flags ; struct list_head list ; atomic_long_t *nr_deferred ; }; struct file_ra_state; struct writeback_control; struct bdi_writeback; struct vm_fault { unsigned int flags ; unsigned long pgoff ; void *virtual_address ; struct page *cow_page ; struct page *page ; unsigned long max_pgoff ; pte_t *pte ; }; struct vm_operations_struct { void (*open)(struct vm_area_struct * ) ; void (*close)(struct vm_area_struct * ) ; int (*fault)(struct vm_area_struct * , struct vm_fault * ) ; void (*map_pages)(struct vm_area_struct * , struct vm_fault * ) ; int (*page_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*pfn_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*access)(struct vm_area_struct * , unsigned long , void * , int , int ) ; char const *(*name)(struct vm_area_struct * ) ; int (*set_policy)(struct vm_area_struct * , struct mempolicy * ) ; struct mempolicy *(*get_policy)(struct vm_area_struct * , unsigned long ) ; struct page *(*find_special_page)(struct vm_area_struct * , unsigned long ) ; }; struct kvec; struct scatterlist { unsigned long sg_magic ; unsigned long page_link ; unsigned int offset ; unsigned int length ; dma_addr_t dma_address ; unsigned int dma_length ; }; struct sg_table { struct scatterlist *sgl ; unsigned int nents ; unsigned int orig_nents ; }; struct dma_pool; struct msix_entry { u32 vector ; u16 entry ; }; struct dma_attrs { unsigned long flags[1U] ; }; enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; struct dma_map_ops { void *(*alloc)(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ) ; void (*free)(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ) ; int (*mmap)(struct device * , struct vm_area_struct * , void * , dma_addr_t , size_t , 
struct dma_attrs * ) ; int (*get_sgtable)(struct device * , struct sg_table * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; dma_addr_t (*map_page)(struct device * , struct page * , unsigned long , size_t , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_page)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ) ; int (*map_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*sync_single_for_cpu)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_single_for_device)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_sg_for_cpu)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; void (*sync_sg_for_device)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; int (*mapping_error)(struct device * , dma_addr_t ) ; int (*dma_supported)(struct device * , u64 ) ; int (*set_dma_mask)(struct device * , u64 ) ; int is_phys ; }; struct iovec { void *iov_base ; __kernel_size_t iov_len ; }; struct kvec { void *iov_base ; size_t iov_len ; }; union __anonunion____missing_field_name_221 { struct iovec const *iov ; struct kvec const *kvec ; struct bio_vec const *bvec ; }; struct iov_iter { int type ; size_t iov_offset ; size_t count ; union __anonunion____missing_field_name_221 __annonCompField59 ; unsigned long nr_segs ; }; struct dql { unsigned int num_queued ; unsigned int adj_limit ; unsigned int last_obj_cnt ; unsigned int limit ; unsigned int num_completed ; unsigned int prev_ovlimit ; unsigned int prev_num_queued ; unsigned int prev_last_obj_cnt ; unsigned int lowest_slack ; unsigned long slack_start_time ; unsigned int max_limit ; unsigned int min_limit ; unsigned int slack_hold_time ; }; typedef unsigned short __kernel_sa_family_t; typedef 
__kernel_sa_family_t sa_family_t; struct sockaddr { sa_family_t sa_family ; char sa_data[14U] ; }; struct kiocb; struct msghdr { void *msg_name ; int msg_namelen ; struct iov_iter msg_iter ; void *msg_control ; __kernel_size_t msg_controllen ; unsigned int msg_flags ; struct kiocb *msg_iocb ; }; struct __anonstruct_sync_serial_settings_223 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; }; typedef struct __anonstruct_sync_serial_settings_223 sync_serial_settings; struct __anonstruct_te1_settings_224 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; unsigned int slot_map ; }; typedef struct __anonstruct_te1_settings_224 te1_settings; struct __anonstruct_raw_hdlc_proto_225 { unsigned short encoding ; unsigned short parity ; }; typedef struct __anonstruct_raw_hdlc_proto_225 raw_hdlc_proto; struct __anonstruct_fr_proto_226 { unsigned int t391 ; unsigned int t392 ; unsigned int n391 ; unsigned int n392 ; unsigned int n393 ; unsigned short lmi ; unsigned short dce ; }; typedef struct __anonstruct_fr_proto_226 fr_proto; struct __anonstruct_fr_proto_pvc_227 { unsigned int dlci ; }; typedef struct __anonstruct_fr_proto_pvc_227 fr_proto_pvc; struct __anonstruct_fr_proto_pvc_info_228 { unsigned int dlci ; char master[16U] ; }; typedef struct __anonstruct_fr_proto_pvc_info_228 fr_proto_pvc_info; struct __anonstruct_cisco_proto_229 { unsigned int interval ; unsigned int timeout ; }; typedef struct __anonstruct_cisco_proto_229 cisco_proto; struct ifmap { unsigned long mem_start ; unsigned long mem_end ; unsigned short base_addr ; unsigned char irq ; unsigned char dma ; unsigned char port ; }; union __anonunion_ifs_ifsu_230 { raw_hdlc_proto *raw_hdlc ; cisco_proto *cisco ; fr_proto *fr ; fr_proto_pvc *fr_pvc ; fr_proto_pvc_info *fr_pvc_info ; sync_serial_settings *sync ; te1_settings *te1 ; }; struct if_settings { unsigned int type ; unsigned int size ; union __anonunion_ifs_ifsu_230 ifs_ifsu ; }; union 
__anonunion_ifr_ifrn_231 { char ifrn_name[16U] ; }; union __anonunion_ifr_ifru_232 { struct sockaddr ifru_addr ; struct sockaddr ifru_dstaddr ; struct sockaddr ifru_broadaddr ; struct sockaddr ifru_netmask ; struct sockaddr ifru_hwaddr ; short ifru_flags ; int ifru_ivalue ; int ifru_mtu ; struct ifmap ifru_map ; char ifru_slave[16U] ; char ifru_newname[16U] ; void *ifru_data ; struct if_settings ifru_settings ; }; struct ifreq { union __anonunion_ifr_ifrn_231 ifr_ifrn ; union __anonunion_ifr_ifru_232 ifr_ifru ; }; struct hlist_bl_node; struct hlist_bl_head { struct hlist_bl_node *first ; }; struct hlist_bl_node { struct hlist_bl_node *next ; struct hlist_bl_node **pprev ; }; struct __anonstruct____missing_field_name_237 { spinlock_t lock ; int count ; }; union __anonunion____missing_field_name_236 { struct __anonstruct____missing_field_name_237 __annonCompField60 ; }; struct lockref { union __anonunion____missing_field_name_236 __annonCompField61 ; }; struct vfsmount; struct __anonstruct____missing_field_name_239 { u32 hash ; u32 len ; }; union __anonunion____missing_field_name_238 { struct __anonstruct____missing_field_name_239 __annonCompField62 ; u64 hash_len ; }; struct qstr { union __anonunion____missing_field_name_238 __annonCompField63 ; unsigned char const *name ; }; struct dentry_operations; union __anonunion_d_u_240 { struct hlist_node d_alias ; struct callback_head d_rcu ; }; struct dentry { unsigned int d_flags ; seqcount_t d_seq ; struct hlist_bl_node d_hash ; struct dentry *d_parent ; struct qstr d_name ; struct inode *d_inode ; unsigned char d_iname[32U] ; struct lockref d_lockref ; struct dentry_operations const *d_op ; struct super_block *d_sb ; unsigned long d_time ; void *d_fsdata ; struct list_head d_lru ; struct list_head d_child ; struct list_head d_subdirs ; union __anonunion_d_u_240 d_u ; }; struct dentry_operations { int (*d_revalidate)(struct dentry * , unsigned int ) ; int (*d_weak_revalidate)(struct dentry * , unsigned int ) ; int 
(*d_hash)(struct dentry const * , struct qstr * ) ; int (*d_compare)(struct dentry const * , struct dentry const * , unsigned int , char const * , struct qstr const * ) ; int (*d_delete)(struct dentry const * ) ; void (*d_release)(struct dentry * ) ; void (*d_prune)(struct dentry * ) ; void (*d_iput)(struct dentry * , struct inode * ) ; char *(*d_dname)(struct dentry * , char * , int ) ; struct vfsmount *(*d_automount)(struct path * ) ; int (*d_manage)(struct dentry * , bool ) ; struct inode *(*d_select_inode)(struct dentry * , unsigned int ) ; }; struct path { struct vfsmount *mnt ; struct dentry *dentry ; }; struct list_lru_one { struct list_head list ; long nr_items ; }; struct list_lru_memcg { struct list_lru_one *lru[0U] ; }; struct list_lru_node { spinlock_t lock ; struct list_lru_one lru ; struct list_lru_memcg *memcg_lrus ; }; struct list_lru { struct list_lru_node *node ; struct list_head list ; }; struct __anonstruct____missing_field_name_244 { struct radix_tree_node *parent ; void *private_data ; }; union __anonunion____missing_field_name_243 { struct __anonstruct____missing_field_name_244 __annonCompField64 ; struct callback_head callback_head ; }; struct radix_tree_node { unsigned int path ; unsigned int count ; union __anonunion____missing_field_name_243 __annonCompField65 ; struct list_head private_list ; void *slots[64U] ; unsigned long tags[3U][1U] ; }; struct radix_tree_root { unsigned int height ; gfp_t gfp_mask ; struct radix_tree_node *rnode ; }; struct fiemap_extent { __u64 fe_logical ; __u64 fe_physical ; __u64 fe_length ; __u64 fe_reserved64[2U] ; __u32 fe_flags ; __u32 fe_reserved[3U] ; }; enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; struct block_device; struct bio_vec { struct page *bv_page ; unsigned int bv_len ; unsigned int bv_offset ; }; struct export_operations; struct poll_table_struct; struct kstatfs; struct swap_info_struct; struct iattr { unsigned int ia_valid ; umode_t ia_mode ; kuid_t 
ia_uid ; kgid_t ia_gid ; loff_t ia_size ; struct timespec ia_atime ; struct timespec ia_mtime ; struct timespec ia_ctime ; struct file *ia_file ; }; struct dquot; typedef __kernel_uid32_t projid_t; struct __anonstruct_kprojid_t_248 { projid_t val ; }; typedef struct __anonstruct_kprojid_t_248 kprojid_t; enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; typedef long long qsize_t; union __anonunion____missing_field_name_249 { kuid_t uid ; kgid_t gid ; kprojid_t projid ; }; struct kqid { union __anonunion____missing_field_name_249 __annonCompField67 ; enum quota_type type ; }; struct mem_dqblk { qsize_t dqb_bhardlimit ; qsize_t dqb_bsoftlimit ; qsize_t dqb_curspace ; qsize_t dqb_rsvspace ; qsize_t dqb_ihardlimit ; qsize_t dqb_isoftlimit ; qsize_t dqb_curinodes ; time_t dqb_btime ; time_t dqb_itime ; }; struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format ; int dqi_fmt_id ; struct list_head dqi_dirty_list ; unsigned long dqi_flags ; unsigned int dqi_bgrace ; unsigned int dqi_igrace ; qsize_t dqi_max_spc_limit ; qsize_t dqi_max_ino_limit ; void *dqi_priv ; }; struct dquot { struct hlist_node dq_hash ; struct list_head dq_inuse ; struct list_head dq_free ; struct list_head dq_dirty ; struct mutex dq_lock ; atomic_t dq_count ; wait_queue_head_t dq_wait_unused ; struct super_block *dq_sb ; struct kqid dq_id ; loff_t dq_off ; unsigned long dq_flags ; struct mem_dqblk dq_dqb ; }; struct quota_format_ops { int (*check_quota_file)(struct super_block * , int ) ; int (*read_file_info)(struct super_block * , int ) ; int (*write_file_info)(struct super_block * , int ) ; int (*free_file_info)(struct super_block * , int ) ; int (*read_dqblk)(struct dquot * ) ; int (*commit_dqblk)(struct dquot * ) ; int (*release_dqblk)(struct dquot * ) ; }; struct dquot_operations { int (*write_dquot)(struct dquot * ) ; struct dquot *(*alloc_dquot)(struct super_block * , int ) ; void (*destroy_dquot)(struct dquot * ) ; int (*acquire_dquot)(struct dquot * ) 
; int (*release_dquot)(struct dquot * ) ; int (*mark_dirty)(struct dquot * ) ; int (*write_info)(struct super_block * , int ) ; qsize_t *(*get_reserved_space)(struct inode * ) ; int (*get_projid)(struct inode * , kprojid_t * ) ; }; struct qc_dqblk { int d_fieldmask ; u64 d_spc_hardlimit ; u64 d_spc_softlimit ; u64 d_ino_hardlimit ; u64 d_ino_softlimit ; u64 d_space ; u64 d_ino_count ; s64 d_ino_timer ; s64 d_spc_timer ; int d_ino_warns ; int d_spc_warns ; u64 d_rt_spc_hardlimit ; u64 d_rt_spc_softlimit ; u64 d_rt_space ; s64 d_rt_spc_timer ; int d_rt_spc_warns ; }; struct qc_type_state { unsigned int flags ; unsigned int spc_timelimit ; unsigned int ino_timelimit ; unsigned int rt_spc_timelimit ; unsigned int spc_warnlimit ; unsigned int ino_warnlimit ; unsigned int rt_spc_warnlimit ; unsigned long long ino ; blkcnt_t blocks ; blkcnt_t nextents ; }; struct qc_state { unsigned int s_incoredqs ; struct qc_type_state s_state[3U] ; }; struct qc_info { int i_fieldmask ; unsigned int i_flags ; unsigned int i_spc_timelimit ; unsigned int i_ino_timelimit ; unsigned int i_rt_spc_timelimit ; unsigned int i_spc_warnlimit ; unsigned int i_ino_warnlimit ; unsigned int i_rt_spc_warnlimit ; }; struct quotactl_ops { int (*quota_on)(struct super_block * , int , int , struct path * ) ; int (*quota_off)(struct super_block * , int ) ; int (*quota_enable)(struct super_block * , unsigned int ) ; int (*quota_disable)(struct super_block * , unsigned int ) ; int (*quota_sync)(struct super_block * , int ) ; int (*set_info)(struct super_block * , int , struct qc_info * ) ; int (*get_dqblk)(struct super_block * , struct kqid , struct qc_dqblk * ) ; int (*set_dqblk)(struct super_block * , struct kqid , struct qc_dqblk * ) ; int (*get_state)(struct super_block * , struct qc_state * ) ; int (*rm_xquota)(struct super_block * , unsigned int ) ; }; struct quota_format_type { int qf_fmt_id ; struct quota_format_ops const *qf_ops ; struct module *qf_owner ; struct quota_format_type *qf_next ; }; 
struct quota_info { unsigned int flags ; struct mutex dqio_mutex ; struct mutex dqonoff_mutex ; struct inode *files[3U] ; struct mem_dqinfo info[3U] ; struct quota_format_ops const *ops[3U] ; }; struct kiocb { struct file *ki_filp ; loff_t ki_pos ; void (*ki_complete)(struct kiocb * , long , long ) ; void *private ; int ki_flags ; }; struct address_space_operations { int (*writepage)(struct page * , struct writeback_control * ) ; int (*readpage)(struct file * , struct page * ) ; int (*writepages)(struct address_space * , struct writeback_control * ) ; int (*set_page_dirty)(struct page * ) ; int (*readpages)(struct file * , struct address_space * , struct list_head * , unsigned int ) ; int (*write_begin)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page ** , void ** ) ; int (*write_end)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page * , void * ) ; sector_t (*bmap)(struct address_space * , sector_t ) ; void (*invalidatepage)(struct page * , unsigned int , unsigned int ) ; int (*releasepage)(struct page * , gfp_t ) ; void (*freepage)(struct page * ) ; ssize_t (*direct_IO)(struct kiocb * , struct iov_iter * , loff_t ) ; int (*migratepage)(struct address_space * , struct page * , struct page * , enum migrate_mode ) ; int (*launder_page)(struct page * ) ; int (*is_partially_uptodate)(struct page * , unsigned long , unsigned long ) ; void (*is_dirty_writeback)(struct page * , bool * , bool * ) ; int (*error_remove_page)(struct address_space * , struct page * ) ; int (*swap_activate)(struct swap_info_struct * , struct file * , sector_t * ) ; void (*swap_deactivate)(struct file * ) ; }; struct address_space { struct inode *host ; struct radix_tree_root page_tree ; spinlock_t tree_lock ; atomic_t i_mmap_writable ; struct rb_root i_mmap ; struct rw_semaphore i_mmap_rwsem ; unsigned long nrpages ; unsigned long nrshadows ; unsigned long writeback_index ; struct address_space_operations const 
*a_ops ; unsigned long flags ; spinlock_t private_lock ; struct list_head private_list ; void *private_data ; }; struct request_queue; struct hd_struct; struct gendisk; struct block_device { dev_t bd_dev ; int bd_openers ; struct inode *bd_inode ; struct super_block *bd_super ; struct mutex bd_mutex ; struct list_head bd_inodes ; void *bd_claiming ; void *bd_holder ; int bd_holders ; bool bd_write_holder ; struct list_head bd_holder_disks ; struct block_device *bd_contains ; unsigned int bd_block_size ; struct hd_struct *bd_part ; unsigned int bd_part_count ; int bd_invalidated ; struct gendisk *bd_disk ; struct request_queue *bd_queue ; struct list_head bd_list ; unsigned long bd_private ; int bd_fsfreeze_count ; struct mutex bd_fsfreeze_mutex ; }; struct posix_acl; struct inode_operations; union __anonunion____missing_field_name_252 { unsigned int const i_nlink ; unsigned int __i_nlink ; }; union __anonunion____missing_field_name_253 { struct hlist_head i_dentry ; struct callback_head i_rcu ; }; struct file_lock_context; struct cdev; union __anonunion____missing_field_name_254 { struct pipe_inode_info *i_pipe ; struct block_device *i_bdev ; struct cdev *i_cdev ; char *i_link ; }; struct inode { umode_t i_mode ; unsigned short i_opflags ; kuid_t i_uid ; kgid_t i_gid ; unsigned int i_flags ; struct posix_acl *i_acl ; struct posix_acl *i_default_acl ; struct inode_operations const *i_op ; struct super_block *i_sb ; struct address_space *i_mapping ; void *i_security ; unsigned long i_ino ; union __anonunion____missing_field_name_252 __annonCompField68 ; dev_t i_rdev ; loff_t i_size ; struct timespec i_atime ; struct timespec i_mtime ; struct timespec i_ctime ; spinlock_t i_lock ; unsigned short i_bytes ; unsigned int i_blkbits ; blkcnt_t i_blocks ; unsigned long i_state ; struct mutex i_mutex ; unsigned long dirtied_when ; unsigned long dirtied_time_when ; struct hlist_node i_hash ; struct list_head i_wb_list ; struct bdi_writeback *i_wb ; int i_wb_frn_winner ; u16 
i_wb_frn_avg_time ; u16 i_wb_frn_history ; struct list_head i_lru ; struct list_head i_sb_list ; union __anonunion____missing_field_name_253 __annonCompField69 ; u64 i_version ; atomic_t i_count ; atomic_t i_dio_count ; atomic_t i_writecount ; atomic_t i_readcount ; struct file_operations const *i_fop ; struct file_lock_context *i_flctx ; struct address_space i_data ; struct list_head i_devices ; union __anonunion____missing_field_name_254 __annonCompField70 ; __u32 i_generation ; __u32 i_fsnotify_mask ; struct hlist_head i_fsnotify_marks ; void *i_private ; }; struct fown_struct { rwlock_t lock ; struct pid *pid ; enum pid_type pid_type ; kuid_t uid ; kuid_t euid ; int signum ; }; struct file_ra_state { unsigned long start ; unsigned int size ; unsigned int async_size ; unsigned int ra_pages ; unsigned int mmap_miss ; loff_t prev_pos ; }; union __anonunion_f_u_255 { struct llist_node fu_llist ; struct callback_head fu_rcuhead ; }; struct file { union __anonunion_f_u_255 f_u ; struct path f_path ; struct inode *f_inode ; struct file_operations const *f_op ; spinlock_t f_lock ; atomic_long_t f_count ; unsigned int f_flags ; fmode_t f_mode ; struct mutex f_pos_lock ; loff_t f_pos ; struct fown_struct f_owner ; struct cred const *f_cred ; struct file_ra_state f_ra ; u64 f_version ; void *f_security ; void *private_data ; struct list_head f_ep_links ; struct list_head f_tfile_llink ; struct address_space *f_mapping ; }; typedef void *fl_owner_t; struct file_lock; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock * , struct file_lock * ) ; void (*fl_release_private)(struct file_lock * ) ; }; struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock * , struct file_lock * ) ; unsigned long (*lm_owner_key)(struct file_lock * ) ; fl_owner_t (*lm_get_owner)(fl_owner_t ) ; void (*lm_put_owner)(fl_owner_t ) ; void (*lm_notify)(struct file_lock * ) ; int (*lm_grant)(struct file_lock * , int ) ; bool (*lm_break)(struct file_lock * ) ; int 
(*lm_change)(struct file_lock * , int , struct list_head * ) ; void (*lm_setup)(struct file_lock * , void ** ) ; }; struct net; struct nlm_lockowner; struct nfs_lock_info { u32 state ; struct nlm_lockowner *owner ; struct list_head list ; }; struct nfs4_lock_state; struct nfs4_lock_info { struct nfs4_lock_state *owner ; }; struct fasync_struct; struct __anonstruct_afs_257 { struct list_head link ; int state ; }; union __anonunion_fl_u_256 { struct nfs_lock_info nfs_fl ; struct nfs4_lock_info nfs4_fl ; struct __anonstruct_afs_257 afs ; }; struct file_lock { struct file_lock *fl_next ; struct list_head fl_list ; struct hlist_node fl_link ; struct list_head fl_block ; fl_owner_t fl_owner ; unsigned int fl_flags ; unsigned char fl_type ; unsigned int fl_pid ; int fl_link_cpu ; struct pid *fl_nspid ; wait_queue_head_t fl_wait ; struct file *fl_file ; loff_t fl_start ; loff_t fl_end ; struct fasync_struct *fl_fasync ; unsigned long fl_break_time ; unsigned long fl_downgrade_time ; struct file_lock_operations const *fl_ops ; struct lock_manager_operations const *fl_lmops ; union __anonunion_fl_u_256 fl_u ; }; struct file_lock_context { spinlock_t flc_lock ; struct list_head flc_flock ; struct list_head flc_posix ; struct list_head flc_lease ; }; struct fasync_struct { spinlock_t fa_lock ; int magic ; int fa_fd ; struct fasync_struct *fa_next ; struct file *fa_file ; struct callback_head fa_rcu ; }; struct sb_writers { struct percpu_counter counter[3U] ; wait_queue_head_t wait ; int frozen ; wait_queue_head_t wait_unfrozen ; struct lockdep_map lock_map[3U] ; }; struct super_operations; struct xattr_handler; struct mtd_info; struct super_block { struct list_head s_list ; dev_t s_dev ; unsigned char s_blocksize_bits ; unsigned long s_blocksize ; loff_t s_maxbytes ; struct file_system_type *s_type ; struct super_operations const *s_op ; struct dquot_operations const *dq_op ; struct quotactl_ops const *s_qcop ; struct export_operations const *s_export_op ; unsigned long 
s_flags ; unsigned long s_iflags ; unsigned long s_magic ; struct dentry *s_root ; struct rw_semaphore s_umount ; int s_count ; atomic_t s_active ; void *s_security ; struct xattr_handler const **s_xattr ; struct list_head s_inodes ; struct hlist_bl_head s_anon ; struct list_head s_mounts ; struct block_device *s_bdev ; struct backing_dev_info *s_bdi ; struct mtd_info *s_mtd ; struct hlist_node s_instances ; unsigned int s_quota_types ; struct quota_info s_dquot ; struct sb_writers s_writers ; char s_id[32U] ; u8 s_uuid[16U] ; void *s_fs_info ; unsigned int s_max_links ; fmode_t s_mode ; u32 s_time_gran ; struct mutex s_vfs_rename_mutex ; char *s_subtype ; char *s_options ; struct dentry_operations const *s_d_op ; int cleancache_poolid ; struct shrinker s_shrink ; atomic_long_t s_remove_count ; int s_readonly_remount ; struct workqueue_struct *s_dio_done_wq ; struct hlist_head s_pins ; struct list_lru s_dentry_lru ; struct list_lru s_inode_lru ; struct callback_head rcu ; int s_stack_depth ; }; struct fiemap_extent_info { unsigned int fi_flags ; unsigned int fi_extents_mapped ; unsigned int fi_extents_max ; struct fiemap_extent *fi_extents_start ; }; struct dir_context; struct dir_context { int (*actor)(struct dir_context * , char const * , int , loff_t , u64 , unsigned int ) ; loff_t pos ; }; struct file_operations { struct module *owner ; loff_t (*llseek)(struct file * , loff_t , int ) ; ssize_t (*read)(struct file * , char * , size_t , loff_t * ) ; ssize_t (*write)(struct file * , char const * , size_t , loff_t * ) ; ssize_t (*read_iter)(struct kiocb * , struct iov_iter * ) ; ssize_t (*write_iter)(struct kiocb * , struct iov_iter * ) ; int (*iterate)(struct file * , struct dir_context * ) ; unsigned int (*poll)(struct file * , struct poll_table_struct * ) ; long (*unlocked_ioctl)(struct file * , unsigned int , unsigned long ) ; long (*compat_ioctl)(struct file * , unsigned int , unsigned long ) ; int (*mmap)(struct file * , struct vm_area_struct * ) ; int 
/* CIL-flattened Linux-kernel VFS operation tables (auto-generated; do not hand-edit —
 * member order defines the type layout the verifier relies on).
 * This span carries: the tail of `struct file_operations` (its opening lies before this
 * chunk), the complete `struct inode_operations` vtable of function pointers for inode
 * lifecycle/namespace operations (lookup, create, link, rename, xattr handlers, ...),
 * and the opening of `struct super_operations` (closed in the next span). */
(*mremap)(struct file * , struct vm_area_struct * ) ; int (*open)(struct inode * , struct file * ) ; int (*flush)(struct file * , fl_owner_t ) ; int (*release)(struct inode * , struct file * ) ; int (*fsync)(struct file * , loff_t , loff_t , int ) ; int (*aio_fsync)(struct kiocb * , int ) ; int (*fasync)(int , struct file * , int ) ; int (*lock)(struct file * , int , struct file_lock * ) ; ssize_t (*sendpage)(struct file * , struct page * , int , size_t , loff_t * , int ) ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; int (*check_flags)(int ) ; int (*flock)(struct file * , int , struct file_lock * ) ; ssize_t (*splice_write)(struct pipe_inode_info * , struct file * , loff_t * , size_t , unsigned int ) ; ssize_t (*splice_read)(struct file * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*setlease)(struct file * , long , struct file_lock ** , void ** ) ; long (*fallocate)(struct file * , int , loff_t , loff_t ) ; void (*show_fdinfo)(struct seq_file * , struct file * ) ; }; struct inode_operations { struct dentry *(*lookup)(struct inode * , struct dentry * , unsigned int ) ; char const *(*follow_link)(struct dentry * , void ** ) ; int (*permission)(struct inode * , int ) ; struct posix_acl *(*get_acl)(struct inode * , int ) ; int (*readlink)(struct dentry * , char * , int ) ; void (*put_link)(struct inode * , void * ) ; int (*create)(struct inode * , struct dentry * , umode_t , bool ) ; int (*link)(struct dentry * , struct inode * , struct dentry * ) ; int (*unlink)(struct inode * , struct dentry * ) ; int (*symlink)(struct inode * , struct dentry * , char const * ) ; int (*mkdir)(struct inode * , struct dentry * , umode_t ) ; int (*rmdir)(struct inode * , struct dentry * ) ; int (*mknod)(struct inode * , struct dentry * , umode_t , dev_t ) ; int (*rename)(struct inode * , struct dentry * , struct inode * , struct dentry * ) ; int (*rename2)(struct inode * , struct dentry * 
, struct inode * , struct dentry * , unsigned int ) ; int (*setattr)(struct dentry * , struct iattr * ) ; int (*getattr)(struct vfsmount * , struct dentry * , struct kstat * ) ; int (*setxattr)(struct dentry * , char const * , void const * , size_t , int ) ; ssize_t (*getxattr)(struct dentry * , char const * , void * , size_t ) ; ssize_t (*listxattr)(struct dentry * , char * , size_t ) ; int (*removexattr)(struct dentry * , char const * ) ; int (*fiemap)(struct inode * , struct fiemap_extent_info * , u64 , u64 ) ; int (*update_time)(struct inode * , struct timespec * , int ) ; int (*atomic_open)(struct inode * , struct dentry * , struct file * , unsigned int , umode_t , int * ) ; int (*tmpfile)(struct inode * , struct dentry * , umode_t ) ; int (*set_acl)(struct inode * , struct posix_acl * , int ) ; }; struct super_operations { struct inode *(*alloc_inode)(struct super_block * ) ; void (*destroy_inode)(struct inode * ) ; void (*dirty_inode)(struct inode * , int ) ; int (*write_inode)(struct inode * , struct writeback_control * ) ; int (*drop_inode)(struct inode * ) ; void (*evict_inode)(struct inode * ) ; void (*put_super)(struct super_block * ) ; int (*sync_fs)(struct super_block * , int ) ; int (*freeze_super)(struct super_block * ) ; int (*freeze_fs)(struct super_block * ) ; int (*thaw_super)(struct super_block * ) ; int (*unfreeze_fs)(struct super_block * ) ; int (*statfs)(struct dentry * , struct kstatfs * ) ; int (*remount_fs)(struct super_block * , int * , char * ) ; void (*umount_begin)(struct super_block * ) ; int (*show_options)(struct seq_file * , struct dentry * ) ; int (*show_devname)(struct seq_file * , struct dentry * ) ; int (*show_path)(struct seq_file * , struct dentry * ) ; int (*show_stats)(struct seq_file * , struct dentry * ) ; ssize_t (*quota_read)(struct super_block * , int , char * , size_t , loff_t ) ; ssize_t (*quota_write)(struct super_block * , int , char const * , size_t , loff_t ) ; struct dquot **(*get_dquots)(struct inode * ) ; int 
/* CIL-flattened declarations (auto-generated; byte layout is significant — do not reorder).
 * This span carries: the tail of `struct super_operations`, the full
 * `struct file_system_type` registration record, compat-layer typedefs and robust-futex
 * structs, the socket-state enum and `struct socket`, the complete `struct proto_ops`
 * protocol-family vtable (bind/connect/sendmsg/recvmsg/...), basic network types
 * (`in6_addr`, `ethhdr`), and the pipe structures with the opening of
 * `struct pipe_buf_operations` (closed in the next span). */
(*bdev_try_to_free_page)(struct super_block * , struct page * , gfp_t ) ; long (*nr_cached_objects)(struct super_block * , struct shrink_control * ) ; long (*free_cached_objects)(struct super_block * , struct shrink_control * ) ; }; struct file_system_type { char const *name ; int fs_flags ; struct dentry *(*mount)(struct file_system_type * , int , char const * , void * ) ; void (*kill_sb)(struct super_block * ) ; struct module *owner ; struct file_system_type *next ; struct hlist_head fs_supers ; struct lock_class_key s_lock_key ; struct lock_class_key s_umount_key ; struct lock_class_key s_vfs_rename_key ; struct lock_class_key s_writers_key[3U] ; struct lock_class_key i_lock_key ; struct lock_class_key i_mutex_key ; struct lock_class_key i_mutex_dir_key ; }; typedef s32 compat_time_t; typedef s32 compat_long_t; typedef u32 compat_uptr_t; struct compat_timespec { compat_time_t tv_sec ; s32 tv_nsec ; }; struct compat_robust_list { compat_uptr_t next ; }; struct compat_robust_list_head { struct compat_robust_list list ; compat_long_t futex_offset ; compat_uptr_t list_op_pending ; }; enum ldv_25071 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ; typedef enum ldv_25071 socket_state; struct socket_wq { wait_queue_head_t wait ; struct fasync_struct *fasync_list ; struct callback_head rcu ; }; struct proto_ops; struct socket { socket_state state ; short type ; unsigned long flags ; struct socket_wq *wq ; struct file *file ; struct sock *sk ; struct proto_ops const *ops ; }; struct proto_ops { int family ; struct module *owner ; int (*release)(struct socket * ) ; int (*bind)(struct socket * , struct sockaddr * , int ) ; int (*connect)(struct socket * , struct sockaddr * , int , int ) ; int (*socketpair)(struct socket * , struct socket * ) ; int (*accept)(struct socket * , struct socket * , int ) ; int (*getname)(struct socket * , struct sockaddr * , int * , int ) ; unsigned int (*poll)(struct file * , struct socket * , 
struct poll_table_struct * ) ; int (*ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*listen)(struct socket * , int ) ; int (*shutdown)(struct socket * , int ) ; int (*setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct socket * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct socket * , int , int , char * , int * ) ; int (*sendmsg)(struct socket * , struct msghdr * , size_t ) ; int (*recvmsg)(struct socket * , struct msghdr * , size_t , int ) ; int (*mmap)(struct file * , struct socket * , struct vm_area_struct * ) ; ssize_t (*sendpage)(struct socket * , struct page * , int , size_t , int ) ; ssize_t (*splice_read)(struct socket * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*set_peek_off)(struct sock * , int ) ; }; struct exception_table_entry { int insn ; int fixup ; }; struct in6_addr; struct sk_buff; typedef u64 netdev_features_t; union __anonunion_in6_u_272 { __u8 u6_addr8[16U] ; __be16 u6_addr16[8U] ; __be32 u6_addr32[4U] ; }; struct in6_addr { union __anonunion_in6_u_272 in6_u ; }; struct ethhdr { unsigned char h_dest[6U] ; unsigned char h_source[6U] ; __be16 h_proto ; }; struct pipe_buf_operations; struct pipe_buffer { struct page *page ; unsigned int offset ; unsigned int len ; struct pipe_buf_operations const *ops ; unsigned int flags ; unsigned long private ; }; struct pipe_inode_info { struct mutex mutex ; wait_queue_head_t wait ; unsigned int nrbufs ; unsigned int curbuf ; unsigned int buffers ; unsigned int readers ; unsigned int writers ; unsigned int files ; unsigned int waiting_writers ; unsigned int r_counter ; unsigned int w_counter ; struct page *tmp_page ; struct fasync_struct *fasync_readers ; struct fasync_struct *fasync_writers ; struct pipe_buffer *bufs ; }; struct pipe_buf_operations { int 
/* CIL-flattened declarations (auto-generated; member order is layout — do not reorder).
 * This span carries: the tail of `struct pipe_buf_operations`, netfilter bridge/conntrack
 * helper structs, the skb fragment and shared-info structures, the anonymous unions CIL
 * synthesized for `struct sk_buff`'s overlapping fields (names like
 * `__anonunion____missing_field_name_NNN` are CIL-generated, not kernel names), the full
 * core network-buffer `struct sk_buff` with its bitfield flag block, and the start of the
 * ethtool ioctl structures (`ethtool_cmd`, `ethtool_drvinfo`, ..., continuing into the
 * next span). */
can_merge ; int (*confirm)(struct pipe_inode_info * , struct pipe_buffer * ) ; void (*release)(struct pipe_inode_info * , struct pipe_buffer * ) ; int (*steal)(struct pipe_inode_info * , struct pipe_buffer * ) ; void (*get)(struct pipe_inode_info * , struct pipe_buffer * ) ; }; struct napi_struct; struct nf_conntrack { atomic_t use ; }; union __anonunion____missing_field_name_277 { struct net_device *physoutdev ; char neigh_header[8U] ; }; union __anonunion____missing_field_name_278 { __be32 ipv4_daddr ; struct in6_addr ipv6_daddr ; }; struct nf_bridge_info { atomic_t use ; unsigned char orig_proto ; bool pkt_otherhost ; __u16 frag_max_size ; unsigned int mask ; struct net_device *physindev ; union __anonunion____missing_field_name_277 __annonCompField74 ; union __anonunion____missing_field_name_278 __annonCompField75 ; }; struct sk_buff_head { struct sk_buff *next ; struct sk_buff *prev ; __u32 qlen ; spinlock_t lock ; }; struct skb_frag_struct; typedef struct skb_frag_struct skb_frag_t; struct __anonstruct_page_279 { struct page *p ; }; struct skb_frag_struct { struct __anonstruct_page_279 page ; __u32 page_offset ; __u32 size ; }; struct skb_shared_hwtstamps { ktime_t hwtstamp ; }; struct skb_shared_info { unsigned char nr_frags ; __u8 tx_flags ; unsigned short gso_size ; unsigned short gso_segs ; unsigned short gso_type ; struct sk_buff *frag_list ; struct skb_shared_hwtstamps hwtstamps ; u32 tskey ; __be32 ip6_frag_id ; atomic_t dataref ; void *destructor_arg ; skb_frag_t frags[17U] ; }; typedef unsigned int sk_buff_data_t; struct __anonstruct____missing_field_name_281 { u32 stamp_us ; u32 stamp_jiffies ; }; union __anonunion____missing_field_name_280 { u64 v64 ; struct __anonstruct____missing_field_name_281 __annonCompField76 ; }; struct skb_mstamp { union __anonunion____missing_field_name_280 __annonCompField77 ; }; union __anonunion____missing_field_name_284 { ktime_t tstamp ; struct skb_mstamp skb_mstamp ; }; struct __anonstruct____missing_field_name_283 { 
struct sk_buff *next ; struct sk_buff *prev ; union __anonunion____missing_field_name_284 __annonCompField78 ; }; union __anonunion____missing_field_name_282 { struct __anonstruct____missing_field_name_283 __annonCompField79 ; struct rb_node rbnode ; }; struct sec_path; struct __anonstruct____missing_field_name_286 { __u16 csum_start ; __u16 csum_offset ; }; union __anonunion____missing_field_name_285 { __wsum csum ; struct __anonstruct____missing_field_name_286 __annonCompField81 ; }; union __anonunion____missing_field_name_287 { unsigned int napi_id ; unsigned int sender_cpu ; }; union __anonunion____missing_field_name_288 { __u32 mark ; __u32 reserved_tailroom ; }; union __anonunion____missing_field_name_289 { __be16 inner_protocol ; __u8 inner_ipproto ; }; struct sk_buff { union __anonunion____missing_field_name_282 __annonCompField80 ; struct sock *sk ; struct net_device *dev ; char cb[48U] ; unsigned long _skb_refdst ; void (*destructor)(struct sk_buff * ) ; struct sec_path *sp ; struct nf_conntrack *nfct ; struct nf_bridge_info *nf_bridge ; unsigned int len ; unsigned int data_len ; __u16 mac_len ; __u16 hdr_len ; __u16 queue_mapping ; unsigned char cloned : 1 ; unsigned char nohdr : 1 ; unsigned char fclone : 2 ; unsigned char peeked : 1 ; unsigned char head_frag : 1 ; unsigned char xmit_more : 1 ; __u32 headers_start[0U] ; __u8 __pkt_type_offset[0U] ; unsigned char pkt_type : 3 ; unsigned char pfmemalloc : 1 ; unsigned char ignore_df : 1 ; unsigned char nfctinfo : 3 ; unsigned char nf_trace : 1 ; unsigned char ip_summed : 2 ; unsigned char ooo_okay : 1 ; unsigned char l4_hash : 1 ; unsigned char sw_hash : 1 ; unsigned char wifi_acked_valid : 1 ; unsigned char wifi_acked : 1 ; unsigned char no_fcs : 1 ; unsigned char encapsulation : 1 ; unsigned char encap_hdr_csum : 1 ; unsigned char csum_valid : 1 ; unsigned char csum_complete_sw : 1 ; unsigned char csum_level : 2 ; unsigned char csum_bad : 1 ; unsigned char ndisc_nodetype : 2 ; unsigned char 
ipvs_property : 1 ; unsigned char inner_protocol_type : 1 ; unsigned char remcsum_offload : 1 ; __u16 tc_index ; __u16 tc_verd ; union __anonunion____missing_field_name_285 __annonCompField82 ; __u32 priority ; int skb_iif ; __u32 hash ; __be16 vlan_proto ; __u16 vlan_tci ; union __anonunion____missing_field_name_287 __annonCompField83 ; __u32 secmark ; union __anonunion____missing_field_name_288 __annonCompField84 ; union __anonunion____missing_field_name_289 __annonCompField85 ; __u16 inner_transport_header ; __u16 inner_network_header ; __u16 inner_mac_header ; __be16 protocol ; __u16 transport_header ; __u16 network_header ; __u16 mac_header ; __u32 headers_end[0U] ; sk_buff_data_t tail ; sk_buff_data_t end ; unsigned char *head ; unsigned char *data ; unsigned int truesize ; atomic_t users ; }; struct dst_entry; struct rtable; enum pkt_hash_types { PKT_HASH_TYPE_NONE = 0, PKT_HASH_TYPE_L2 = 1, PKT_HASH_TYPE_L3 = 2, PKT_HASH_TYPE_L4 = 3 } ; struct ethtool_cmd { __u32 cmd ; __u32 supported ; __u32 advertising ; __u16 speed ; __u8 duplex ; __u8 port ; __u8 phy_address ; __u8 transceiver ; __u8 autoneg ; __u8 mdio_support ; __u32 maxtxpkt ; __u32 maxrxpkt ; __u16 speed_hi ; __u8 eth_tp_mdix ; __u8 eth_tp_mdix_ctrl ; __u32 lp_advertising ; __u32 reserved[2U] ; }; struct ethtool_drvinfo { __u32 cmd ; char driver[32U] ; char version[32U] ; char fw_version[32U] ; char bus_info[32U] ; char erom_version[32U] ; char reserved2[12U] ; __u32 n_priv_flags ; __u32 n_stats ; __u32 testinfo_len ; __u32 eedump_len ; __u32 regdump_len ; }; struct ethtool_wolinfo { __u32 cmd ; __u32 supported ; __u32 wolopts ; __u8 sopass[6U] ; }; struct ethtool_tunable { __u32 cmd ; __u32 id ; __u32 type_id ; __u32 len ; void *data[0U] ; }; struct ethtool_regs { __u32 cmd ; __u32 version ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eeprom { __u32 cmd ; __u32 magic ; __u32 offset ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eee { __u32 cmd ; __u32 supported ; __u32 advertised ; __u32 
/* CIL-flattened declarations (auto-generated; member order is layout — do not reorder).
 * This span carries: the remaining ethtool request/response structures (coalesce, ring,
 * channels, pause, test, stats, rx-flow classification, flash, dump, timestamping info),
 * the full `struct ethtool_ops` driver vtable, and the beginning of the per-network-
 * namespace state structs (`netns_core`, SNMP MIB counter tables in `netns_mib`,
 * `netns_unix`, `netns_packet`, `netns_frags`; continues into the next span). */
lp_advertised ; __u32 eee_active ; __u32 eee_enabled ; __u32 tx_lpi_enabled ; __u32 tx_lpi_timer ; __u32 reserved[2U] ; }; struct ethtool_modinfo { __u32 cmd ; __u32 type ; __u32 eeprom_len ; __u32 reserved[8U] ; }; struct ethtool_coalesce { __u32 cmd ; __u32 rx_coalesce_usecs ; __u32 rx_max_coalesced_frames ; __u32 rx_coalesce_usecs_irq ; __u32 rx_max_coalesced_frames_irq ; __u32 tx_coalesce_usecs ; __u32 tx_max_coalesced_frames ; __u32 tx_coalesce_usecs_irq ; __u32 tx_max_coalesced_frames_irq ; __u32 stats_block_coalesce_usecs ; __u32 use_adaptive_rx_coalesce ; __u32 use_adaptive_tx_coalesce ; __u32 pkt_rate_low ; __u32 rx_coalesce_usecs_low ; __u32 rx_max_coalesced_frames_low ; __u32 tx_coalesce_usecs_low ; __u32 tx_max_coalesced_frames_low ; __u32 pkt_rate_high ; __u32 rx_coalesce_usecs_high ; __u32 rx_max_coalesced_frames_high ; __u32 tx_coalesce_usecs_high ; __u32 tx_max_coalesced_frames_high ; __u32 rate_sample_interval ; }; struct ethtool_ringparam { __u32 cmd ; __u32 rx_max_pending ; __u32 rx_mini_max_pending ; __u32 rx_jumbo_max_pending ; __u32 tx_max_pending ; __u32 rx_pending ; __u32 rx_mini_pending ; __u32 rx_jumbo_pending ; __u32 tx_pending ; }; struct ethtool_channels { __u32 cmd ; __u32 max_rx ; __u32 max_tx ; __u32 max_other ; __u32 max_combined ; __u32 rx_count ; __u32 tx_count ; __u32 other_count ; __u32 combined_count ; }; struct ethtool_pauseparam { __u32 cmd ; __u32 autoneg ; __u32 rx_pause ; __u32 tx_pause ; }; struct ethtool_test { __u32 cmd ; __u32 flags ; __u32 reserved ; __u32 len ; __u64 data[0U] ; }; struct ethtool_stats { __u32 cmd ; __u32 n_stats ; __u64 data[0U] ; }; struct ethtool_tcpip4_spec { __be32 ip4src ; __be32 ip4dst ; __be16 psrc ; __be16 pdst ; __u8 tos ; }; struct ethtool_ah_espip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 spi ; __u8 tos ; }; struct ethtool_usrip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 l4_4_bytes ; __u8 tos ; __u8 ip_ver ; __u8 proto ; }; union ethtool_flow_union { struct ethtool_tcpip4_spec 
tcp_ip4_spec ; struct ethtool_tcpip4_spec udp_ip4_spec ; struct ethtool_tcpip4_spec sctp_ip4_spec ; struct ethtool_ah_espip4_spec ah_ip4_spec ; struct ethtool_ah_espip4_spec esp_ip4_spec ; struct ethtool_usrip4_spec usr_ip4_spec ; struct ethhdr ether_spec ; __u8 hdata[52U] ; }; struct ethtool_flow_ext { __u8 padding[2U] ; unsigned char h_dest[6U] ; __be16 vlan_etype ; __be16 vlan_tci ; __be32 data[2U] ; }; struct ethtool_rx_flow_spec { __u32 flow_type ; union ethtool_flow_union h_u ; struct ethtool_flow_ext h_ext ; union ethtool_flow_union m_u ; struct ethtool_flow_ext m_ext ; __u64 ring_cookie ; __u32 location ; }; struct ethtool_rxnfc { __u32 cmd ; __u32 flow_type ; __u64 data ; struct ethtool_rx_flow_spec fs ; __u32 rule_cnt ; __u32 rule_locs[0U] ; }; struct ethtool_flash { __u32 cmd ; __u32 region ; char data[128U] ; }; struct ethtool_dump { __u32 cmd ; __u32 version ; __u32 flag ; __u32 len ; __u8 data[0U] ; }; struct ethtool_ts_info { __u32 cmd ; __u32 so_timestamping ; __s32 phc_index ; __u32 tx_types ; __u32 tx_reserved[3U] ; __u32 rx_filters ; __u32 rx_reserved[3U] ; }; enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ; struct ethtool_ops { int (*get_settings)(struct net_device * , struct ethtool_cmd * ) ; int (*set_settings)(struct net_device * , struct ethtool_cmd * ) ; void (*get_drvinfo)(struct net_device * , struct ethtool_drvinfo * ) ; int (*get_regs_len)(struct net_device * ) ; void (*get_regs)(struct net_device * , struct ethtool_regs * , void * ) ; void (*get_wol)(struct net_device * , struct ethtool_wolinfo * ) ; int (*set_wol)(struct net_device * , struct ethtool_wolinfo * ) ; u32 (*get_msglevel)(struct net_device * ) ; void (*set_msglevel)(struct net_device * , u32 ) ; int (*nway_reset)(struct net_device * ) ; u32 (*get_link)(struct net_device * ) ; int (*get_eeprom_len)(struct net_device * ) ; int (*get_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int 
(*set_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; int (*set_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; void (*get_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; int (*set_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; void (*get_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; int (*set_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; void (*self_test)(struct net_device * , struct ethtool_test * , u64 * ) ; void (*get_strings)(struct net_device * , u32 , u8 * ) ; int (*set_phys_id)(struct net_device * , enum ethtool_phys_id_state ) ; void (*get_ethtool_stats)(struct net_device * , struct ethtool_stats * , u64 * ) ; int (*begin)(struct net_device * ) ; void (*complete)(struct net_device * ) ; u32 (*get_priv_flags)(struct net_device * ) ; int (*set_priv_flags)(struct net_device * , u32 ) ; int (*get_sset_count)(struct net_device * , int ) ; int (*get_rxnfc)(struct net_device * , struct ethtool_rxnfc * , u32 * ) ; int (*set_rxnfc)(struct net_device * , struct ethtool_rxnfc * ) ; int (*flash_device)(struct net_device * , struct ethtool_flash * ) ; int (*reset)(struct net_device * , u32 * ) ; u32 (*get_rxfh_key_size)(struct net_device * ) ; u32 (*get_rxfh_indir_size)(struct net_device * ) ; int (*get_rxfh)(struct net_device * , u32 * , u8 * , u8 * ) ; int (*set_rxfh)(struct net_device * , u32 const * , u8 const * , u8 const ) ; void (*get_channels)(struct net_device * , struct ethtool_channels * ) ; int (*set_channels)(struct net_device * , struct ethtool_channels * ) ; int (*get_dump_flag)(struct net_device * , struct ethtool_dump * ) ; int (*get_dump_data)(struct net_device * , struct ethtool_dump * , void * ) ; int (*set_dump)(struct net_device * , struct ethtool_dump * ) ; int (*get_ts_info)(struct net_device * , struct ethtool_ts_info * ) ; int (*get_module_info)(struct net_device * , 
struct ethtool_modinfo * ) ; int (*get_module_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_eee)(struct net_device * , struct ethtool_eee * ) ; int (*set_eee)(struct net_device * , struct ethtool_eee * ) ; int (*get_tunable)(struct net_device * , struct ethtool_tunable const * , void * ) ; int (*set_tunable)(struct net_device * , struct ethtool_tunable const * , void const * ) ; }; struct prot_inuse; struct netns_core { struct ctl_table_header *sysctl_hdr ; int sysctl_somaxconn ; struct prot_inuse *inuse ; }; struct u64_stats_sync { }; struct ipstats_mib { u64 mibs[36U] ; struct u64_stats_sync syncp ; }; struct icmp_mib { unsigned long mibs[28U] ; }; struct icmpmsg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6_mib { unsigned long mibs[6U] ; }; struct icmpv6_mib_device { atomic_long_t mibs[6U] ; }; struct icmpv6msg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6msg_mib_device { atomic_long_t mibs[512U] ; }; struct tcp_mib { unsigned long mibs[16U] ; }; struct udp_mib { unsigned long mibs[9U] ; }; struct linux_mib { unsigned long mibs[115U] ; }; struct linux_xfrm_mib { unsigned long mibs[29U] ; }; struct netns_mib { struct tcp_mib *tcp_statistics ; struct ipstats_mib *ip_statistics ; struct linux_mib *net_statistics ; struct udp_mib *udp_statistics ; struct udp_mib *udplite_statistics ; struct icmp_mib *icmp_statistics ; struct icmpmsg_mib *icmpmsg_statistics ; struct proc_dir_entry *proc_net_devsnmp6 ; struct udp_mib *udp_stats_in6 ; struct udp_mib *udplite_stats_in6 ; struct ipstats_mib *ipv6_statistics ; struct icmpv6_mib *icmpv6_statistics ; struct icmpv6msg_mib *icmpv6msg_statistics ; struct linux_xfrm_mib *xfrm_statistics ; }; struct netns_unix { int sysctl_max_dgram_qlen ; struct ctl_table_header *ctl ; }; struct netns_packet { struct mutex sklist_lock ; struct hlist_head sklist ; }; struct netns_frags { struct percpu_counter mem ; int timeout ; int high_thresh ; int low_thresh ; }; struct ipv4_devconf; struct fib_rules_ops; 
/* CIL-flattened declarations (auto-generated; member order is layout — do not reorder).
 * This span carries: the full per-namespace IPv4 state `struct netns_ipv4` (FIB tables,
 * netfilter xt_tables, icmp/tcp sysctl knobs), the `struct dst_ops` routing-cache vtable,
 * IPv6 per-namespace sysctls and `struct netns_ipv6`, lowpan/nf_frag namespaces, the
 * SCTP and DCCP namespace state, and the opening of the netfilter `netns_nf`/`netns_xt`
 * structs (closed in the next span). */
struct fib_table; struct local_ports { seqlock_t lock ; int range[2U] ; bool warned ; }; struct ping_group_range { seqlock_t lock ; kgid_t range[2U] ; }; struct inet_peer_base; struct xt_table; struct netns_ipv4 { struct ctl_table_header *forw_hdr ; struct ctl_table_header *frags_hdr ; struct ctl_table_header *ipv4_hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *xfrm4_hdr ; struct ipv4_devconf *devconf_all ; struct ipv4_devconf *devconf_dflt ; struct fib_rules_ops *rules_ops ; bool fib_has_custom_rules ; struct fib_table *fib_local ; struct fib_table *fib_main ; struct fib_table *fib_default ; int fib_num_tclassid_users ; struct hlist_head *fib_table_hash ; bool fib_offload_disabled ; struct sock *fibnl ; struct sock **icmp_sk ; struct sock *mc_autojoin_sk ; struct inet_peer_base *peers ; struct sock **tcp_sk ; struct netns_frags frags ; struct xt_table *iptable_filter ; struct xt_table *iptable_mangle ; struct xt_table *iptable_raw ; struct xt_table *arptable_filter ; struct xt_table *iptable_security ; struct xt_table *nat_table ; int sysctl_icmp_echo_ignore_all ; int sysctl_icmp_echo_ignore_broadcasts ; int sysctl_icmp_ignore_bogus_error_responses ; int sysctl_icmp_ratelimit ; int sysctl_icmp_ratemask ; int sysctl_icmp_errors_use_inbound_ifaddr ; struct local_ports ip_local_ports ; int sysctl_tcp_ecn ; int sysctl_tcp_ecn_fallback ; int sysctl_ip_no_pmtu_disc ; int sysctl_ip_fwd_use_pmtu ; int sysctl_ip_nonlocal_bind ; int sysctl_fwmark_reflect ; int sysctl_tcp_fwmark_accept ; int sysctl_tcp_mtu_probing ; int sysctl_tcp_base_mss ; int sysctl_tcp_probe_threshold ; u32 sysctl_tcp_probe_interval ; struct ping_group_range ping_group_range ; atomic_t dev_addr_genid ; unsigned long *sysctl_local_reserved_ports ; struct list_head mr_tables ; struct fib_rules_ops *mr_rules_ops ; atomic_t rt_genid ; }; struct neighbour; struct dst_ops { unsigned short family ; unsigned int gc_thresh ; int (*gc)(struct dst_ops * ) ; struct dst_entry *(*check)(struct 
dst_entry * , __u32 ) ; unsigned int (*default_advmss)(struct dst_entry const * ) ; unsigned int (*mtu)(struct dst_entry const * ) ; u32 *(*cow_metrics)(struct dst_entry * , unsigned long ) ; void (*destroy)(struct dst_entry * ) ; void (*ifdown)(struct dst_entry * , struct net_device * , int ) ; struct dst_entry *(*negative_advice)(struct dst_entry * ) ; void (*link_failure)(struct sk_buff * ) ; void (*update_pmtu)(struct dst_entry * , struct sock * , struct sk_buff * , u32 ) ; void (*redirect)(struct dst_entry * , struct sock * , struct sk_buff * ) ; int (*local_out)(struct sk_buff * ) ; struct neighbour *(*neigh_lookup)(struct dst_entry const * , struct sk_buff * , void const * ) ; struct kmem_cache *kmem_cachep ; struct percpu_counter pcpuc_entries ; }; struct netns_sysctl_ipv6 { struct ctl_table_header *hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *icmp_hdr ; struct ctl_table_header *frags_hdr ; struct ctl_table_header *xfrm6_hdr ; int bindv6only ; int flush_delay ; int ip6_rt_max_size ; int ip6_rt_gc_min_interval ; int ip6_rt_gc_timeout ; int ip6_rt_gc_interval ; int ip6_rt_gc_elasticity ; int ip6_rt_mtu_expires ; int ip6_rt_min_advmss ; int flowlabel_consistency ; int auto_flowlabels ; int icmpv6_time ; int anycast_src_echo_reply ; int fwmark_reflect ; int idgen_retries ; int idgen_delay ; int flowlabel_state_ranges ; }; struct ipv6_devconf; struct rt6_info; struct rt6_statistics; struct fib6_table; struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl ; struct ipv6_devconf *devconf_all ; struct ipv6_devconf *devconf_dflt ; struct inet_peer_base *peers ; struct netns_frags frags ; struct xt_table *ip6table_filter ; struct xt_table *ip6table_mangle ; struct xt_table *ip6table_raw ; struct xt_table *ip6table_security ; struct xt_table *ip6table_nat ; struct rt6_info *ip6_null_entry ; struct rt6_statistics *rt6_stats ; struct timer_list ip6_fib_timer ; struct hlist_head *fib_table_hash ; struct fib6_table *fib6_main_tbl ; struct dst_ops 
ip6_dst_ops ; unsigned int ip6_rt_gc_expire ; unsigned long ip6_rt_last_gc ; struct rt6_info *ip6_prohibit_entry ; struct rt6_info *ip6_blk_hole_entry ; struct fib6_table *fib6_local_tbl ; struct fib_rules_ops *fib6_rules_ops ; struct sock **icmp_sk ; struct sock *ndisc_sk ; struct sock *tcp_sk ; struct sock *igmp_sk ; struct sock *mc_autojoin_sk ; struct list_head mr6_tables ; struct fib_rules_ops *mr6_rules_ops ; atomic_t dev_addr_genid ; atomic_t fib6_sernum ; }; struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl ; struct netns_frags frags ; }; struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr ; }; struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl ; struct netns_frags frags ; }; struct sctp_mib; struct netns_sctp { struct sctp_mib *sctp_statistics ; struct proc_dir_entry *proc_net_sctp ; struct ctl_table_header *sysctl_header ; struct sock *ctl_sock ; struct list_head local_addr_list ; struct list_head addr_waitq ; struct timer_list addr_wq_timer ; struct list_head auto_asconf_splist ; spinlock_t addr_wq_lock ; spinlock_t local_addr_lock ; unsigned int rto_initial ; unsigned int rto_min ; unsigned int rto_max ; int rto_alpha ; int rto_beta ; int max_burst ; int cookie_preserve_enable ; char *sctp_hmac_alg ; unsigned int valid_cookie_life ; unsigned int sack_timeout ; unsigned int hb_interval ; int max_retrans_association ; int max_retrans_path ; int max_retrans_init ; int pf_retrans ; int sndbuf_policy ; int rcvbuf_policy ; int default_auto_asconf ; int addip_enable ; int addip_noauth ; int prsctp_enable ; int auth_enable ; int scope_policy ; int rwnd_upd_shift ; unsigned long max_autoclose ; }; struct netns_dccp { struct sock *v4_ctl_sk ; struct sock *v6_ctl_sk ; }; struct nf_logger; struct netns_nf { struct proc_dir_entry *proc_netfilter ; struct nf_logger const *nf_loggers[13U] ; struct ctl_table_header *nf_log_dir_header ; }; struct ebt_table; struct netns_xt { struct list_head tables[13U] ; bool notrack_deprecated_warning 
/* CIL-flattened declarations (auto-generated; member order is layout — do not reorder).
 * This span carries: the tail of `struct netns_xt`, nulls-list primitives, the netfilter
 * conntrack per-protocol nets and the full per-namespace conntrack state `struct netns_ct`,
 * nftables namespace state, tasklet/flow-cache helpers, the IPsec per-namespace state
 * `struct netns_xfrm`, MPLS namespace state, and the opening of the network-namespace
 * root object `struct net` (its definition continues past the end of this chunk). */
; bool clusterip_deprecated_warning ; struct ebt_table *broute_table ; struct ebt_table *frame_filter ; struct ebt_table *frame_nat ; }; struct hlist_nulls_node; struct hlist_nulls_head { struct hlist_nulls_node *first ; }; struct hlist_nulls_node { struct hlist_nulls_node *next ; struct hlist_nulls_node **pprev ; }; struct nf_proto_net { struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; struct ctl_table_header *ctl_compat_header ; struct ctl_table *ctl_compat_table ; unsigned int users ; }; struct nf_generic_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_tcp_net { struct nf_proto_net pn ; unsigned int timeouts[14U] ; unsigned int tcp_loose ; unsigned int tcp_be_liberal ; unsigned int tcp_max_retrans ; }; struct nf_udp_net { struct nf_proto_net pn ; unsigned int timeouts[2U] ; }; struct nf_icmp_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_ip_net { struct nf_generic_net generic ; struct nf_tcp_net tcp ; struct nf_udp_net udp ; struct nf_icmp_net icmp ; struct nf_icmp_net icmpv6 ; struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; }; struct ct_pcpu { spinlock_t lock ; struct hlist_nulls_head unconfirmed ; struct hlist_nulls_head dying ; struct hlist_nulls_head tmpl ; }; struct ip_conntrack_stat; struct nf_ct_event_notifier; struct nf_exp_event_notifier; struct netns_ct { atomic_t count ; unsigned int expect_count ; struct delayed_work ecache_dwork ; bool ecache_dwork_pending ; struct ctl_table_header *sysctl_header ; struct ctl_table_header *acct_sysctl_header ; struct ctl_table_header *tstamp_sysctl_header ; struct ctl_table_header *event_sysctl_header ; struct ctl_table_header *helper_sysctl_header ; char *slabname ; unsigned int sysctl_log_invalid ; int sysctl_events ; int sysctl_acct ; int sysctl_auto_assign_helper ; bool auto_assign_helper_warned ; int sysctl_tstamp ; int sysctl_checksum ; unsigned int htable_size ; seqcount_t generation ; struct kmem_cache 
*nf_conntrack_cachep ; struct hlist_nulls_head *hash ; struct hlist_head *expect_hash ; struct ct_pcpu *pcpu_lists ; struct ip_conntrack_stat *stat ; struct nf_ct_event_notifier *nf_conntrack_event_cb ; struct nf_exp_event_notifier *nf_expect_event_cb ; struct nf_ip_net nf_ct_proto ; unsigned int labels_used ; u8 label_words ; struct hlist_head *nat_bysource ; unsigned int nat_htable_size ; }; struct nft_af_info; struct netns_nftables { struct list_head af_info ; struct list_head commit_list ; struct nft_af_info *ipv4 ; struct nft_af_info *ipv6 ; struct nft_af_info *inet ; struct nft_af_info *arp ; struct nft_af_info *bridge ; struct nft_af_info *netdev ; unsigned int base_seq ; u8 gencursor ; }; struct tasklet_struct { struct tasklet_struct *next ; unsigned long state ; atomic_t count ; void (*func)(unsigned long ) ; unsigned long data ; }; struct flow_cache_percpu { struct hlist_head *hash_table ; int hash_count ; u32 hash_rnd ; int hash_rnd_recalc ; struct tasklet_struct flush_tasklet ; }; struct flow_cache { u32 hash_shift ; struct flow_cache_percpu *percpu ; struct notifier_block hotcpu_notifier ; int low_watermark ; int high_watermark ; struct timer_list rnd_timer ; }; struct xfrm_policy_hash { struct hlist_head *table ; unsigned int hmask ; u8 dbits4 ; u8 sbits4 ; u8 dbits6 ; u8 sbits6 ; }; struct xfrm_policy_hthresh { struct work_struct work ; seqlock_t lock ; u8 lbits4 ; u8 rbits4 ; u8 lbits6 ; u8 rbits6 ; }; struct netns_xfrm { struct list_head state_all ; struct hlist_head *state_bydst ; struct hlist_head *state_bysrc ; struct hlist_head *state_byspi ; unsigned int state_hmask ; unsigned int state_num ; struct work_struct state_hash_work ; struct hlist_head state_gc_list ; struct work_struct state_gc_work ; struct list_head policy_all ; struct hlist_head *policy_byidx ; unsigned int policy_idx_hmask ; struct hlist_head policy_inexact[3U] ; struct xfrm_policy_hash policy_bydst[3U] ; unsigned int policy_count[6U] ; struct work_struct policy_hash_work ; 
struct xfrm_policy_hthresh policy_hthresh ; struct sock *nlsk ; struct sock *nlsk_stash ; u32 sysctl_aevent_etime ; u32 sysctl_aevent_rseqth ; int sysctl_larval_drop ; u32 sysctl_acq_expires ; struct ctl_table_header *sysctl_hdr ; struct dst_ops xfrm4_dst_ops ; struct dst_ops xfrm6_dst_ops ; spinlock_t xfrm_state_lock ; rwlock_t xfrm_policy_lock ; struct mutex xfrm_cfg_mutex ; struct flow_cache flow_cache_global ; atomic_t flow_cache_genid ; struct list_head flow_cache_gc_list ; spinlock_t flow_cache_gc_lock ; struct work_struct flow_cache_gc_work ; struct work_struct flow_cache_flush_work ; struct mutex flow_flush_sem ; }; struct mpls_route; struct netns_mpls { size_t platform_labels ; struct mpls_route **platform_label ; struct ctl_table_header *ctl ; }; struct proc_ns_operations; struct ns_common { atomic_long_t stashed ; struct proc_ns_operations const *ops ; unsigned int inum ; }; struct net_generic; struct netns_ipvs; struct net { atomic_t passive ; atomic_t count ; spinlock_t rules_mod_lock ; atomic64_t cookie_gen ; struct list_head list ; struct list_head cleanup_list ; struct list_head exit_list ; struct user_namespace *user_ns ; spinlock_t nsid_lock ; struct idr netns_ids ; struct ns_common ns ; struct proc_dir_entry *proc_net ; struct proc_dir_entry *proc_net_stat ; struct ctl_table_set sysctls ; struct sock *rtnl ; struct sock *genl_sock ; struct list_head dev_base_head ; struct hlist_head *dev_name_head ; struct hlist_head *dev_index_head ; unsigned int dev_base_seq ; int ifindex ; unsigned int dev_unreg_count ; struct list_head rules_ops ; struct net_device *loopback_dev ; struct netns_core core ; struct netns_mib mib ; struct netns_packet packet ; struct netns_unix unx ; struct netns_ipv4 ipv4 ; struct netns_ipv6 ipv6 ; struct netns_ieee802154_lowpan ieee802154_lowpan ; struct netns_sctp sctp ; struct netns_dccp dccp ; struct netns_nf nf ; struct netns_xt xt ; struct netns_ct ct ; struct netns_nftables nft ; struct netns_nf_frag nf_frag ; struct sock 
*nfnl ; struct sock *nfnl_stash ; struct sk_buff_head wext_nlevents ; struct net_generic *gen ; struct netns_xfrm xfrm ; struct netns_ipvs *ipvs ; struct netns_mpls mpls ; struct sock *diag_nlsk ; atomic_t fnhe_genid ; }; struct __anonstruct_possible_net_t_306 { struct net *net ; }; typedef struct __anonstruct_possible_net_t_306 possible_net_t; enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_PDATA = 3 } ; struct fwnode_handle { enum fwnode_type type ; struct fwnode_handle *secondary ; }; typedef u32 phandle; struct property { char *name ; int length ; void *value ; struct property *next ; unsigned long _flags ; unsigned int unique_id ; struct bin_attribute attr ; }; struct device_node { char const *name ; char const *type ; phandle phandle ; char const *full_name ; struct fwnode_handle fwnode ; struct property *properties ; struct property *deadprops ; struct device_node *parent ; struct device_node *child ; struct device_node *sibling ; struct kobject kobj ; unsigned long _flags ; void *data ; }; struct mii_ioctl_data { __u16 phy_id ; __u16 reg_num ; __u16 val_in ; __u16 val_out ; }; enum ldv_28747 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, PHY_INTERFACE_MODE_MAX = 16 } ; typedef enum ldv_28747 phy_interface_t; enum ldv_28801 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ; struct phy_device; struct mii_bus { char const *name ; char id[17U] ; void *priv ; int (*read)(struct mii_bus * , int , int ) ; int (*write)(struct mii_bus * , int , int , 
u16 ) ; int (*reset)(struct mii_bus * ) ; struct mutex mdio_lock ; struct device *parent ; enum ldv_28801 state ; struct device dev ; struct phy_device *phy_map[32U] ; u32 phy_mask ; u32 phy_ignore_ta_mask ; int *irq ; }; enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ; struct phy_c45_device_ids { u32 devices_in_package ; u32 device_ids[8U] ; }; struct phy_driver; struct phy_device { struct phy_driver *drv ; struct mii_bus *bus ; struct device dev ; u32 phy_id ; struct phy_c45_device_ids c45_ids ; bool is_c45 ; bool is_internal ; bool has_fixups ; bool suspended ; enum phy_state state ; u32 dev_flags ; phy_interface_t interface ; int addr ; int speed ; int duplex ; int pause ; int asym_pause ; int link ; u32 interrupts ; u32 supported ; u32 advertising ; u32 lp_advertising ; int autoneg ; int link_timeout ; int irq ; void *priv ; struct work_struct phy_queue ; struct delayed_work state_queue ; atomic_t irq_disable ; struct mutex lock ; struct net_device *attached_dev ; void (*adjust_link)(struct net_device * ) ; }; struct phy_driver { u32 phy_id ; char *name ; unsigned int phy_id_mask ; u32 features ; u32 flags ; void const *driver_data ; int (*soft_reset)(struct phy_device * ) ; int (*config_init)(struct phy_device * ) ; int (*probe)(struct phy_device * ) ; int (*suspend)(struct phy_device * ) ; int (*resume)(struct phy_device * ) ; int (*config_aneg)(struct phy_device * ) ; int (*aneg_done)(struct phy_device * ) ; int (*read_status)(struct phy_device * ) ; int (*ack_interrupt)(struct phy_device * ) ; int (*config_intr)(struct phy_device * ) ; int (*did_interrupt)(struct phy_device * ) ; void (*remove)(struct phy_device * ) ; int (*match_phy_device)(struct phy_device * ) ; int (*ts_info)(struct phy_device * , struct ethtool_ts_info * ) ; int (*hwtstamp)(struct phy_device * , struct ifreq * ) ; bool 
(*rxtstamp)(struct phy_device * , struct sk_buff * , int ) ; void (*txtstamp)(struct phy_device * , struct sk_buff * , int ) ; int (*set_wol)(struct phy_device * , struct ethtool_wolinfo * ) ; void (*get_wol)(struct phy_device * , struct ethtool_wolinfo * ) ; void (*link_change_notify)(struct phy_device * ) ; int (*read_mmd_indirect)(struct phy_device * , int , int , int ) ; void (*write_mmd_indirect)(struct phy_device * , int , int , int , u32 ) ; int (*module_info)(struct phy_device * , struct ethtool_modinfo * ) ; int (*module_eeprom)(struct phy_device * , struct ethtool_eeprom * , u8 * ) ; struct device_driver driver ; }; struct fixed_phy_status { int link ; int speed ; int duplex ; int pause ; int asym_pause ; }; enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4 } ; struct dsa_chip_data { struct device *host_dev ; int sw_addr ; int eeprom_len ; struct device_node *of_node ; char *port_names[12U] ; struct device_node *port_dn[12U] ; s8 *rtable ; }; struct dsa_platform_data { struct device *netdev ; struct net_device *of_netdev ; int nr_chips ; struct dsa_chip_data *chip ; }; struct packet_type; struct dsa_switch; struct dsa_switch_tree { struct dsa_platform_data *pd ; struct net_device *master_netdev ; int (*rcv)(struct sk_buff * , struct net_device * , struct packet_type * , struct net_device * ) ; enum dsa_tag_protocol tag_protocol ; s8 cpu_switch ; s8 cpu_port ; int link_poll_needed ; struct work_struct link_poll_work ; struct timer_list link_poll_timer ; struct dsa_switch *ds[4U] ; }; struct dsa_switch_driver; struct dsa_switch { struct dsa_switch_tree *dst ; int index ; enum dsa_tag_protocol tag_protocol ; struct dsa_chip_data *pd ; struct dsa_switch_driver *drv ; struct device *master_dev ; char hwmon_name[24U] ; struct device *hwmon_dev ; u32 dsa_port_mask ; u32 phys_port_mask ; u32 phys_mii_mask ; struct mii_bus *slave_mii_bus ; struct net_device *ports[12U] ; 
}; struct dsa_switch_driver { struct list_head list ; enum dsa_tag_protocol tag_protocol ; int priv_size ; char *(*probe)(struct device * , int ) ; int (*setup)(struct dsa_switch * ) ; int (*set_addr)(struct dsa_switch * , u8 * ) ; u32 (*get_phy_flags)(struct dsa_switch * , int ) ; int (*phy_read)(struct dsa_switch * , int , int ) ; int (*phy_write)(struct dsa_switch * , int , int , u16 ) ; void (*poll_link)(struct dsa_switch * ) ; void (*adjust_link)(struct dsa_switch * , int , struct phy_device * ) ; void (*fixed_link_update)(struct dsa_switch * , int , struct fixed_phy_status * ) ; void (*get_strings)(struct dsa_switch * , int , uint8_t * ) ; void (*get_ethtool_stats)(struct dsa_switch * , int , uint64_t * ) ; int (*get_sset_count)(struct dsa_switch * ) ; void (*get_wol)(struct dsa_switch * , int , struct ethtool_wolinfo * ) ; int (*set_wol)(struct dsa_switch * , int , struct ethtool_wolinfo * ) ; int (*suspend)(struct dsa_switch * ) ; int (*resume)(struct dsa_switch * ) ; int (*port_enable)(struct dsa_switch * , int , struct phy_device * ) ; void (*port_disable)(struct dsa_switch * , int , struct phy_device * ) ; int (*set_eee)(struct dsa_switch * , int , struct phy_device * , struct ethtool_eee * ) ; int (*get_eee)(struct dsa_switch * , int , struct ethtool_eee * ) ; int (*get_temp)(struct dsa_switch * , int * ) ; int (*get_temp_limit)(struct dsa_switch * , int * ) ; int (*set_temp_limit)(struct dsa_switch * , int ) ; int (*get_temp_alarm)(struct dsa_switch * , bool * ) ; int (*get_eeprom_len)(struct dsa_switch * ) ; int (*get_eeprom)(struct dsa_switch * , struct ethtool_eeprom * , u8 * ) ; int (*set_eeprom)(struct dsa_switch * , struct ethtool_eeprom * , u8 * ) ; int (*get_regs_len)(struct dsa_switch * , int ) ; void (*get_regs)(struct dsa_switch * , int , struct ethtool_regs * , void * ) ; int (*port_join_bridge)(struct dsa_switch * , int , u32 ) ; int (*port_leave_bridge)(struct dsa_switch * , int , u32 ) ; int (*port_stp_update)(struct dsa_switch * , int , 
u8 ) ; int (*fdb_add)(struct dsa_switch * , int , unsigned char const * , u16 ) ; int (*fdb_del)(struct dsa_switch * , int , unsigned char const * , u16 ) ; int (*fdb_getnext)(struct dsa_switch * , int , unsigned char * , bool * ) ; }; struct ieee_ets { __u8 willing ; __u8 ets_cap ; __u8 cbs ; __u8 tc_tx_bw[8U] ; __u8 tc_rx_bw[8U] ; __u8 tc_tsa[8U] ; __u8 prio_tc[8U] ; __u8 tc_reco_bw[8U] ; __u8 tc_reco_tsa[8U] ; __u8 reco_prio_tc[8U] ; }; struct ieee_maxrate { __u64 tc_maxrate[8U] ; }; struct ieee_qcn { __u8 rpg_enable[8U] ; __u32 rppp_max_rps[8U] ; __u32 rpg_time_reset[8U] ; __u32 rpg_byte_reset[8U] ; __u32 rpg_threshold[8U] ; __u32 rpg_max_rate[8U] ; __u32 rpg_ai_rate[8U] ; __u32 rpg_hai_rate[8U] ; __u32 rpg_gd[8U] ; __u32 rpg_min_dec_fac[8U] ; __u32 rpg_min_rate[8U] ; __u32 cndd_state_machine[8U] ; }; struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U] ; __u32 rppp_created_rps[8U] ; }; struct ieee_pfc { __u8 pfc_cap ; __u8 pfc_en ; __u8 mbc ; __u16 delay ; __u64 requests[8U] ; __u64 indications[8U] ; }; struct cee_pg { __u8 willing ; __u8 error ; __u8 pg_en ; __u8 tcs_supported ; __u8 pg_bw[8U] ; __u8 prio_pg[8U] ; }; struct cee_pfc { __u8 willing ; __u8 error ; __u8 pfc_en ; __u8 tcs_supported ; }; struct dcb_app { __u8 selector ; __u8 priority ; __u16 protocol ; }; struct dcb_peer_app_info { __u8 willing ; __u8 error ; }; struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_setets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_getmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_setmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_getqcn)(struct net_device * , struct ieee_qcn * ) ; int (*ieee_setqcn)(struct net_device * , struct ieee_qcn * ) ; int (*ieee_getqcnstats)(struct net_device * , struct ieee_qcn_stats * ) ; int (*ieee_getpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_setpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_getapp)(struct 
net_device * , struct dcb_app * ) ; int (*ieee_setapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_delapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_peer_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_peer_getpfc)(struct net_device * , struct ieee_pfc * ) ; u8 (*getstate)(struct net_device * ) ; u8 (*setstate)(struct net_device * , u8 ) ; void (*getpermhwaddr)(struct net_device * , u8 * ) ; void (*setpgtccfgtx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgtx)(struct net_device * , int , u8 ) ; void (*setpgtccfgrx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgrx)(struct net_device * , int , u8 ) ; void (*getpgtccfgtx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgtx)(struct net_device * , int , u8 * ) ; void (*getpgtccfgrx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgrx)(struct net_device * , int , u8 * ) ; void (*setpfccfg)(struct net_device * , int , u8 ) ; void (*getpfccfg)(struct net_device * , int , u8 * ) ; u8 (*setall)(struct net_device * ) ; u8 (*getcap)(struct net_device * , int , u8 * ) ; int (*getnumtcs)(struct net_device * , int , u8 * ) ; int (*setnumtcs)(struct net_device * , int , u8 ) ; u8 (*getpfcstate)(struct net_device * ) ; void (*setpfcstate)(struct net_device * , u8 ) ; void (*getbcncfg)(struct net_device * , int , u32 * ) ; void (*setbcncfg)(struct net_device * , int , u32 ) ; void (*getbcnrp)(struct net_device * , int , u8 * ) ; void (*setbcnrp)(struct net_device * , int , u8 ) ; int (*setapp)(struct net_device * , u8 , u16 , u8 ) ; int (*getapp)(struct net_device * , u8 , u16 ) ; u8 (*getfeatcfg)(struct net_device * , int , u8 * ) ; u8 (*setfeatcfg)(struct net_device * , int , u8 ) ; u8 (*getdcbx)(struct net_device * ) ; u8 (*setdcbx)(struct net_device * , u8 ) ; int (*peer_getappinfo)(struct net_device * , struct dcb_peer_app_info * , u16 * ) ; int (*peer_getapptable)(struct net_device * , struct 
dcb_app * ) ; int (*cee_peer_getpg)(struct net_device * , struct cee_pg * ) ; int (*cee_peer_getpfc)(struct net_device * , struct cee_pfc * ) ; }; struct taskstats { __u16 version ; __u32 ac_exitcode ; __u8 ac_flag ; __u8 ac_nice ; __u64 cpu_count ; __u64 cpu_delay_total ; __u64 blkio_count ; __u64 blkio_delay_total ; __u64 swapin_count ; __u64 swapin_delay_total ; __u64 cpu_run_real_total ; __u64 cpu_run_virtual_total ; char ac_comm[32U] ; __u8 ac_sched ; __u8 ac_pad[3U] ; __u32 ac_uid ; __u32 ac_gid ; __u32 ac_pid ; __u32 ac_ppid ; __u32 ac_btime ; __u64 ac_etime ; __u64 ac_utime ; __u64 ac_stime ; __u64 ac_minflt ; __u64 ac_majflt ; __u64 coremem ; __u64 virtmem ; __u64 hiwater_rss ; __u64 hiwater_vm ; __u64 read_char ; __u64 write_char ; __u64 read_syscalls ; __u64 write_syscalls ; __u64 read_bytes ; __u64 write_bytes ; __u64 cancelled_write_bytes ; __u64 nvcsw ; __u64 nivcsw ; __u64 ac_utimescaled ; __u64 ac_stimescaled ; __u64 cpu_scaled_run_real_total ; __u64 freepages_count ; __u64 freepages_delay_total ; }; struct netprio_map { struct callback_head rcu ; u32 priomap_len ; u32 priomap[] ; }; struct xfrm_policy; struct xfrm_state; struct request_sock; struct mnt_namespace; struct ipc_namespace; struct nsproxy { atomic_t count ; struct uts_namespace *uts_ns ; struct ipc_namespace *ipc_ns ; struct mnt_namespace *mnt_ns ; struct pid_namespace *pid_ns_for_children ; struct net *net_ns ; }; struct nlmsghdr { __u32 nlmsg_len ; __u16 nlmsg_type ; __u16 nlmsg_flags ; __u32 nlmsg_seq ; __u32 nlmsg_pid ; }; struct nlattr { __u16 nla_len ; __u16 nla_type ; }; struct netlink_callback { struct sk_buff *skb ; struct nlmsghdr const *nlh ; int (*dump)(struct sk_buff * , struct netlink_callback * ) ; int (*done)(struct netlink_callback * ) ; void *data ; struct module *module ; u16 family ; u16 min_dump_alloc ; unsigned int prev_seq ; unsigned int seq ; long args[6U] ; }; struct ndmsg { __u8 ndm_family ; __u8 ndm_pad1 ; __u16 ndm_pad2 ; __s32 ndm_ifindex ; __u16 ndm_state ; 
__u8 ndm_flags ; __u8 ndm_type ; }; struct rtnl_link_stats64 { __u64 rx_packets ; __u64 tx_packets ; __u64 rx_bytes ; __u64 tx_bytes ; __u64 rx_errors ; __u64 tx_errors ; __u64 rx_dropped ; __u64 tx_dropped ; __u64 multicast ; __u64 collisions ; __u64 rx_length_errors ; __u64 rx_over_errors ; __u64 rx_crc_errors ; __u64 rx_frame_errors ; __u64 rx_fifo_errors ; __u64 rx_missed_errors ; __u64 tx_aborted_errors ; __u64 tx_carrier_errors ; __u64 tx_fifo_errors ; __u64 tx_heartbeat_errors ; __u64 tx_window_errors ; __u64 rx_compressed ; __u64 tx_compressed ; }; enum macvlan_mode { MACVLAN_MODE_PRIVATE = 1, MACVLAN_MODE_VEPA = 2, MACVLAN_MODE_BRIDGE = 4, MACVLAN_MODE_PASSTHRU = 8, MACVLAN_MODE_SOURCE = 16 } ; struct ifla_vf_stats { __u64 rx_packets ; __u64 tx_packets ; __u64 rx_bytes ; __u64 tx_bytes ; __u64 broadcast ; __u64 multicast ; }; struct ifla_vf_info { __u32 vf ; __u8 mac[32U] ; __u32 vlan ; __u32 qos ; __u32 spoofchk ; __u32 linkstate ; __u32 min_tx_rate ; __u32 max_tx_rate ; __u32 rss_query_en ; }; struct netpoll_info; struct wireless_dev; struct wpan_dev; struct mpls_dev; enum netdev_tx { __NETDEV_TX_MIN = (-0x7FFFFFFF-1), NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, NETDEV_TX_LOCKED = 32 } ; typedef enum netdev_tx netdev_tx_t; struct net_device_stats { unsigned long rx_packets ; unsigned long tx_packets ; unsigned long rx_bytes ; unsigned long tx_bytes ; unsigned long rx_errors ; unsigned long tx_errors ; unsigned long rx_dropped ; unsigned long tx_dropped ; unsigned long multicast ; unsigned long collisions ; unsigned long rx_length_errors ; unsigned long rx_over_errors ; unsigned long rx_crc_errors ; unsigned long rx_frame_errors ; unsigned long rx_fifo_errors ; unsigned long rx_missed_errors ; unsigned long tx_aborted_errors ; unsigned long tx_carrier_errors ; unsigned long tx_fifo_errors ; unsigned long tx_heartbeat_errors ; unsigned long tx_window_errors ; unsigned long rx_compressed ; unsigned long tx_compressed ; }; struct neigh_parms; struct 
netdev_hw_addr { struct list_head list ; unsigned char addr[32U] ; unsigned char type ; bool global_use ; int sync_cnt ; int refcount ; int synced ; struct callback_head callback_head ; }; struct netdev_hw_addr_list { struct list_head list ; int count ; }; struct hh_cache { u16 hh_len ; u16 __pad ; seqlock_t hh_lock ; unsigned long hh_data[16U] ; }; struct header_ops { int (*create)(struct sk_buff * , struct net_device * , unsigned short , void const * , void const * , unsigned int ) ; int (*parse)(struct sk_buff const * , unsigned char * ) ; int (*cache)(struct neighbour const * , struct hh_cache * , __be16 ) ; void (*cache_update)(struct hh_cache * , struct net_device const * , unsigned char const * ) ; }; struct napi_struct { struct list_head poll_list ; unsigned long state ; int weight ; unsigned int gro_count ; int (*poll)(struct napi_struct * , int ) ; spinlock_t poll_lock ; int poll_owner ; struct net_device *dev ; struct sk_buff *gro_list ; struct sk_buff *skb ; struct hrtimer timer ; struct list_head dev_list ; struct hlist_node napi_hash_node ; unsigned int napi_id ; }; enum gro_result { GRO_MERGED = 0, GRO_MERGED_FREE = 1, GRO_HELD = 2, GRO_NORMAL = 3, GRO_DROP = 4 } ; typedef enum gro_result gro_result_t; enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff ** ); struct Qdisc; struct netdev_queue { struct net_device *dev ; struct Qdisc *qdisc ; struct Qdisc *qdisc_sleeping ; struct kobject kobj ; int numa_node ; spinlock_t _xmit_lock ; int xmit_lock_owner ; unsigned long trans_start ; unsigned long trans_timeout ; unsigned long state ; struct dql dql ; unsigned long tx_maxrate ; }; struct rps_map { unsigned int len ; struct callback_head rcu ; u16 cpus[0U] ; }; struct rps_dev_flow { u16 cpu ; u16 filter ; unsigned int last_qtail ; }; struct rps_dev_flow_table { unsigned int 
mask ; struct callback_head rcu ; struct rps_dev_flow flows[0U] ; }; struct netdev_rx_queue { struct rps_map *rps_map ; struct rps_dev_flow_table *rps_flow_table ; struct kobject kobj ; struct net_device *dev ; }; struct xps_map { unsigned int len ; unsigned int alloc_len ; struct callback_head rcu ; u16 queues[0U] ; }; struct xps_dev_maps { struct callback_head rcu ; struct xps_map *cpu_map[0U] ; }; struct netdev_tc_txq { u16 count ; u16 offset ; }; struct netdev_fcoe_hbainfo { char manufacturer[64U] ; char serial_number[64U] ; char hardware_version[64U] ; char driver_version[64U] ; char optionrom_version[64U] ; char firmware_version[64U] ; char model[256U] ; char model_description[256U] ; }; struct netdev_phys_item_id { unsigned char id[32U] ; unsigned char id_len ; }; struct net_device_ops { int (*ndo_init)(struct net_device * ) ; void (*ndo_uninit)(struct net_device * ) ; int (*ndo_open)(struct net_device * ) ; int (*ndo_stop)(struct net_device * ) ; netdev_tx_t (*ndo_start_xmit)(struct sk_buff * , struct net_device * ) ; u16 (*ndo_select_queue)(struct net_device * , struct sk_buff * , void * , u16 (*)(struct net_device * , struct sk_buff * ) ) ; void (*ndo_change_rx_flags)(struct net_device * , int ) ; void (*ndo_set_rx_mode)(struct net_device * ) ; int (*ndo_set_mac_address)(struct net_device * , void * ) ; int (*ndo_validate_addr)(struct net_device * ) ; int (*ndo_do_ioctl)(struct net_device * , struct ifreq * , int ) ; int (*ndo_set_config)(struct net_device * , struct ifmap * ) ; int (*ndo_change_mtu)(struct net_device * , int ) ; int (*ndo_neigh_setup)(struct net_device * , struct neigh_parms * ) ; void (*ndo_tx_timeout)(struct net_device * ) ; struct rtnl_link_stats64 *(*ndo_get_stats64)(struct net_device * , struct rtnl_link_stats64 * ) ; struct net_device_stats *(*ndo_get_stats)(struct net_device * ) ; int (*ndo_vlan_rx_add_vid)(struct net_device * , __be16 , u16 ) ; int (*ndo_vlan_rx_kill_vid)(struct net_device * , __be16 , u16 ) ; void 
(*ndo_poll_controller)(struct net_device * ) ; int (*ndo_netpoll_setup)(struct net_device * , struct netpoll_info * ) ; void (*ndo_netpoll_cleanup)(struct net_device * ) ; int (*ndo_busy_poll)(struct napi_struct * ) ; int (*ndo_set_vf_mac)(struct net_device * , int , u8 * ) ; int (*ndo_set_vf_vlan)(struct net_device * , int , u16 , u8 ) ; int (*ndo_set_vf_rate)(struct net_device * , int , int , int ) ; int (*ndo_set_vf_spoofchk)(struct net_device * , int , bool ) ; int (*ndo_get_vf_config)(struct net_device * , int , struct ifla_vf_info * ) ; int (*ndo_set_vf_link_state)(struct net_device * , int , int ) ; int (*ndo_get_vf_stats)(struct net_device * , int , struct ifla_vf_stats * ) ; int (*ndo_set_vf_port)(struct net_device * , int , struct nlattr ** ) ; int (*ndo_get_vf_port)(struct net_device * , int , struct sk_buff * ) ; int (*ndo_set_vf_rss_query_en)(struct net_device * , int , bool ) ; int (*ndo_setup_tc)(struct net_device * , u8 ) ; int (*ndo_fcoe_enable)(struct net_device * ) ; int (*ndo_fcoe_disable)(struct net_device * ) ; int (*ndo_fcoe_ddp_setup)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_ddp_done)(struct net_device * , u16 ) ; int (*ndo_fcoe_ddp_target)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_get_hbainfo)(struct net_device * , struct netdev_fcoe_hbainfo * ) ; int (*ndo_fcoe_get_wwn)(struct net_device * , u64 * , int ) ; int (*ndo_rx_flow_steer)(struct net_device * , struct sk_buff const * , u16 , u32 ) ; int (*ndo_add_slave)(struct net_device * , struct net_device * ) ; int (*ndo_del_slave)(struct net_device * , struct net_device * ) ; netdev_features_t (*ndo_fix_features)(struct net_device * , netdev_features_t ) ; int (*ndo_set_features)(struct net_device * , netdev_features_t ) ; int (*ndo_neigh_construct)(struct neighbour * ) ; void (*ndo_neigh_destroy)(struct neighbour * ) ; int (*ndo_fdb_add)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char 
const * , u16 , u16 ) ; int (*ndo_fdb_del)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * , u16 ) ; int (*ndo_fdb_dump)(struct sk_buff * , struct netlink_callback * , struct net_device * , struct net_device * , int ) ; int (*ndo_bridge_setlink)(struct net_device * , struct nlmsghdr * , u16 ) ; int (*ndo_bridge_getlink)(struct sk_buff * , u32 , u32 , struct net_device * , u32 , int ) ; int (*ndo_bridge_dellink)(struct net_device * , struct nlmsghdr * , u16 ) ; int (*ndo_change_carrier)(struct net_device * , bool ) ; int (*ndo_get_phys_port_id)(struct net_device * , struct netdev_phys_item_id * ) ; int (*ndo_get_phys_port_name)(struct net_device * , char * , size_t ) ; void (*ndo_add_vxlan_port)(struct net_device * , sa_family_t , __be16 ) ; void (*ndo_del_vxlan_port)(struct net_device * , sa_family_t , __be16 ) ; void *(*ndo_dfwd_add_station)(struct net_device * , struct net_device * ) ; void (*ndo_dfwd_del_station)(struct net_device * , void * ) ; netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff * , struct net_device * , void * ) ; int (*ndo_get_lock_subclass)(struct net_device * ) ; netdev_features_t (*ndo_features_check)(struct sk_buff * , struct net_device * , netdev_features_t ) ; int (*ndo_set_tx_maxrate)(struct net_device * , int , u32 ) ; int (*ndo_get_iflink)(struct net_device const * ) ; }; struct __anonstruct_adj_list_316 { struct list_head upper ; struct list_head lower ; }; struct __anonstruct_all_adj_list_317 { struct list_head upper ; struct list_head lower ; }; struct iw_handler_def; struct iw_public_data; struct switchdev_ops; struct vlan_info; struct tipc_bearer; struct in_device; struct dn_dev; struct inet6_dev; struct tcf_proto; struct cpu_rmap; struct pcpu_lstats; struct pcpu_sw_netstats; struct pcpu_dstats; struct pcpu_vstats; union __anonunion____missing_field_name_318 { void *ml_priv ; struct pcpu_lstats *lstats ; struct pcpu_sw_netstats *tstats ; struct pcpu_dstats *dstats ; struct pcpu_vstats *vstats ; }; 
struct garp_port; struct mrp_port; struct rtnl_link_ops; struct net_device { char name[16U] ; struct hlist_node name_hlist ; char *ifalias ; unsigned long mem_end ; unsigned long mem_start ; unsigned long base_addr ; int irq ; atomic_t carrier_changes ; unsigned long state ; struct list_head dev_list ; struct list_head napi_list ; struct list_head unreg_list ; struct list_head close_list ; struct list_head ptype_all ; struct list_head ptype_specific ; struct __anonstruct_adj_list_316 adj_list ; struct __anonstruct_all_adj_list_317 all_adj_list ; netdev_features_t features ; netdev_features_t hw_features ; netdev_features_t wanted_features ; netdev_features_t vlan_features ; netdev_features_t hw_enc_features ; netdev_features_t mpls_features ; int ifindex ; int group ; struct net_device_stats stats ; atomic_long_t rx_dropped ; atomic_long_t tx_dropped ; struct iw_handler_def const *wireless_handlers ; struct iw_public_data *wireless_data ; struct net_device_ops const *netdev_ops ; struct ethtool_ops const *ethtool_ops ; struct switchdev_ops const *switchdev_ops ; struct header_ops const *header_ops ; unsigned int flags ; unsigned int priv_flags ; unsigned short gflags ; unsigned short padded ; unsigned char operstate ; unsigned char link_mode ; unsigned char if_port ; unsigned char dma ; unsigned int mtu ; unsigned short type ; unsigned short hard_header_len ; unsigned short needed_headroom ; unsigned short needed_tailroom ; unsigned char perm_addr[32U] ; unsigned char addr_assign_type ; unsigned char addr_len ; unsigned short neigh_priv_len ; unsigned short dev_id ; unsigned short dev_port ; spinlock_t addr_list_lock ; unsigned char name_assign_type ; bool uc_promisc ; struct netdev_hw_addr_list uc ; struct netdev_hw_addr_list mc ; struct netdev_hw_addr_list dev_addrs ; struct kset *queues_kset ; unsigned int promiscuity ; unsigned int allmulti ; struct vlan_info *vlan_info ; struct dsa_switch_tree *dsa_ptr ; struct tipc_bearer *tipc_ptr ; void *atalk_ptr ; struct 
in_device *ip_ptr ; struct dn_dev *dn_ptr ; struct inet6_dev *ip6_ptr ; void *ax25_ptr ; struct wireless_dev *ieee80211_ptr ; struct wpan_dev *ieee802154_ptr ; struct mpls_dev *mpls_ptr ; unsigned long last_rx ; unsigned char *dev_addr ; struct netdev_rx_queue *_rx ; unsigned int num_rx_queues ; unsigned int real_num_rx_queues ; unsigned long gro_flush_timeout ; rx_handler_func_t *rx_handler ; void *rx_handler_data ; struct tcf_proto *ingress_cl_list ; struct netdev_queue *ingress_queue ; struct list_head nf_hooks_ingress ; unsigned char broadcast[32U] ; struct cpu_rmap *rx_cpu_rmap ; struct hlist_node index_hlist ; struct netdev_queue *_tx ; unsigned int num_tx_queues ; unsigned int real_num_tx_queues ; struct Qdisc *qdisc ; unsigned long tx_queue_len ; spinlock_t tx_global_lock ; int watchdog_timeo ; struct xps_dev_maps *xps_maps ; unsigned long trans_start ; struct timer_list watchdog_timer ; int *pcpu_refcnt ; struct list_head todo_list ; struct list_head link_watch_list ; unsigned char reg_state ; bool dismantle ; unsigned short rtnl_link_state ; void (*destructor)(struct net_device * ) ; struct netpoll_info *npinfo ; possible_net_t nd_net ; union __anonunion____missing_field_name_318 __annonCompField95 ; struct garp_port *garp_port ; struct mrp_port *mrp_port ; struct device dev ; struct attribute_group const *sysfs_groups[4U] ; struct attribute_group const *sysfs_rx_queue_group ; struct rtnl_link_ops const *rtnl_link_ops ; unsigned int gso_max_size ; u16 gso_max_segs ; u16 gso_min_segs ; struct dcbnl_rtnl_ops const *dcbnl_ops ; u8 num_tc ; struct netdev_tc_txq tc_to_txq[16U] ; u8 prio_tc_map[16U] ; unsigned int fcoe_ddp_xid ; struct netprio_map *priomap ; struct phy_device *phydev ; struct lock_class_key *qdisc_tx_busylock ; }; struct packet_type { __be16 type ; struct net_device *dev ; int (*func)(struct sk_buff * , struct net_device * , struct packet_type * , struct net_device * ) ; bool (*id_match)(struct packet_type * , struct sock * ) ; void 
*af_packet_priv ; struct list_head list ; }; struct pcpu_sw_netstats { u64 rx_packets ; u64 rx_bytes ; u64 tx_packets ; u64 tx_bytes ; struct u64_stats_sync syncp ; }; enum skb_free_reason { SKB_REASON_CONSUMED = 0, SKB_REASON_DROPPED = 1 } ; struct iphdr { unsigned char ihl : 4 ; unsigned char version : 4 ; __u8 tos ; __be16 tot_len ; __be16 id ; __be16 frag_off ; __u8 ttl ; __u8 protocol ; __sum16 check ; __be32 saddr ; __be32 daddr ; }; struct page_counter { atomic_long_t count ; unsigned long limit ; struct page_counter *parent ; unsigned long watermark ; unsigned long failcnt ; }; struct sock_filter { __u16 code ; __u8 jt ; __u8 jf ; __u32 k ; }; struct bpf_insn { __u8 code ; unsigned char dst_reg : 4 ; unsigned char src_reg : 4 ; __s16 off ; __s32 imm ; }; enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC = 0, BPF_PROG_TYPE_SOCKET_FILTER = 1, BPF_PROG_TYPE_KPROBE = 2, BPF_PROG_TYPE_SCHED_CLS = 3, BPF_PROG_TYPE_SCHED_ACT = 4 } ; struct bpf_prog_aux; struct sock_fprog_kern { u16 len ; struct sock_filter *filter ; }; union __anonunion____missing_field_name_329 { struct sock_filter insns[0U] ; struct bpf_insn insnsi[0U] ; }; struct bpf_prog { u16 pages ; bool jited ; bool gpl_compatible ; u32 len ; enum bpf_prog_type type ; struct bpf_prog_aux *aux ; struct sock_fprog_kern *orig_prog ; unsigned int (*bpf_func)(struct sk_buff const * , struct bpf_insn const * ) ; union __anonunion____missing_field_name_329 __annonCompField100 ; }; struct sk_filter { atomic_t refcnt ; struct callback_head rcu ; struct bpf_prog *prog ; }; struct pollfd { int fd ; short events ; short revents ; }; struct poll_table_struct { void (*_qproc)(struct file * , wait_queue_head_t * , struct poll_table_struct * ) ; unsigned long _key ; }; struct nla_policy { u16 type ; u16 len ; }; struct rtnl_link_ops { struct list_head list ; char const *kind ; size_t priv_size ; void (*setup)(struct net_device * ) ; int maxtype ; struct nla_policy const *policy ; int (*validate)(struct nlattr ** , struct nlattr ** 
) ; int (*newlink)(struct net * , struct net_device * , struct nlattr ** , struct nlattr ** ) ; int (*changelink)(struct net_device * , struct nlattr ** , struct nlattr ** ) ; void (*dellink)(struct net_device * , struct list_head * ) ; size_t (*get_size)(struct net_device const * ) ; int (*fill_info)(struct sk_buff * , struct net_device const * ) ; size_t (*get_xstats_size)(struct net_device const * ) ; int (*fill_xstats)(struct sk_buff * , struct net_device const * ) ; unsigned int (*get_num_tx_queues)(void) ; unsigned int (*get_num_rx_queues)(void) ; int slave_maxtype ; struct nla_policy const *slave_policy ; int (*slave_validate)(struct nlattr ** , struct nlattr ** ) ; int (*slave_changelink)(struct net_device * , struct net_device * , struct nlattr ** , struct nlattr ** ) ; size_t (*get_slave_size)(struct net_device const * , struct net_device const * ) ; int (*fill_slave_info)(struct sk_buff * , struct net_device const * , struct net_device const * ) ; struct net *(*get_link_net)(struct net_device const * ) ; }; struct neigh_table; struct neigh_parms { possible_net_t net ; struct net_device *dev ; struct list_head list ; int (*neigh_setup)(struct neighbour * ) ; void (*neigh_cleanup)(struct neighbour * ) ; struct neigh_table *tbl ; void *sysctl_table ; int dead ; atomic_t refcnt ; struct callback_head callback_head ; int reachable_time ; int data[13U] ; unsigned long data_state[1U] ; }; struct neigh_statistics { unsigned long allocs ; unsigned long destroys ; unsigned long hash_grows ; unsigned long res_failed ; unsigned long lookups ; unsigned long hits ; unsigned long rcv_probes_mcast ; unsigned long rcv_probes_ucast ; unsigned long periodic_gc_runs ; unsigned long forced_gc_runs ; unsigned long unres_discards ; }; struct neigh_ops; struct neighbour { struct neighbour *next ; struct neigh_table *tbl ; struct neigh_parms *parms ; unsigned long confirmed ; unsigned long updated ; rwlock_t lock ; atomic_t refcnt ; struct sk_buff_head arp_queue ; unsigned int 
arp_queue_len_bytes ; struct timer_list timer ; unsigned long used ; atomic_t probes ; __u8 flags ; __u8 nud_state ; __u8 type ; __u8 dead ; seqlock_t ha_lock ; unsigned char ha[32U] ; struct hh_cache hh ; int (*output)(struct neighbour * , struct sk_buff * ) ; struct neigh_ops const *ops ; struct callback_head rcu ; struct net_device *dev ; u8 primary_key[0U] ; }; struct neigh_ops { int family ; void (*solicit)(struct neighbour * , struct sk_buff * ) ; void (*error_report)(struct neighbour * , struct sk_buff * ) ; int (*output)(struct neighbour * , struct sk_buff * ) ; int (*connected_output)(struct neighbour * , struct sk_buff * ) ; }; struct pneigh_entry { struct pneigh_entry *next ; possible_net_t net ; struct net_device *dev ; u8 flags ; u8 key[0U] ; }; struct neigh_hash_table { struct neighbour **hash_buckets ; unsigned int hash_shift ; __u32 hash_rnd[4U] ; struct callback_head rcu ; }; struct neigh_table { int family ; int entry_size ; int key_len ; __be16 protocol ; __u32 (*hash)(void const * , struct net_device const * , __u32 * ) ; bool (*key_eq)(struct neighbour const * , void const * ) ; int (*constructor)(struct neighbour * ) ; int (*pconstructor)(struct pneigh_entry * ) ; void (*pdestructor)(struct pneigh_entry * ) ; void (*proxy_redo)(struct sk_buff * ) ; char *id ; struct neigh_parms parms ; struct list_head parms_list ; int gc_interval ; int gc_thresh1 ; int gc_thresh2 ; int gc_thresh3 ; unsigned long last_flush ; struct delayed_work gc_work ; struct timer_list proxy_timer ; struct sk_buff_head proxy_queue ; atomic_t entries ; rwlock_t lock ; unsigned long last_rand ; struct neigh_statistics *stats ; struct neigh_hash_table *nht ; struct pneigh_entry **phash_buckets ; }; struct dn_route; union __anonunion____missing_field_name_340 { struct dst_entry *next ; struct rtable *rt_next ; struct rt6_info *rt6_next ; struct dn_route *dn_next ; }; struct dst_entry { struct callback_head callback_head ; struct dst_entry *child ; struct net_device *dev ; 
struct dst_ops *ops ; unsigned long _metrics ; unsigned long expires ; struct dst_entry *path ; struct dst_entry *from ; struct xfrm_state *xfrm ; int (*input)(struct sk_buff * ) ; int (*output)(struct sock * , struct sk_buff * ) ; unsigned short flags ; unsigned short pending_confirm ; short error ; short obsolete ; unsigned short header_len ; unsigned short trailer_len ; __u32 tclassid ; long __pad_to_align_refcnt[2U] ; atomic_t __refcnt ; int __use ; unsigned long lastuse ; union __anonunion____missing_field_name_340 __annonCompField101 ; }; struct hwtstamp_config { int flags ; int tx_type ; int rx_filter ; }; struct __anonstruct_socket_lock_t_341 { spinlock_t slock ; int owned ; wait_queue_head_t wq ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_socket_lock_t_341 socket_lock_t; struct proto; typedef __u32 __portpair; typedef __u64 __addrpair; struct __anonstruct____missing_field_name_343 { __be32 skc_daddr ; __be32 skc_rcv_saddr ; }; union __anonunion____missing_field_name_342 { __addrpair skc_addrpair ; struct __anonstruct____missing_field_name_343 __annonCompField102 ; }; union __anonunion____missing_field_name_344 { unsigned int skc_hash ; __u16 skc_u16hashes[2U] ; }; struct __anonstruct____missing_field_name_346 { __be16 skc_dport ; __u16 skc_num ; }; union __anonunion____missing_field_name_345 { __portpair skc_portpair ; struct __anonstruct____missing_field_name_346 __annonCompField105 ; }; union __anonunion____missing_field_name_347 { struct hlist_node skc_bind_node ; struct hlist_nulls_node skc_portaddr_node ; }; union __anonunion____missing_field_name_348 { struct hlist_node skc_node ; struct hlist_nulls_node skc_nulls_node ; }; struct sock_common { union __anonunion____missing_field_name_342 __annonCompField103 ; union __anonunion____missing_field_name_344 __annonCompField104 ; union __anonunion____missing_field_name_345 __annonCompField106 ; unsigned short skc_family ; unsigned char volatile skc_state ; unsigned char skc_reuse : 4 ; 
unsigned char skc_reuseport : 1 ; unsigned char skc_ipv6only : 1 ; unsigned char skc_net_refcnt : 1 ; int skc_bound_dev_if ; union __anonunion____missing_field_name_347 __annonCompField107 ; struct proto *skc_prot ; possible_net_t skc_net ; struct in6_addr skc_v6_daddr ; struct in6_addr skc_v6_rcv_saddr ; atomic64_t skc_cookie ; int skc_dontcopy_begin[0U] ; union __anonunion____missing_field_name_348 __annonCompField108 ; int skc_tx_queue_mapping ; atomic_t skc_refcnt ; int skc_dontcopy_end[0U] ; }; struct cg_proto; struct __anonstruct_sk_backlog_349 { atomic_t rmem_alloc ; int len ; struct sk_buff *head ; struct sk_buff *tail ; }; struct sock { struct sock_common __sk_common ; socket_lock_t sk_lock ; struct sk_buff_head sk_receive_queue ; struct __anonstruct_sk_backlog_349 sk_backlog ; int sk_forward_alloc ; __u32 sk_rxhash ; u16 sk_incoming_cpu ; __u32 sk_txhash ; unsigned int sk_napi_id ; unsigned int sk_ll_usec ; atomic_t sk_drops ; int sk_rcvbuf ; struct sk_filter *sk_filter ; struct socket_wq *sk_wq ; struct xfrm_policy *sk_policy[2U] ; unsigned long sk_flags ; struct dst_entry *sk_rx_dst ; struct dst_entry *sk_dst_cache ; spinlock_t sk_dst_lock ; atomic_t sk_wmem_alloc ; atomic_t sk_omem_alloc ; int sk_sndbuf ; struct sk_buff_head sk_write_queue ; unsigned char sk_shutdown : 2 ; unsigned char sk_no_check_tx : 1 ; unsigned char sk_no_check_rx : 1 ; unsigned char sk_userlocks : 4 ; unsigned char sk_protocol ; unsigned short sk_type ; int sk_wmem_queued ; gfp_t sk_allocation ; u32 sk_pacing_rate ; u32 sk_max_pacing_rate ; netdev_features_t sk_route_caps ; netdev_features_t sk_route_nocaps ; int sk_gso_type ; unsigned int sk_gso_max_size ; u16 sk_gso_max_segs ; int sk_rcvlowat ; unsigned long sk_lingertime ; struct sk_buff_head sk_error_queue ; struct proto *sk_prot_creator ; rwlock_t sk_callback_lock ; int sk_err ; int sk_err_soft ; u32 sk_ack_backlog ; u32 sk_max_ack_backlog ; __u32 sk_priority ; __u32 sk_cgrp_prioidx ; struct pid *sk_peer_pid ; struct cred 
const *sk_peer_cred ; long sk_rcvtimeo ; long sk_sndtimeo ; struct timer_list sk_timer ; ktime_t sk_stamp ; u16 sk_tsflags ; u32 sk_tskey ; struct socket *sk_socket ; void *sk_user_data ; struct page_frag sk_frag ; struct sk_buff *sk_send_head ; __s32 sk_peek_off ; int sk_write_pending ; void *sk_security ; __u32 sk_mark ; u32 sk_classid ; struct cg_proto *sk_cgrp ; void (*sk_state_change)(struct sock * ) ; void (*sk_data_ready)(struct sock * ) ; void (*sk_write_space)(struct sock * ) ; void (*sk_error_report)(struct sock * ) ; int (*sk_backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*sk_destruct)(struct sock * ) ; }; struct request_sock_ops; struct timewait_sock_ops; struct inet_hashinfo; struct raw_hashinfo; struct udp_table; union __anonunion_h_352 { struct inet_hashinfo *hashinfo ; struct udp_table *udp_table ; struct raw_hashinfo *raw_hash ; }; struct proto { void (*close)(struct sock * , long ) ; int (*connect)(struct sock * , struct sockaddr * , int ) ; int (*disconnect)(struct sock * , int ) ; struct sock *(*accept)(struct sock * , int , int * ) ; int (*ioctl)(struct sock * , int , unsigned long ) ; int (*init)(struct sock * ) ; void (*destroy)(struct sock * ) ; void (*shutdown)(struct sock * , int ) ; int (*setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_ioctl)(struct sock * , unsigned int , unsigned long ) ; int (*sendmsg)(struct sock * , struct msghdr * , size_t ) ; int (*recvmsg)(struct sock * , struct msghdr * , size_t , int , int , int * ) ; int (*sendpage)(struct sock * , struct page * , int , size_t , int ) ; int (*bind)(struct sock * , struct sockaddr * , int ) ; int (*backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*release_cb)(struct sock * ) ; void (*hash)(struct sock * ) ; void 
(*unhash)(struct sock * ) ; void (*rehash)(struct sock * ) ; int (*get_port)(struct sock * , unsigned short ) ; void (*clear_sk)(struct sock * , int ) ; unsigned int inuse_idx ; bool (*stream_memory_free)(struct sock const * ) ; void (*enter_memory_pressure)(struct sock * ) ; atomic_long_t *memory_allocated ; struct percpu_counter *sockets_allocated ; int *memory_pressure ; long *sysctl_mem ; int *sysctl_wmem ; int *sysctl_rmem ; int max_header ; bool no_autobind ; struct kmem_cache *slab ; unsigned int obj_size ; int slab_flags ; struct percpu_counter *orphan_count ; struct request_sock_ops *rsk_prot ; struct timewait_sock_ops *twsk_prot ; union __anonunion_h_352 h ; struct module *owner ; char name[32U] ; struct list_head node ; int (*init_cgroup)(struct mem_cgroup * , struct cgroup_subsys * ) ; void (*destroy_cgroup)(struct mem_cgroup * ) ; struct cg_proto *(*proto_cgroup)(struct mem_cgroup * ) ; }; struct cg_proto { struct page_counter memory_allocated ; struct percpu_counter sockets_allocated ; int memory_pressure ; long sysctl_mem[3U] ; unsigned long flags ; struct mem_cgroup *memcg ; }; struct request_sock_ops { int family ; int obj_size ; struct kmem_cache *slab ; char *slab_name ; int (*rtx_syn_ack)(struct sock * , struct request_sock * ) ; void (*send_ack)(struct sock * , struct sk_buff * , struct request_sock * ) ; void (*send_reset)(struct sock * , struct sk_buff * ) ; void (*destructor)(struct request_sock * ) ; void (*syn_ack_timeout)(struct request_sock const * ) ; }; struct request_sock { struct sock_common __req_common ; struct request_sock *dl_next ; struct sock *rsk_listener ; u16 mss ; u8 num_retrans ; unsigned char cookie_ts : 1 ; unsigned char num_timeout : 7 ; u32 window_clamp ; u32 rcv_wnd ; u32 ts_recent ; struct timer_list rsk_timer ; struct request_sock_ops const *rsk_ops ; struct sock *sk ; u32 *saved_syn ; u32 secid ; u32 peer_secid ; }; struct timewait_sock_ops { struct kmem_cache *twsk_slab ; char *twsk_slab_name ; unsigned int 
twsk_obj_size ; int (*twsk_unique)(struct sock * , struct sock * , void * ) ; void (*twsk_destructor)(struct sock * ) ; }; struct tcphdr { __be16 source ; __be16 dest ; __be32 seq ; __be32 ack_seq ; unsigned char res1 : 4 ; unsigned char doff : 4 ; unsigned char fin : 1 ; unsigned char syn : 1 ; unsigned char rst : 1 ; unsigned char psh : 1 ; unsigned char ack : 1 ; unsigned char urg : 1 ; unsigned char ece : 1 ; unsigned char cwr : 1 ; __be16 window ; __sum16 check ; __be16 urg_ptr ; }; struct ipv6hdr { unsigned char priority : 4 ; unsigned char version : 4 ; __u8 flow_lbl[3U] ; __be16 payload_len ; __u8 nexthdr ; __u8 hop_limit ; struct in6_addr saddr ; struct in6_addr daddr ; }; struct ipv6_stable_secret { bool initialized ; struct in6_addr secret ; }; struct ipv6_devconf { __s32 forwarding ; __s32 hop_limit ; __s32 mtu6 ; __s32 accept_ra ; __s32 accept_redirects ; __s32 autoconf ; __s32 dad_transmits ; __s32 rtr_solicits ; __s32 rtr_solicit_interval ; __s32 rtr_solicit_delay ; __s32 force_mld_version ; __s32 mldv1_unsolicited_report_interval ; __s32 mldv2_unsolicited_report_interval ; __s32 use_tempaddr ; __s32 temp_valid_lft ; __s32 temp_prefered_lft ; __s32 regen_max_retry ; __s32 max_desync_factor ; __s32 max_addresses ; __s32 accept_ra_defrtr ; __s32 accept_ra_pinfo ; __s32 accept_ra_rtr_pref ; __s32 rtr_probe_interval ; __s32 accept_ra_rt_info_max_plen ; __s32 proxy_ndp ; __s32 accept_source_route ; __s32 accept_ra_from_local ; __s32 optimistic_dad ; __s32 use_optimistic ; __s32 mc_forwarding ; __s32 disable_ipv6 ; __s32 accept_dad ; __s32 force_tllao ; __s32 ndisc_notify ; __s32 suppress_frag_ndisc ; __s32 accept_ra_mtu ; struct ipv6_stable_secret stable_secret ; void *sysctl ; }; struct ip6_sf_list { struct ip6_sf_list *sf_next ; struct in6_addr sf_addr ; unsigned long sf_count[2U] ; unsigned char sf_gsresp ; unsigned char sf_oldin ; unsigned char sf_crcount ; }; struct ifmcaddr6 { struct in6_addr mca_addr ; struct inet6_dev *idev ; struct ifmcaddr6 
*next ; struct ip6_sf_list *mca_sources ; struct ip6_sf_list *mca_tomb ; unsigned int mca_sfmode ; unsigned char mca_crcount ; unsigned long mca_sfcount[2U] ; struct timer_list mca_timer ; unsigned int mca_flags ; int mca_users ; atomic_t mca_refcnt ; spinlock_t mca_lock ; unsigned long mca_cstamp ; unsigned long mca_tstamp ; }; struct ifacaddr6 { struct in6_addr aca_addr ; struct inet6_dev *aca_idev ; struct rt6_info *aca_rt ; struct ifacaddr6 *aca_next ; int aca_users ; atomic_t aca_refcnt ; unsigned long aca_cstamp ; unsigned long aca_tstamp ; }; struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry ; struct ipstats_mib *ipv6 ; struct icmpv6_mib_device *icmpv6dev ; struct icmpv6msg_mib_device *icmpv6msgdev ; }; struct inet6_dev { struct net_device *dev ; struct list_head addr_list ; struct ifmcaddr6 *mc_list ; struct ifmcaddr6 *mc_tomb ; spinlock_t mc_lock ; unsigned char mc_qrv ; unsigned char mc_gq_running ; unsigned char mc_ifc_count ; unsigned char mc_dad_count ; unsigned long mc_v1_seen ; unsigned long mc_qi ; unsigned long mc_qri ; unsigned long mc_maxdelay ; struct timer_list mc_gq_timer ; struct timer_list mc_ifc_timer ; struct timer_list mc_dad_timer ; struct ifacaddr6 *ac_list ; rwlock_t lock ; atomic_t refcnt ; __u32 if_flags ; int dead ; u8 rndid[8U] ; struct timer_list regen_timer ; struct list_head tempaddr_list ; struct in6_addr token ; struct neigh_parms *nd_parms ; struct ipv6_devconf cnf ; struct ipv6_devstat stats ; struct timer_list rs_timer ; __u8 rs_probes ; __u8 addr_gen_mode ; unsigned long tstamp ; struct callback_head rcu ; }; union __anonunion____missing_field_name_377 { __be32 a4 ; __be32 a6[4U] ; struct in6_addr in6 ; }; struct inetpeer_addr_base { union __anonunion____missing_field_name_377 __annonCompField111 ; }; struct inetpeer_addr { struct inetpeer_addr_base addr ; __u16 family ; }; union __anonunion____missing_field_name_378 { struct list_head gc_list ; struct callback_head gc_rcu ; }; struct 
__anonstruct____missing_field_name_380 { atomic_t rid ; }; union __anonunion____missing_field_name_379 { struct __anonstruct____missing_field_name_380 __annonCompField113 ; struct callback_head rcu ; struct inet_peer *gc_next ; }; struct inet_peer { struct inet_peer *avl_left ; struct inet_peer *avl_right ; struct inetpeer_addr daddr ; __u32 avl_height ; u32 metrics[16U] ; u32 rate_tokens ; unsigned long rate_last ; union __anonunion____missing_field_name_378 __annonCompField112 ; union __anonunion____missing_field_name_379 __annonCompField114 ; __u32 dtime ; atomic_t refcnt ; }; struct inet_peer_base { struct inet_peer *root ; seqlock_t lock ; int total ; }; struct uncached_list; struct rtable { struct dst_entry dst ; int rt_genid ; unsigned int rt_flags ; __u16 rt_type ; __u8 rt_is_input ; __u8 rt_uses_gateway ; int rt_iif ; __be32 rt_gateway ; u32 rt_pmtu ; struct list_head rt_uncached ; struct uncached_list *rt_uncached_list ; }; struct vlan_hdr { __be16 h_vlan_TCI ; __be16 h_vlan_encapsulated_proto ; }; struct vlan_ethhdr { unsigned char h_dest[6U] ; unsigned char h_source[6U] ; __be16 h_vlan_proto ; __be16 h_vlan_TCI ; __be16 h_vlan_encapsulated_proto ; }; struct vlan_pcpu_stats { u64 rx_packets ; u64 rx_bytes ; u64 rx_multicast ; u64 tx_packets ; u64 tx_bytes ; struct u64_stats_sync syncp ; u32 rx_errors ; u32 tx_dropped ; }; struct netpoll; struct macvlan_port; struct macvtap_queue; struct macvlan_dev { struct net_device *dev ; struct list_head list ; struct hlist_node hlist ; struct macvlan_port *port ; struct net_device *lowerdev ; void *fwd_priv ; struct vlan_pcpu_stats *pcpu_stats ; unsigned long mc_filter[4U] ; netdev_features_t set_features ; enum macvlan_mode mode ; u16 flags ; struct macvtap_queue *taps[256U] ; struct list_head queue_list ; int numvtaps ; int numqueues ; netdev_features_t tap_features ; int minor ; int nest_level ; struct netpoll *netpoll ; unsigned int macaddr_count ; }; struct cyclecounter { cycle_t (*read)(struct cyclecounter 
const * ) ; cycle_t mask ; u32 mult ; u32 shift ; }; struct timecounter { struct cyclecounter const *cc ; cycle_t cycle_last ; u64 nsec ; u64 mask ; u64 frac ; }; struct cdev { struct kobject kobj ; struct module *owner ; struct file_operations const *ops ; struct list_head list ; dev_t dev ; unsigned int count ; }; struct ptp_clock_time { __s64 sec ; __u32 nsec ; __u32 reserved ; }; struct ptp_extts_request { unsigned int index ; unsigned int flags ; unsigned int rsv[2U] ; }; struct ptp_perout_request { struct ptp_clock_time start ; struct ptp_clock_time period ; unsigned int index ; unsigned int flags ; unsigned int rsv[4U] ; }; enum ptp_pin_function { PTP_PF_NONE = 0, PTP_PF_EXTTS = 1, PTP_PF_PEROUT = 2, PTP_PF_PHYSYNC = 3 } ; struct ptp_pin_desc { char name[64U] ; unsigned int index ; unsigned int func ; unsigned int chan ; unsigned int rsv[5U] ; }; enum ldv_36092 { PTP_CLK_REQ_EXTTS = 0, PTP_CLK_REQ_PEROUT = 1, PTP_CLK_REQ_PPS = 2 } ; union __anonunion____missing_field_name_390 { struct ptp_extts_request extts ; struct ptp_perout_request perout ; }; struct ptp_clock_request { enum ldv_36092 type ; union __anonunion____missing_field_name_390 __annonCompField116 ; }; struct ptp_clock_info { struct module *owner ; char name[16U] ; s32 max_adj ; int n_alarm ; int n_ext_ts ; int n_per_out ; int n_pins ; int pps ; struct ptp_pin_desc *pin_config ; int (*adjfreq)(struct ptp_clock_info * , s32 ) ; int (*adjtime)(struct ptp_clock_info * , s64 ) ; int (*gettime64)(struct ptp_clock_info * , struct timespec * ) ; int (*settime64)(struct ptp_clock_info * , struct timespec const * ) ; int (*enable)(struct ptp_clock_info * , struct ptp_clock_request * , int ) ; int (*verify)(struct ptp_clock_info * , unsigned int , enum ptp_pin_function , unsigned int ) ; }; struct ptp_clock; struct mdio_if_info { int prtad ; u32 mmds ; unsigned int mode_support ; struct net_device *dev ; int (*mdio_read)(struct net_device * , int , int , u16 ) ; int (*mdio_write)(struct net_device * , int , 
int , u16 , u16 ) ; }; struct ixgbe_thermal_diode_data { u8 location ; u8 temp ; u8 caution_thresh ; u8 max_op_thresh ; }; struct ixgbe_thermal_sensor_data { struct ixgbe_thermal_diode_data sensor[3U] ; }; struct __anonstruct_read_393 { __le64 buffer_addr ; __le32 cmd_type_len ; __le32 olinfo_status ; }; struct __anonstruct_wb_394 { __le64 rsvd ; __le32 nxtseq_seed ; __le32 status ; }; union ixgbe_adv_tx_desc { struct __anonstruct_read_393 read ; struct __anonstruct_wb_394 wb ; }; struct __anonstruct_read_395 { __le64 pkt_addr ; __le64 hdr_addr ; }; struct __anonstruct_hs_rss_399 { __le16 pkt_info ; __le16 hdr_info ; }; union __anonunion_lo_dword_398 { __le32 data ; struct __anonstruct_hs_rss_399 hs_rss ; }; struct __anonstruct_csum_ip_401 { __le16 ip_id ; __le16 csum ; }; union __anonunion_hi_dword_400 { __le32 rss ; struct __anonstruct_csum_ip_401 csum_ip ; }; struct __anonstruct_lower_397 { union __anonunion_lo_dword_398 lo_dword ; union __anonunion_hi_dword_400 hi_dword ; }; struct __anonstruct_upper_402 { __le32 status_error ; __le16 length ; __le16 vlan ; }; struct __anonstruct_wb_396 { struct __anonstruct_lower_397 lower ; struct __anonstruct_upper_402 upper ; }; union ixgbe_adv_rx_desc { struct __anonstruct_read_395 read ; struct __anonstruct_wb_396 wb ; }; typedef u32 ixgbe_autoneg_advertised; typedef u32 ixgbe_link_speed; struct __anonstruct_formatted_403 { u8 vm_pool ; u8 flow_type ; __be16 vlan_id ; __be32 dst_ip[4U] ; __be32 src_ip[4U] ; __be16 src_port ; __be16 dst_port ; __be16 flex_bytes ; __be16 bkt_hash ; }; union ixgbe_atr_input { struct __anonstruct_formatted_403 formatted ; __be32 dword_stream[11U] ; }; struct __anonstruct_formatted_404 { u8 vm_pool ; u8 flow_type ; __be16 vlan_id ; }; struct __anonstruct_port_405 { __be16 src ; __be16 dst ; }; union ixgbe_atr_hash_dword { struct __anonstruct_formatted_404 formatted ; __be32 ip ; struct __anonstruct_port_405 port ; __be16 flex_bytes ; __be32 dword ; }; enum ixgbe_eeprom_type { 
ixgbe_eeprom_uninitialized = 0, ixgbe_eeprom_spi = 1, ixgbe_flash = 2, ixgbe_eeprom_none = 3 } ; enum ixgbe_mac_type { ixgbe_mac_unknown = 0, ixgbe_mac_82598EB = 1, ixgbe_mac_82599EB = 2, ixgbe_mac_X540 = 3, ixgbe_mac_X550 = 4, ixgbe_mac_X550EM_x = 5, ixgbe_num_macs = 6 } ; enum ixgbe_phy_type { ixgbe_phy_unknown = 0, ixgbe_phy_none = 1, ixgbe_phy_tn = 2, ixgbe_phy_aq = 3, ixgbe_phy_x550em_kr = 4, ixgbe_phy_x550em_kx4 = 5, ixgbe_phy_x550em_ext_t = 6, ixgbe_phy_cu_unknown = 7, ixgbe_phy_qt = 8, ixgbe_phy_xaui = 9, ixgbe_phy_nl = 10, ixgbe_phy_sfp_passive_tyco = 11, ixgbe_phy_sfp_passive_unknown = 12, ixgbe_phy_sfp_active_unknown = 13, ixgbe_phy_sfp_avago = 14, ixgbe_phy_sfp_ftl = 15, ixgbe_phy_sfp_ftl_active = 16, ixgbe_phy_sfp_unknown = 17, ixgbe_phy_sfp_intel = 18, ixgbe_phy_qsfp_passive_unknown = 19, ixgbe_phy_qsfp_active_unknown = 20, ixgbe_phy_qsfp_intel = 21, ixgbe_phy_qsfp_unknown = 22, ixgbe_phy_sfp_unsupported = 23, ixgbe_phy_generic = 24 } ; enum ixgbe_sfp_type { ixgbe_sfp_type_da_cu = 0, ixgbe_sfp_type_sr = 1, ixgbe_sfp_type_lr = 2, ixgbe_sfp_type_da_cu_core0 = 3, ixgbe_sfp_type_da_cu_core1 = 4, ixgbe_sfp_type_srlr_core0 = 5, ixgbe_sfp_type_srlr_core1 = 6, ixgbe_sfp_type_da_act_lmt_core0 = 7, ixgbe_sfp_type_da_act_lmt_core1 = 8, ixgbe_sfp_type_1g_cu_core0 = 9, ixgbe_sfp_type_1g_cu_core1 = 10, ixgbe_sfp_type_1g_sx_core0 = 11, ixgbe_sfp_type_1g_sx_core1 = 12, ixgbe_sfp_type_1g_lx_core0 = 13, ixgbe_sfp_type_1g_lx_core1 = 14, ixgbe_sfp_type_not_present = 65534, ixgbe_sfp_type_unknown = 65535 } ; enum ixgbe_media_type { ixgbe_media_type_unknown = 0, ixgbe_media_type_fiber = 1, ixgbe_media_type_fiber_qsfp = 2, ixgbe_media_type_fiber_lco = 3, ixgbe_media_type_copper = 4, ixgbe_media_type_backplane = 5, ixgbe_media_type_cx4 = 6, ixgbe_media_type_virtual = 7 } ; enum ixgbe_fc_mode { ixgbe_fc_none = 0, ixgbe_fc_rx_pause = 1, ixgbe_fc_tx_pause = 2, ixgbe_fc_full = 3, ixgbe_fc_default = 4 } ; enum ixgbe_smart_speed { ixgbe_smart_speed_auto = 0, ixgbe_smart_speed_on = 
1, ixgbe_smart_speed_off = 2 } ; enum ixgbe_bus_type { ixgbe_bus_type_unknown = 0, ixgbe_bus_type_pci = 1, ixgbe_bus_type_pcix = 2, ixgbe_bus_type_pci_express = 3, ixgbe_bus_type_reserved = 4 } ; enum ixgbe_bus_speed { ixgbe_bus_speed_unknown = 0, ixgbe_bus_speed_33 = 33, ixgbe_bus_speed_66 = 66, ixgbe_bus_speed_100 = 100, ixgbe_bus_speed_120 = 120, ixgbe_bus_speed_133 = 133, ixgbe_bus_speed_2500 = 2500, ixgbe_bus_speed_5000 = 5000, ixgbe_bus_speed_8000 = 8000, ixgbe_bus_speed_reserved = 8001 } ; enum ixgbe_bus_width { ixgbe_bus_width_unknown = 0, ixgbe_bus_width_pcie_x1 = 1, ixgbe_bus_width_pcie_x2 = 2, ixgbe_bus_width_pcie_x4 = 4, ixgbe_bus_width_pcie_x8 = 8, ixgbe_bus_width_32 = 32, ixgbe_bus_width_64 = 64, ixgbe_bus_width_reserved = 65 } ; struct ixgbe_addr_filter_info { u32 num_mc_addrs ; u32 rar_used_count ; u32 mta_in_use ; u32 overflow_promisc ; bool uc_set_promisc ; bool user_set_promisc ; }; struct ixgbe_bus_info { enum ixgbe_bus_speed speed ; enum ixgbe_bus_width width ; enum ixgbe_bus_type type ; u16 func ; u16 lan_id ; }; struct ixgbe_fc_info { u32 high_water[8U] ; u32 low_water[8U] ; u16 pause_time ; bool send_xon ; bool strict_ieee ; bool disable_fc_autoneg ; bool fc_was_autonegged ; enum ixgbe_fc_mode current_mode ; enum ixgbe_fc_mode requested_mode ; }; struct ixgbe_hw_stats { u64 crcerrs ; u64 illerrc ; u64 errbc ; u64 mspdc ; u64 mpctotal ; u64 mpc[8U] ; u64 mlfc ; u64 mrfc ; u64 rlec ; u64 lxontxc ; u64 lxonrxc ; u64 lxofftxc ; u64 lxoffrxc ; u64 pxontxc[8U] ; u64 pxonrxc[8U] ; u64 pxofftxc[8U] ; u64 pxoffrxc[8U] ; u64 prc64 ; u64 prc127 ; u64 prc255 ; u64 prc511 ; u64 prc1023 ; u64 prc1522 ; u64 gprc ; u64 bprc ; u64 mprc ; u64 gptc ; u64 gorc ; u64 gotc ; u64 rnbc[8U] ; u64 ruc ; u64 rfc ; u64 roc ; u64 rjc ; u64 mngprc ; u64 mngpdc ; u64 mngptc ; u64 tor ; u64 tpr ; u64 tpt ; u64 ptc64 ; u64 ptc127 ; u64 ptc255 ; u64 ptc511 ; u64 ptc1023 ; u64 ptc1522 ; u64 mptc ; u64 bptc ; u64 xec ; u64 rqsmr[16U] ; u64 tqsmr[8U] ; u64 qprc[16U] ; u64 
qptc[16U] ; u64 qbrc[16U] ; u64 qbtc[16U] ; u64 qprdc[16U] ; u64 pxon2offc[8U] ; u64 fdirustat_add ; u64 fdirustat_remove ; u64 fdirfstat_fadd ; u64 fdirfstat_fremove ; u64 fdirmatch ; u64 fdirmiss ; u64 fccrc ; u64 fcoerpdc ; u64 fcoeprc ; u64 fcoeptc ; u64 fcoedwrc ; u64 fcoedwtc ; u64 fcoe_noddp ; u64 fcoe_noddp_ext_buff ; u64 b2ospc ; u64 b2ogprc ; u64 o2bgptc ; u64 o2bspc ; }; struct ixgbe_eeprom_operations { s32 (*init_params)(struct ixgbe_hw * ) ; s32 (*read)(struct ixgbe_hw * , u16 , u16 * ) ; s32 (*read_buffer)(struct ixgbe_hw * , u16 , u16 , u16 * ) ; s32 (*write)(struct ixgbe_hw * , u16 , u16 ) ; s32 (*write_buffer)(struct ixgbe_hw * , u16 , u16 , u16 * ) ; s32 (*validate_checksum)(struct ixgbe_hw * , u16 * ) ; s32 (*update_checksum)(struct ixgbe_hw * ) ; s32 (*calc_checksum)(struct ixgbe_hw * ) ; }; struct ixgbe_mac_operations { s32 (*init_hw)(struct ixgbe_hw * ) ; s32 (*reset_hw)(struct ixgbe_hw * ) ; s32 (*start_hw)(struct ixgbe_hw * ) ; s32 (*clear_hw_cntrs)(struct ixgbe_hw * ) ; enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw * ) ; s32 (*get_mac_addr)(struct ixgbe_hw * , u8 * ) ; s32 (*get_san_mac_addr)(struct ixgbe_hw * , u8 * ) ; s32 (*get_device_caps)(struct ixgbe_hw * , u16 * ) ; s32 (*get_wwn_prefix)(struct ixgbe_hw * , u16 * , u16 * ) ; s32 (*stop_adapter)(struct ixgbe_hw * ) ; s32 (*get_bus_info)(struct ixgbe_hw * ) ; void (*set_lan_id)(struct ixgbe_hw * ) ; s32 (*read_analog_reg8)(struct ixgbe_hw * , u32 , u8 * ) ; s32 (*write_analog_reg8)(struct ixgbe_hw * , u32 , u8 ) ; s32 (*setup_sfp)(struct ixgbe_hw * ) ; s32 (*disable_rx_buff)(struct ixgbe_hw * ) ; s32 (*enable_rx_buff)(struct ixgbe_hw * ) ; s32 (*enable_rx_dma)(struct ixgbe_hw * , u32 ) ; s32 (*acquire_swfw_sync)(struct ixgbe_hw * , u32 ) ; void (*release_swfw_sync)(struct ixgbe_hw * , u32 ) ; s32 (*prot_autoc_read)(struct ixgbe_hw * , bool * , u32 * ) ; s32 (*prot_autoc_write)(struct ixgbe_hw * , u32 , bool ) ; void (*disable_tx_laser)(struct ixgbe_hw * ) ; void 
/* CIL-generated (flattened) ixgbe hardware-abstraction types.
 * This span finishes the MAC-operations vtable (begun on an earlier line)
 * and declares: struct ixgbe_phy_operations (PHY vtable: identify/reset,
 * MDIO register and I2C/SFP EEPROM accessors, link setup, overtemp/LASI),
 * struct ixgbe_eeprom_info, struct ixgbe_mac_info (addresses, multicast
 * shadow table, RAR/VFT sizing, stored AUTOC values), the start of
 * struct ixgbe_phy_info, the PF<->VF mailbox vtable/stats/info, the
 * top-level struct ixgbe_hw aggregating all per-function state, the
 * per-device struct ixgbe_info bundle of op tables, and the DCB
 * strict-priority enum. Field order mirrors the kernel ABI — do not
 * reorder. All op-table entries take struct ixgbe_hw * as their first
 * argument and (for s32 returns) report a driver status code. */
(*enable_tx_laser)(struct ixgbe_hw * ) ; void (*flap_tx_laser)(struct ixgbe_hw * ) ; void (*stop_link_on_d3)(struct ixgbe_hw * ) ; s32 (*setup_link)(struct ixgbe_hw * , ixgbe_link_speed , bool ) ; s32 (*check_link)(struct ixgbe_hw * , ixgbe_link_speed * , bool * , bool ) ; s32 (*get_link_capabilities)(struct ixgbe_hw * , ixgbe_link_speed * , bool * ) ; void (*set_rxpba)(struct ixgbe_hw * , int , u32 , int ) ; s32 (*led_on)(struct ixgbe_hw * , u32 ) ; s32 (*led_off)(struct ixgbe_hw * , u32 ) ; s32 (*blink_led_start)(struct ixgbe_hw * , u32 ) ; s32 (*blink_led_stop)(struct ixgbe_hw * , u32 ) ; s32 (*set_rar)(struct ixgbe_hw * , u32 , u8 * , u32 , u32 ) ; s32 (*clear_rar)(struct ixgbe_hw * , u32 ) ; s32 (*set_vmdq)(struct ixgbe_hw * , u32 , u32 ) ; s32 (*set_vmdq_san_mac)(struct ixgbe_hw * , u32 ) ; s32 (*clear_vmdq)(struct ixgbe_hw * , u32 , u32 ) ; s32 (*init_rx_addrs)(struct ixgbe_hw * ) ; s32 (*update_mc_addr_list)(struct ixgbe_hw * , struct net_device * ) ; s32 (*enable_mc)(struct ixgbe_hw * ) ; s32 (*disable_mc)(struct ixgbe_hw * ) ; s32 (*clear_vfta)(struct ixgbe_hw * ) ; s32 (*set_vfta)(struct ixgbe_hw * , u32 , u32 , bool ) ; s32 (*init_uta_tables)(struct ixgbe_hw * ) ; void (*set_mac_anti_spoofing)(struct ixgbe_hw * , bool , int ) ; void (*set_vlan_anti_spoofing)(struct ixgbe_hw * , bool , int ) ; s32 (*fc_enable)(struct ixgbe_hw * ) ; s32 (*set_fw_drv_ver)(struct ixgbe_hw * , u8 , u8 , u8 , u8 ) ; s32 (*get_thermal_sensor_data)(struct ixgbe_hw * ) ; s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw * ) ; void (*disable_rx)(struct ixgbe_hw * ) ; void (*enable_rx)(struct ixgbe_hw * ) ; void (*set_source_address_pruning)(struct ixgbe_hw * , bool , unsigned int ) ; void (*set_ethertype_anti_spoofing)(struct ixgbe_hw * , bool , int ) ; s32 (*dmac_config)(struct ixgbe_hw * ) ; s32 (*dmac_update_tcs)(struct ixgbe_hw * ) ; s32 (*dmac_config_tcs)(struct ixgbe_hw * ) ; }; struct ixgbe_phy_operations { s32 (*identify)(struct ixgbe_hw * ) ; s32 (*identify_sfp)(struct 
ixgbe_hw * ) ; s32 (*init)(struct ixgbe_hw * ) ; s32 (*reset)(struct ixgbe_hw * ) ; s32 (*read_reg)(struct ixgbe_hw * , u32 , u32 , u16 * ) ; s32 (*write_reg)(struct ixgbe_hw * , u32 , u32 , u16 ) ; s32 (*read_reg_mdi)(struct ixgbe_hw * , u32 , u32 , u16 * ) ; s32 (*write_reg_mdi)(struct ixgbe_hw * , u32 , u32 , u16 ) ; s32 (*setup_link)(struct ixgbe_hw * ) ; s32 (*setup_internal_link)(struct ixgbe_hw * ) ; s32 (*setup_link_speed)(struct ixgbe_hw * , ixgbe_link_speed , bool ) ; s32 (*check_link)(struct ixgbe_hw * , ixgbe_link_speed * , bool * ) ; s32 (*get_firmware_version)(struct ixgbe_hw * , u16 * ) ; s32 (*read_i2c_byte)(struct ixgbe_hw * , u8 , u8 , u8 * ) ; s32 (*write_i2c_byte)(struct ixgbe_hw * , u8 , u8 , u8 ) ; s32 (*read_i2c_sff8472)(struct ixgbe_hw * , u8 , u8 * ) ; s32 (*read_i2c_eeprom)(struct ixgbe_hw * , u8 , u8 * ) ; s32 (*write_i2c_eeprom)(struct ixgbe_hw * , u8 , u8 ) ; s32 (*read_i2c_combined)(struct ixgbe_hw * , u8 , u16 , u16 * ) ; s32 (*write_i2c_combined)(struct ixgbe_hw * , u8 , u16 , u16 ) ; s32 (*check_overtemp)(struct ixgbe_hw * ) ; s32 (*set_phy_power)(struct ixgbe_hw * , bool ) ; s32 (*handle_lasi)(struct ixgbe_hw * ) ; }; struct ixgbe_eeprom_info { struct ixgbe_eeprom_operations ops ; enum ixgbe_eeprom_type type ; u32 semaphore_delay ; u16 word_size ; u16 address_bits ; u16 word_page_size ; }; struct ixgbe_mac_info { struct ixgbe_mac_operations ops ; enum ixgbe_mac_type type ; u8 addr[6U] ; u8 perm_addr[6U] ; u8 san_addr[6U] ; u16 wwnn_prefix ; u16 wwpn_prefix ; u16 max_msix_vectors ; u32 mta_shadow[128U] ; s32 mc_filter_type ; u32 mcft_size ; u32 vft_size ; u32 num_rar_entries ; u32 rar_highwater ; u32 rx_pb_size ; u32 max_tx_queues ; u32 max_rx_queues ; u32 orig_autoc ; u32 orig_autoc2 ; bool orig_link_settings_stored ; bool autotry_restart ; u8 flags ; u8 san_mac_rar_index ; struct ixgbe_thermal_sensor_data thermal_sensor_data ; bool set_lben ; }; struct ixgbe_phy_info { struct ixgbe_phy_operations ops ; struct mdio_if_info mdio ; 
enum ixgbe_phy_type type ; u32 id ; enum ixgbe_sfp_type sfp_type ; bool sfp_setup_needed ; u32 revision ; enum ixgbe_media_type media_type ; u8 lan_id ; u32 phy_semaphore_mask ; bool reset_disable ; ixgbe_autoneg_advertised autoneg_advertised ; enum ixgbe_smart_speed smart_speed ; bool smart_speed_active ; bool multispeed_fiber ; bool reset_if_overtemp ; bool qsfp_shared_i2c_bus ; u32 nw_mng_if_sel ; }; struct ixgbe_mbx_operations; struct ixgbe_mbx_operations { s32 (*init_params)(struct ixgbe_hw * ) ; s32 (*read)(struct ixgbe_hw * , u32 * , u16 , u16 ) ; s32 (*write)(struct ixgbe_hw * , u32 * , u16 , u16 ) ; s32 (*read_posted)(struct ixgbe_hw * , u32 * , u16 , u16 ) ; s32 (*write_posted)(struct ixgbe_hw * , u32 * , u16 , u16 ) ; s32 (*check_for_msg)(struct ixgbe_hw * , u16 ) ; s32 (*check_for_ack)(struct ixgbe_hw * , u16 ) ; s32 (*check_for_rst)(struct ixgbe_hw * , u16 ) ; }; struct ixgbe_mbx_stats { u32 msgs_tx ; u32 msgs_rx ; u32 acks ; u32 reqs ; u32 rsts ; }; struct ixgbe_mbx_info { struct ixgbe_mbx_operations ops ; struct ixgbe_mbx_stats stats ; u32 timeout ; u32 usec_delay ; u32 v2p_mailbox ; u16 size ; }; struct ixgbe_hw { u8 *hw_addr ; void *back ; struct ixgbe_mac_info mac ; struct ixgbe_addr_filter_info addr_ctrl ; struct ixgbe_fc_info fc ; struct ixgbe_phy_info phy ; struct ixgbe_eeprom_info eeprom ; struct ixgbe_bus_info bus ; struct ixgbe_mbx_info mbx ; u32 const *mvals ; u16 device_id ; u16 vendor_id ; u16 subsystem_device_id ; u16 subsystem_vendor_id ; u8 revision_id ; bool adapter_stopped ; bool force_full_reset ; bool allow_unsupported_sfp ; bool wol_enabled ; }; struct ixgbe_info { enum ixgbe_mac_type mac ; s32 (*get_invariants)(struct ixgbe_hw * ) ; struct ixgbe_mac_operations *mac_ops ; struct ixgbe_eeprom_operations *eeprom_ops ; struct ixgbe_phy_operations *phy_ops ; struct ixgbe_mbx_operations *mbx_ops ; u32 const *mvals ; }; enum strict_prio_type { prio_none = 0, prio_group = 1, prio_link = 2 } ; struct dcb_support { u32 capabilities ; u8 
/* CIL-generated ixgbe Data Center Bridging (DCB), FCoE, and SR-IOV types.
 * Completes struct dcb_support, then declares: tc_bw_alloc (per-traffic-class
 * bandwidth-group credits), the PFC enable enum, tc_configuration (two
 * paths per TC — presumably TX and RX directions, TODO confirm against the
 * ixgbe DCB code), ixgbe_dcb_config (8 traffic classes), the FCoE DDP
 * context (2048 DDP entries plus a DMA pool), per-VF state
 * (vf_data_storage: MAC, up to 30 multicast hashes, VLAN/QoS, spoof-check
 * flags), vf_macvlans list nodes, the per-descriptor TX/RX buffer
 * bookkeeping structs, and the start of the shared queue-statistics
 * struct. Code is byte-identical to the generated input; layout must not
 * change. */
traffic_classes ; u8 pfc_traffic_classes ; }; struct tc_bw_alloc { u8 bwg_id ; u8 bwg_percent ; u8 link_percent ; u8 up_to_tc_bitmap ; u16 data_credits_refill ; u16 data_credits_max ; enum strict_prio_type prio_type ; }; enum dcb_pfc_type { pfc_disabled = 0, pfc_enabled_full = 1, pfc_enabled_tx = 2, pfc_enabled_rx = 3 } ; struct tc_configuration { struct tc_bw_alloc path[2U] ; enum dcb_pfc_type dcb_pfc ; u16 desc_credits_max ; u8 tc ; }; struct dcb_num_tcs { u8 pg_tcs ; u8 pfc_tcs ; }; struct ixgbe_dcb_config { struct dcb_support support ; struct dcb_num_tcs num_tcs ; struct tc_configuration tc_config[8U] ; u8 bw_percentage[2U][8U] ; bool pfc_mode_enable ; u32 dcb_cfg_version ; u32 link_speed ; }; struct ixgbe_fcoe_ddp { int len ; u32 err ; unsigned int sgc ; struct scatterlist *sgl ; dma_addr_t udp ; u64 *udl ; struct dma_pool *pool ; }; struct ixgbe_fcoe_ddp_pool { struct dma_pool *pool ; u64 noddp ; u64 noddp_ext_buff ; }; struct ixgbe_fcoe { struct ixgbe_fcoe_ddp_pool *ddp_pool ; atomic_t refcnt ; spinlock_t lock ; struct ixgbe_fcoe_ddp ddp[2048U] ; void *extra_ddp_buffer ; dma_addr_t extra_ddp_buffer_dma ; unsigned long mode ; u8 up ; }; struct vf_data_storage { unsigned char vf_mac_addresses[6U] ; u16 vf_mc_hashes[30U] ; u16 num_vf_mc_hashes ; u16 default_vf_vlan_id ; u16 vlans_enabled ; bool clear_to_send ; bool pf_set_mac ; u16 pf_vlan ; u16 pf_qos ; u16 tx_rate ; u16 vlan_count ; u8 spoofchk_enabled ; bool rss_query_enabled ; unsigned int vf_api ; }; struct vf_macvlans { struct list_head l ; int vf ; bool free ; bool is_macvlan ; u8 vf_macvlan[6U] ; }; struct ixgbe_tx_buffer { union ixgbe_adv_tx_desc *next_to_watch ; unsigned long time_stamp ; struct sk_buff *skb ; unsigned int bytecount ; unsigned short gso_segs ; __be16 protocol ; dma_addr_t dma ; __u32 len ; u32 tx_flags ; }; struct ixgbe_rx_buffer { struct sk_buff *skb ; dma_addr_t dma ; struct page *page ; unsigned int page_offset ; }; struct ixgbe_queue_stats { u64 packets ; u64 bytes ; u64 yields ; 
u64 misses ; u64 cleaned ; }; struct ixgbe_tx_queue_stats { u64 restart_queue ; u64 tx_busy ; u64 tx_done_old ; }; struct ixgbe_rx_queue_stats { u64 rsc_count ; u64 rsc_flush ; u64 non_eop_descs ; u64 alloc_rx_page_failed ; u64 alloc_rx_buff_failed ; u64 csum_err ; }; struct ixgbe_adapter; struct ixgbe_fwd_adapter { unsigned long active_vlans[64U] ; struct net_device *netdev ; struct ixgbe_adapter *real_adapter ; unsigned int tx_base_queue ; unsigned int rx_base_queue ; int pool ; }; struct ixgbe_q_vector; union __anonunion____missing_field_name_406 { struct ixgbe_tx_buffer *tx_buffer_info ; struct ixgbe_rx_buffer *rx_buffer_info ; }; struct __anonstruct____missing_field_name_408 { u8 atr_sample_rate ; u8 atr_count ; }; union __anonunion____missing_field_name_407 { u16 next_to_alloc ; struct __anonstruct____missing_field_name_408 __annonCompField119 ; }; union __anonunion____missing_field_name_409 { struct ixgbe_tx_queue_stats tx_stats ; struct ixgbe_rx_queue_stats rx_stats ; }; struct ixgbe_ring { struct ixgbe_ring *next ; struct ixgbe_q_vector *q_vector ; struct net_device *netdev ; struct device *dev ; struct ixgbe_fwd_adapter *l2_accel_priv ; void *desc ; union __anonunion____missing_field_name_406 __annonCompField118 ; unsigned long state ; u8 *tail ; dma_addr_t dma ; unsigned int size ; u16 count ; u8 queue_index ; u8 reg_idx ; u16 next_to_use ; u16 next_to_clean ; union __anonunion____missing_field_name_407 __annonCompField120 ; u8 dcb_tc ; struct ixgbe_queue_stats stats ; struct u64_stats_sync syncp ; union __anonunion____missing_field_name_409 __annonCompField121 ; }; struct ixgbe_ring_feature { u16 limit ; u16 indices ; u16 mask ; u16 offset ; }; struct ixgbe_ring_container { struct ixgbe_ring *ring ; unsigned int total_bytes ; unsigned int total_packets ; u16 work_limit ; u8 count ; u8 itr ; }; struct ixgbe_q_vector { struct ixgbe_adapter *adapter ; int cpu ; u16 v_idx ; u16 itr ; struct ixgbe_ring_container rx ; struct ixgbe_ring_container tx ; struct 
/* CIL-generated ixgbe per-device driver state.
 * Completes struct ixgbe_q_vector (NAPI context plus a zero-length
 * trailing ring array — the generated [0U] form of a flexible array
 * member), then declares the hwmon attribute wrappers, the MAC-table
 * entry, and the central struct ixgbe_adapter: up to 64 TX/RX rings and
 * q_vectors, DCB and flow-director state, embedded struct ixgbe_hw,
 * PTP clock/timecounter fields, SR-IOV VF bookkeeping, and RSS
 * indirection table/key. The span ends mid-way through the adapter's
 * flow-director fields (continued on the next line). Do not reorder
 * fields — layout mirrors the kernel driver. */
napi_struct napi ; cpumask_t affinity_mask ; int numa_node ; struct callback_head rcu ; char name[25U] ; atomic_t state ; struct ixgbe_ring ring[0U] ; }; struct hwmon_attr { struct device_attribute dev_attr ; struct ixgbe_hw *hw ; struct ixgbe_thermal_diode_data *sensor ; char name[12U] ; }; struct hwmon_buff { struct attribute_group group ; struct attribute_group const *groups[2U] ; struct attribute *attrs[13U] ; struct hwmon_attr hwmon_list[12U] ; unsigned int n_hwmon ; }; struct ixgbe_mac_addr { u8 addr[6U] ; u16 queue ; u16 state ; }; struct ixgbe_adapter { unsigned long active_vlans[64U] ; struct net_device *netdev ; struct pci_dev *pdev ; unsigned long state ; u32 flags ; u32 flags2 ; int num_tx_queues ; u16 tx_itr_setting ; u16 tx_work_limit ; int num_rx_queues ; u16 rx_itr_setting ; struct ixgbe_ring *tx_ring[64U] ; u64 restart_queue ; u64 lsc_int ; u32 tx_timeout_count ; struct ixgbe_ring *rx_ring[64U] ; int num_rx_pools ; int num_rx_queues_per_pool ; u64 hw_csum_rx_error ; u64 hw_rx_no_dma_resources ; u64 rsc_total_count ; u64 rsc_total_flush ; u64 non_eop_descs ; u32 alloc_rx_page_failed ; u32 alloc_rx_buff_failed ; struct ixgbe_q_vector *q_vector[64U] ; struct ieee_pfc *ixgbe_ieee_pfc ; struct ieee_ets *ixgbe_ieee_ets ; struct ixgbe_dcb_config dcb_cfg ; struct ixgbe_dcb_config temp_dcb_cfg ; u8 dcb_set_bitmap ; u8 dcbx_cap ; enum ixgbe_fc_mode last_lfc_mode ; int num_q_vectors ; int max_q_vectors ; struct ixgbe_ring_feature ring_feature[5U] ; struct msix_entry *msix_entries ; u32 test_icr ; struct ixgbe_ring test_tx_ring ; struct ixgbe_ring test_rx_ring ; struct ixgbe_hw hw ; u16 msg_enable ; struct ixgbe_hw_stats stats ; u64 tx_busy ; unsigned int tx_ring_count ; unsigned int rx_ring_count ; u32 link_speed ; bool link_up ; unsigned long link_check_timeout ; struct timer_list service_timer ; struct work_struct service_task ; struct hlist_head fdir_filter_list ; unsigned long fdir_overflow ; union ixgbe_atr_input fdir_mask ; int fdir_filter_count ; u32 
fdir_pballoc ; u32 atr_sample_rate ; spinlock_t fdir_perfect_lock ; struct ixgbe_fcoe fcoe ; u8 *io_addr ; u32 wol ; u16 bridge_mode ; u16 eeprom_verh ; u16 eeprom_verl ; u16 eeprom_cap ; u32 interrupt_event ; u32 led_reg ; struct ptp_clock *ptp_clock ; struct ptp_clock_info ptp_caps ; struct work_struct ptp_tx_work ; struct sk_buff *ptp_tx_skb ; struct hwtstamp_config tstamp_config ; unsigned long ptp_tx_start ; unsigned long last_overflow_check ; unsigned long last_rx_ptp_check ; unsigned long last_rx_timestamp ; spinlock_t tmreg_lock ; struct cyclecounter cc ; struct timecounter tc ; u32 base_incval ; unsigned long active_vfs[1U] ; unsigned int num_vfs ; struct vf_data_storage *vfinfo ; int vf_rate_link_speed ; struct vf_macvlans vf_mvs ; struct vf_macvlans *mv_list ; u32 timer_event_accumulator ; u32 vferr_refcount ; struct ixgbe_mac_addr *mac_table ; u16 vxlan_port ; struct kobject *info_kobj ; struct hwmon_buff *ixgbe_hwmon_buff ; struct dentry *ixgbe_dbg_adapter ; u8 default_up ; unsigned long fwd_bitmask ; u8 rss_indir_tbl[512U] ; u32 rss_key[10U] ; }; struct ixgbe_fdir_filter { struct hlist_node fdir_node ; union ixgbe_atr_input filter ; u16 sw_idx ; u16 action ; }; union __anonunion____missing_field_name_410 { struct sk_buff *head ; struct sk_buff *tail ; }; struct ixgbe_cb { union __anonunion____missing_field_name_410 __annonCompField122 ; dma_addr_t dma ; u16 append_cnt ; bool page_released ; }; struct ixgbe_reg_info { u32 ofs ; char *name ; }; struct my_u0 { u64 a ; u64 b ; }; union __anonunion_hdr_412 { unsigned char *network ; struct iphdr *ipv4 ; struct ipv6hdr *ipv6 ; }; typedef bool ldv_func_ret_type___2; typedef bool ldv_func_ret_type___3; typedef bool ldv_func_ret_type___4; typedef bool ldv_func_ret_type___5; typedef int ldv_func_ret_type___6; typedef int ldv_func_ret_type___7; typedef int ldv_func_ret_type___8; typedef int ldv_func_ret_type___9; typedef int ldv_func_ret_type___10; typedef int ldv_func_ret_type___11; typedef int 
/* CIL-generated auxiliary types: LDV harness return-type typedefs (one per
 * instrumented call site), the ixgbe host interface command (HIC) message
 * formats used to talk to firmware (driver-version report, shadow-RAM
 * read/write, RX-disable), an ethtool stats/register-test descriptor pair,
 * the advanced TX context descriptor (little-endian on-wire layout), PTP
 * clock-event types, and the beginning of the block-layer section
 * (struct bio and friends, FC/FCoE frame headers). The repeated
 * `enum hrtimer_restart;` forward declarations are CIL artifacts — one is
 * emitted per merged translation unit; they are harmless. */
ldv_func_ret_type___12; typedef int ldv_func_ret_type___13; typedef bool ldv_func_ret_type___14; typedef int ldv_func_ret_type___15; typedef short s16; enum hrtimer_restart; union __anonunion_cmd_or_resp_333 { u8 cmd_resv ; u8 ret_status ; }; struct ixgbe_hic_hdr { u8 cmd ; u8 buf_len ; union __anonunion_cmd_or_resp_333 cmd_or_resp ; u8 checksum ; }; struct ixgbe_hic_drv_info { struct ixgbe_hic_hdr hdr ; u8 port_num ; u8 ver_sub ; u8 ver_build ; u8 ver_min ; u8 ver_maj ; u8 pad ; u16 pad2 ; }; enum hrtimer_restart; struct ixgbe_stats { char stat_string[32U] ; int type ; int sizeof_stat ; int stat_offset ; }; struct ixgbe_reg_test { u16 reg ; u8 array_len ; u8 test_type ; u32 mask ; u32 write ; }; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; struct ixgbe_hic_hdr2_req { u8 cmd ; u8 buf_lenh ; u8 buf_lenl ; u8 checksum ; }; struct ixgbe_hic_hdr2_rsp { u8 cmd ; u8 buf_lenl ; u8 buf_lenh_status ; u8 checksum ; }; union ixgbe_hic_hdr2 { struct ixgbe_hic_hdr2_req req ; struct ixgbe_hic_hdr2_rsp rsp ; }; struct ixgbe_hic_read_shadow_ram { union ixgbe_hic_hdr2 hdr ; u32 address ; u16 length ; u16 pad2 ; u16 data ; u16 pad3 ; }; struct ixgbe_hic_write_shadow_ram { union ixgbe_hic_hdr2 hdr ; __be32 address ; __be16 length ; u16 pad2 ; u16 data ; u16 pad3 ; }; struct ixgbe_hic_disable_rxen { struct ixgbe_hic_hdr hdr ; u8 port_number ; u8 pad2 ; u16 pad3 ; }; enum hrtimer_restart; struct ixgbe_adv_tx_context_desc { __le32 vlan_macip_lens ; __le32 seqnum_seed ; __le32 type_tucmd_mlhl ; __le32 mss_l4len_idx ; }; enum tk_offsets { TK_OFFS_REAL = 0, TK_OFFS_BOOT = 1, TK_OFFS_TAI = 2, TK_OFFS_MAX = 3 } ; enum hrtimer_restart; struct pps_event_time { struct timespec ts_real ; }; union __anonunion____missing_field_name_332 { u64 timestamp ; struct pps_event_time pps_times ; }; struct ptp_clock_event { int type ; int index ; union __anonunion____missing_field_name_332 
__annonCompField97 ; }; typedef bool ldv_func_ret_type___16; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; struct call_single_data { struct llist_node llist ; void (*func)(void * ) ; void *info ; unsigned int flags ; }; struct bio_set; struct bio; struct bio_integrity_payload; typedef void bio_end_io_t(struct bio * , int ); struct bvec_iter { sector_t bi_sector ; unsigned int bi_size ; unsigned int bi_idx ; unsigned int bi_bvec_done ; }; union __anonunion____missing_field_name_247 { struct bio_integrity_payload *bi_integrity ; }; struct bio { struct bio *bi_next ; struct block_device *bi_bdev ; unsigned long bi_flags ; unsigned long bi_rw ; struct bvec_iter bi_iter ; unsigned int bi_phys_segments ; unsigned int bi_seg_front_size ; unsigned int bi_seg_back_size ; atomic_t __bi_remaining ; bio_end_io_t *bi_end_io ; void *bi_private ; struct io_context *bi_ioc ; struct cgroup_subsys_state *bi_css ; union __anonunion____missing_field_name_247 __annonCompField66 ; unsigned short bi_vcnt ; unsigned short bi_max_vecs ; atomic_t __bi_cnt ; struct bio_vec *bi_io_vec ; struct bio_set *bi_pool ; struct bio_vec bi_inline_vecs[0U] ; }; struct hd_geometry; struct block_device_operations; struct fc_frame_header { __u8 fh_r_ctl ; __u8 fh_d_id[3U] ; __u8 fh_cs_ctl ; __u8 fh_s_id[3U] ; __u8 fh_type ; __u8 fh_f_ctl[3U] ; __u8 fh_seq_id ; __u8 fh_df_ctl ; __be16 fh_seq_cnt ; __be16 fh_ox_id ; __be16 fh_rx_id ; __be32 fh_parm_offset ; }; struct fcoe_hdr { __u8 fcoe_ver ; __u8 fcoe_resvd[12U] ; __u8 fcoe_sof ; }; struct fcoe_crc_eof { __le32 fcoe_crc32 ; __u8 fcoe_eof ; __u8 fcoe_resvd[3U] ; }; struct disk_stats { unsigned long sectors[2U] ; unsigned long ios[2U] ; unsigned long merges[2U] ; unsigned long ticks[2U] ; unsigned long io_ticks ; unsigned long time_in_queue ; }; struct partition_meta_info { char uuid[37U] ; u8 volname[64U] ; }; struct hd_struct { sector_t start_sect ; 
/* CIL-generated Linux block-layer core types.
 * Completes struct hd_struct (per-partition geometry and stats), then
 * declares: disk_part_tbl, struct gendisk (whole-disk object: minors,
 * partition table, fops, request_queue pointer), writeback accounting
 * (fprop_local_percpu, bdi_writeback_congested, bdi_writeback,
 * backing_dev_info with cgroup-writeback radix tree), the mempool
 * allocator (mempool_s with alloc/free callbacks), per-task I/O context
 * (io_cq linking an io_context to a request_queue; io_context itself),
 * bio integrity payload, bio_list/bio_set pools, the bsg class device,
 * and struct request_list. Ends mid-declaration of struct request
 * (continued on the next line). Generated layout — do not reorder. */
sector_t nr_sects ; seqcount_t nr_sects_seq ; sector_t alignment_offset ; unsigned int discard_alignment ; struct device __dev ; struct kobject *holder_dir ; int policy ; int partno ; struct partition_meta_info *info ; int make_it_fail ; unsigned long stamp ; atomic_t in_flight[2U] ; struct disk_stats *dkstats ; atomic_t ref ; struct callback_head callback_head ; }; struct disk_part_tbl { struct callback_head callback_head ; int len ; struct hd_struct *last_lookup ; struct hd_struct *part[] ; }; struct disk_events; struct timer_rand_state; struct blk_integrity; struct gendisk { int major ; int first_minor ; int minors ; char disk_name[32U] ; char *(*devnode)(struct gendisk * , umode_t * ) ; unsigned int events ; unsigned int async_events ; struct disk_part_tbl *part_tbl ; struct hd_struct part0 ; struct block_device_operations const *fops ; struct request_queue *queue ; void *private_data ; int flags ; struct device *driverfs_dev ; struct kobject *slave_dir ; struct timer_rand_state *random ; atomic_t sync_io ; struct disk_events *ev ; struct blk_integrity *integrity ; int node_id ; }; struct fprop_local_percpu { struct percpu_counter events ; unsigned int period ; raw_spinlock_t lock ; }; typedef int congested_fn(void * , int ); struct bdi_writeback_congested { unsigned long state ; atomic_t refcnt ; struct backing_dev_info *bdi ; int blkcg_id ; struct rb_node rb_node ; }; union __anonunion____missing_field_name_403 { struct work_struct release_work ; struct callback_head rcu ; }; struct bdi_writeback { struct backing_dev_info *bdi ; unsigned long state ; unsigned long last_old_flush ; struct list_head b_dirty ; struct list_head b_io ; struct list_head b_more_io ; struct list_head b_dirty_time ; spinlock_t list_lock ; struct percpu_counter stat[4U] ; struct bdi_writeback_congested *congested ; unsigned long bw_time_stamp ; unsigned long dirtied_stamp ; unsigned long written_stamp ; unsigned long write_bandwidth ; unsigned long avg_write_bandwidth ; unsigned long 
dirty_ratelimit ; unsigned long balanced_dirty_ratelimit ; struct fprop_local_percpu completions ; int dirty_exceeded ; spinlock_t work_lock ; struct list_head work_list ; struct delayed_work dwork ; struct percpu_ref refcnt ; struct fprop_local_percpu memcg_completions ; struct cgroup_subsys_state *memcg_css ; struct cgroup_subsys_state *blkcg_css ; struct list_head memcg_node ; struct list_head blkcg_node ; union __anonunion____missing_field_name_403 __annonCompField122 ; }; struct backing_dev_info { struct list_head bdi_list ; unsigned long ra_pages ; unsigned int capabilities ; congested_fn *congested_fn ; void *congested_data ; char *name ; unsigned int min_ratio ; unsigned int max_ratio ; unsigned int max_prop_frac ; atomic_long_t tot_write_bandwidth ; struct bdi_writeback wb ; struct radix_tree_root cgwb_tree ; struct rb_root cgwb_congested_tree ; atomic_t usage_cnt ; wait_queue_head_t wb_waitq ; struct device *dev ; struct timer_list laptop_mode_wb_timer ; struct dentry *debug_dir ; struct dentry *debug_stats ; }; typedef void *mempool_alloc_t(gfp_t , void * ); typedef void mempool_free_t(void * , void * ); struct mempool_s { spinlock_t lock ; int min_nr ; int curr_nr ; void **elements ; void *pool_data ; mempool_alloc_t *alloc ; mempool_free_t *free ; wait_queue_head_t wait ; }; typedef struct mempool_s mempool_t; union __anonunion____missing_field_name_404 { struct list_head q_node ; struct kmem_cache *__rcu_icq_cache ; }; union __anonunion____missing_field_name_405 { struct hlist_node ioc_node ; struct callback_head __rcu_head ; }; struct io_cq { struct request_queue *q ; struct io_context *ioc ; union __anonunion____missing_field_name_404 __annonCompField123 ; union __anonunion____missing_field_name_405 __annonCompField124 ; unsigned int flags ; }; struct io_context { atomic_long_t refcount ; atomic_t active_ref ; atomic_t nr_tasks ; spinlock_t lock ; unsigned short ioprio ; int nr_batch_requests ; unsigned long last_waited ; struct radix_tree_root 
icq_tree ; struct io_cq *icq_hint ; struct hlist_head icq_list ; struct work_struct release_work ; }; struct bio_integrity_payload { struct bio *bip_bio ; struct bvec_iter bip_iter ; bio_end_io_t *bip_end_io ; unsigned short bip_slab ; unsigned short bip_vcnt ; unsigned short bip_max_vcnt ; unsigned short bip_flags ; struct work_struct bip_work ; struct bio_vec *bip_vec ; struct bio_vec bip_inline_vecs[0U] ; }; struct bio_list { struct bio *head ; struct bio *tail ; }; struct bio_set { struct kmem_cache *bio_slab ; unsigned int front_pad ; mempool_t *bio_pool ; mempool_t *bvec_pool ; mempool_t *bio_integrity_pool ; mempool_t *bvec_integrity_pool ; spinlock_t rescue_lock ; struct bio_list rescue_list ; struct work_struct rescue_work ; struct workqueue_struct *rescue_workqueue ; }; struct bsg_class_device { struct device *class_dev ; struct device *parent ; int minor ; struct request_queue *queue ; struct kref ref ; void (*release)(struct device * ) ; }; struct elevator_queue; struct blk_trace; struct request; struct bsg_job; struct blkcg_gq; struct blk_flush_queue; typedef void rq_end_io_fn(struct request * , int ); struct request_list { struct request_queue *q ; struct blkcg_gq *blkg ; int count[2U] ; int starved[2U] ; mempool_t *rq_pool ; wait_queue_head_t wait[2U] ; unsigned int flags ; }; union __anonunion____missing_field_name_406___0 { struct call_single_data csd ; unsigned long fifo_time ; }; struct blk_mq_ctx; union __anonunion____missing_field_name_407___0 { struct hlist_node hash ; struct list_head ipi_list ; }; union __anonunion____missing_field_name_408 { struct rb_node rb_node ; void *completion_data ; }; struct __anonstruct_elv_410 { struct io_cq *icq ; void *priv[2U] ; }; struct __anonstruct_flush_411 { unsigned int seq ; struct list_head list ; rq_end_io_fn *saved_end_io ; }; union __anonunion____missing_field_name_409___0 { struct __anonstruct_elv_410 elv ; struct __anonstruct_flush_411 flush ; }; struct request { struct list_head queuelist ; union 
/* CIL-generated block-layer request and I/O-scheduler (elevator) types.
 * Completes struct request (bio chain, sector/length, per-scheduler
 * private data via the anonymous elv/flush union, timing fields, SCSI
 * command buffer, timeout/retry and completion callback), then declares
 * the full family of elevator callback typedefs (merge, dispatch, add,
 * icq init/exit, set/put request, may-queue, init/exit/registered),
 * struct elevator_ops collecting them, the sysfs attribute descriptor
 * elv_fs_entry, and struct elevator_type (per-scheduler registration
 * record with icq cache sizing and name). Ends just before the type's
 * trailing list_head, which continues on the next line. */
__anonunion____missing_field_name_406___0 __annonCompField125 ; struct request_queue *q ; struct blk_mq_ctx *mq_ctx ; u64 cmd_flags ; unsigned int cmd_type ; unsigned long atomic_flags ; int cpu ; unsigned int __data_len ; sector_t __sector ; struct bio *bio ; struct bio *biotail ; union __anonunion____missing_field_name_407___0 __annonCompField126 ; union __anonunion____missing_field_name_408 __annonCompField127 ; union __anonunion____missing_field_name_409___0 __annonCompField128 ; struct gendisk *rq_disk ; struct hd_struct *part ; unsigned long start_time ; struct request_list *rl ; unsigned long long start_time_ns ; unsigned long long io_start_time_ns ; unsigned short nr_phys_segments ; unsigned short nr_integrity_segments ; unsigned short ioprio ; void *special ; int tag ; int errors ; unsigned char __cmd[16U] ; unsigned char *cmd ; unsigned short cmd_len ; unsigned int extra_len ; unsigned int sense_len ; unsigned int resid_len ; void *sense ; unsigned long deadline ; struct list_head timeout_list ; unsigned int timeout ; int retries ; rq_end_io_fn *end_io ; void *end_io_data ; struct request *next_rq ; }; struct elevator_type; typedef int elevator_merge_fn(struct request_queue * , struct request ** , struct bio * ); typedef void elevator_merge_req_fn(struct request_queue * , struct request * , struct request * ); typedef void elevator_merged_fn(struct request_queue * , struct request * , int ); typedef int elevator_allow_merge_fn(struct request_queue * , struct request * , struct bio * ); typedef void elevator_bio_merged_fn(struct request_queue * , struct request * , struct bio * ); typedef int elevator_dispatch_fn(struct request_queue * , int ); typedef void elevator_add_req_fn(struct request_queue * , struct request * ); typedef struct request *elevator_request_list_fn(struct request_queue * , struct request * ); typedef void elevator_completed_req_fn(struct request_queue * , struct request * ); typedef int elevator_may_queue_fn(struct request_queue * , 
int ); typedef void elevator_init_icq_fn(struct io_cq * ); typedef void elevator_exit_icq_fn(struct io_cq * ); typedef int elevator_set_req_fn(struct request_queue * , struct request * , struct bio * , gfp_t ); typedef void elevator_put_req_fn(struct request * ); typedef void elevator_activate_req_fn(struct request_queue * , struct request * ); typedef void elevator_deactivate_req_fn(struct request_queue * , struct request * ); typedef int elevator_init_fn(struct request_queue * , struct elevator_type * ); typedef void elevator_exit_fn(struct elevator_queue * ); typedef void elevator_registered_fn(struct request_queue * ); struct elevator_ops { elevator_merge_fn *elevator_merge_fn ; elevator_merged_fn *elevator_merged_fn ; elevator_merge_req_fn *elevator_merge_req_fn ; elevator_allow_merge_fn *elevator_allow_merge_fn ; elevator_bio_merged_fn *elevator_bio_merged_fn ; elevator_dispatch_fn *elevator_dispatch_fn ; elevator_add_req_fn *elevator_add_req_fn ; elevator_activate_req_fn *elevator_activate_req_fn ; elevator_deactivate_req_fn *elevator_deactivate_req_fn ; elevator_completed_req_fn *elevator_completed_req_fn ; elevator_request_list_fn *elevator_former_req_fn ; elevator_request_list_fn *elevator_latter_req_fn ; elevator_init_icq_fn *elevator_init_icq_fn ; elevator_exit_icq_fn *elevator_exit_icq_fn ; elevator_set_req_fn *elevator_set_req_fn ; elevator_put_req_fn *elevator_put_req_fn ; elevator_may_queue_fn *elevator_may_queue_fn ; elevator_init_fn *elevator_init_fn ; elevator_exit_fn *elevator_exit_fn ; elevator_registered_fn *elevator_registered_fn ; }; struct elv_fs_entry { struct attribute attr ; ssize_t (*show)(struct elevator_queue * , char * ) ; ssize_t (*store)(struct elevator_queue * , char const * , size_t ) ; }; struct elevator_type { struct kmem_cache *icq_cache ; struct elevator_ops ops ; size_t icq_size ; size_t icq_align ; struct elv_fs_entry *elevator_attrs ; char elevator_name[16U] ; struct module *elevator_owner ; char icq_cache_name[21U] ; 
/* CIL-generated request-queue types.
 * Completes struct elevator_type, declares struct elevator_queue (runtime
 * instance with a 64-bucket request hash), the queue callback typedefs
 * (request_fn, make_request_fn, prep/unprep, merge_bvec, softirq_done,
 * dma_drain_needed, lld_busy, bsg_job, rq_timed_out with the
 * blk_eh_timer_return enum), struct blk_queue_tag (legacy tagging),
 * struct queue_limits (sector/segment/discard limits and alignment), and
 * the start of struct request_queue itself (dispatch list, elevator,
 * single-queue and multi-queue callback sets, embedded
 * backing_dev_info, locking, congestion thresholds, tag and timeout
 * bookkeeping, blkcg and flush state). Continued on the next line. */
struct list_head list ; }; struct elevator_queue { struct elevator_type *type ; void *elevator_data ; struct kobject kobj ; struct mutex sysfs_lock ; unsigned char registered : 1 ; struct hlist_head hash[64U] ; }; typedef void request_fn_proc(struct request_queue * ); typedef void make_request_fn(struct request_queue * , struct bio * ); typedef int prep_rq_fn(struct request_queue * , struct request * ); typedef void unprep_rq_fn(struct request_queue * , struct request * ); struct bvec_merge_data { struct block_device *bi_bdev ; sector_t bi_sector ; unsigned int bi_size ; unsigned long bi_rw ; }; typedef int merge_bvec_fn(struct request_queue * , struct bvec_merge_data * , struct bio_vec * ); typedef void softirq_done_fn(struct request * ); typedef int dma_drain_needed_fn(struct request * ); typedef int lld_busy_fn(struct request_queue * ); typedef int bsg_job_fn(struct bsg_job * ); enum blk_eh_timer_return { BLK_EH_NOT_HANDLED = 0, BLK_EH_HANDLED = 1, BLK_EH_RESET_TIMER = 2 } ; typedef enum blk_eh_timer_return rq_timed_out_fn(struct request * ); struct blk_queue_tag { struct request **tag_index ; unsigned long *tag_map ; int busy ; int max_depth ; int real_max_depth ; atomic_t refcnt ; int alloc_policy ; int next_tag ; }; struct queue_limits { unsigned long bounce_pfn ; unsigned long seg_boundary_mask ; unsigned int max_hw_sectors ; unsigned int chunk_sectors ; unsigned int max_sectors ; unsigned int max_segment_size ; unsigned int physical_block_size ; unsigned int alignment_offset ; unsigned int io_min ; unsigned int io_opt ; unsigned int max_discard_sectors ; unsigned int max_write_same_sectors ; unsigned int discard_granularity ; unsigned int discard_alignment ; unsigned short logical_block_size ; unsigned short max_segments ; unsigned short max_integrity_segments ; unsigned char misaligned ; unsigned char discard_misaligned ; unsigned char cluster ; unsigned char discard_zeroes_data ; unsigned char raid_partial_stripes_expensive ; }; struct blk_mq_ops; struct 
blk_mq_hw_ctx; struct throtl_data; struct blk_mq_tag_set; struct request_queue { struct list_head queue_head ; struct request *last_merge ; struct elevator_queue *elevator ; int nr_rqs[2U] ; int nr_rqs_elvpriv ; struct request_list root_rl ; request_fn_proc *request_fn ; make_request_fn *make_request_fn ; prep_rq_fn *prep_rq_fn ; unprep_rq_fn *unprep_rq_fn ; merge_bvec_fn *merge_bvec_fn ; softirq_done_fn *softirq_done_fn ; rq_timed_out_fn *rq_timed_out_fn ; dma_drain_needed_fn *dma_drain_needed ; lld_busy_fn *lld_busy_fn ; struct blk_mq_ops *mq_ops ; unsigned int *mq_map ; struct blk_mq_ctx *queue_ctx ; unsigned int nr_queues ; struct blk_mq_hw_ctx **queue_hw_ctx ; unsigned int nr_hw_queues ; sector_t end_sector ; struct request *boundary_rq ; struct delayed_work delay_work ; struct backing_dev_info backing_dev_info ; void *queuedata ; unsigned long queue_flags ; int id ; gfp_t bounce_gfp ; spinlock_t __queue_lock ; spinlock_t *queue_lock ; struct kobject kobj ; struct kobject mq_kobj ; struct device *dev ; int rpm_status ; unsigned int nr_pending ; unsigned long nr_requests ; unsigned int nr_congestion_on ; unsigned int nr_congestion_off ; unsigned int nr_batching ; unsigned int dma_drain_size ; void *dma_drain_buffer ; unsigned int dma_pad_mask ; unsigned int dma_alignment ; struct blk_queue_tag *queue_tags ; struct list_head tag_busy_list ; unsigned int nr_sorted ; unsigned int in_flight[2U] ; unsigned int request_fn_active ; unsigned int rq_timeout ; struct timer_list timeout ; struct list_head timeout_list ; struct list_head icq_list ; unsigned long blkcg_pols[1U] ; struct blkcg_gq *root_blkg ; struct list_head blkg_list ; struct queue_limits limits ; unsigned int sg_timeout ; unsigned int sg_reserved_size ; int node ; struct blk_trace *blk_trace ; unsigned int flush_flags ; unsigned char flush_not_queueable : 1 ; struct blk_flush_queue *fq ; struct list_head requeue_list ; spinlock_t requeue_lock ; struct work_struct requeue_work ; struct mutex sysfs_lock ; 
int bypass_depth ; atomic_t mq_freeze_depth ; bsg_job_fn *bsg_job_fn ; int bsg_job_size ; struct bsg_class_device bsg_dev ; struct throtl_data *td ; struct callback_head callback_head ; wait_queue_head_t mq_freeze_wq ; struct percpu_ref mq_usage_counter ; struct list_head all_q_node ; struct blk_mq_tag_set *tag_set ; struct list_head tag_set_list ; }; struct blk_plug { struct list_head list ; struct list_head mq_list ; struct list_head cb_list ; }; struct blk_integrity_iter { void *prot_buf ; void *data_buf ; sector_t seed ; unsigned int data_size ; unsigned short interval ; char const *disk_name ; }; typedef int integrity_processing_fn(struct blk_integrity_iter * ); struct blk_integrity { integrity_processing_fn *generate_fn ; integrity_processing_fn *verify_fn ; unsigned short flags ; unsigned short tuple_size ; unsigned short interval ; unsigned short tag_size ; char const *name ; struct kobject kobj ; }; struct block_device_operations { int (*open)(struct block_device * , fmode_t ) ; void (*release)(struct gendisk * , fmode_t ) ; int (*rw_page)(struct block_device * , sector_t , struct page * , int ) ; int (*ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; long (*direct_access)(struct block_device * , sector_t , void ** , unsigned long * , long ) ; unsigned int (*check_events)(struct gendisk * , unsigned int ) ; int (*media_changed)(struct gendisk * ) ; void (*unlock_native_capacity)(struct gendisk * ) ; int (*revalidate_disk)(struct gendisk * ) ; int (*getgeo)(struct block_device * , struct hd_geometry * ) ; void (*swap_slot_free_notify)(struct block_device * , unsigned long ) ; struct module *owner ; }; struct blk_mq_tags; struct blk_mq_cpu_notifier { struct list_head list ; void *data ; int (*notify)(void * , unsigned long , unsigned int ) ; }; struct blk_align_bitmap; struct blk_mq_ctxmap { unsigned int size ; unsigned int bits_per_word ; struct 
blk_align_bitmap *map ; }; struct __anonstruct____missing_field_name_413 { spinlock_t lock ; struct list_head dispatch ; }; struct blk_mq_hw_ctx { struct __anonstruct____missing_field_name_413 __annonCompField129 ; unsigned long state ; struct delayed_work run_work ; struct delayed_work delay_work ; cpumask_var_t cpumask ; int next_cpu ; int next_cpu_batch ; unsigned long flags ; struct request_queue *queue ; struct blk_flush_queue *fq ; void *driver_data ; struct blk_mq_ctxmap ctx_map ; unsigned int nr_ctx ; struct blk_mq_ctx **ctxs ; atomic_t wait_index ; struct blk_mq_tags *tags ; unsigned long queued ; unsigned long run ; unsigned long dispatched[10U] ; unsigned int numa_node ; unsigned int queue_num ; atomic_t nr_active ; struct blk_mq_cpu_notifier cpu_notifier ; struct kobject kobj ; }; struct blk_mq_tag_set { struct blk_mq_ops *ops ; unsigned int nr_hw_queues ; unsigned int queue_depth ; unsigned int reserved_tags ; unsigned int cmd_size ; int numa_node ; unsigned int timeout ; unsigned int flags ; void *driver_data ; struct blk_mq_tags **tags ; struct mutex tag_list_lock ; struct list_head tag_list ; }; struct blk_mq_queue_data { struct request *rq ; struct list_head *list ; bool last ; }; typedef int queue_rq_fn(struct blk_mq_hw_ctx * , struct blk_mq_queue_data const * ); typedef struct blk_mq_hw_ctx *map_queue_fn(struct request_queue * , int const ); typedef enum blk_eh_timer_return timeout_fn(struct request * , bool ); typedef int init_hctx_fn(struct blk_mq_hw_ctx * , void * , unsigned int ); typedef void exit_hctx_fn(struct blk_mq_hw_ctx * , unsigned int ); typedef int init_request_fn(void * , struct request * , unsigned int , unsigned int , unsigned int ); typedef void exit_request_fn(void * , struct request * , unsigned int , unsigned int ); struct blk_mq_ops { queue_rq_fn *queue_rq ; map_queue_fn *map_queue ; timeout_fn *timeout ; softirq_done_fn *complete ; init_hctx_fn *init_hctx ; exit_hctx_fn *exit_hctx ; init_request_fn *init_request ; 
exit_request_fn *exit_request ; }; void __builtin_prefetch(void const * , ...) ; __inline static long ldv__builtin_expect(long exp , long c ) ; extern struct module __this_module ; __inline static void set_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return; } } __inline static void clear_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr)); return; } } __inline static int test_and_set_bit(long nr , unsigned long volatile *addr ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %2, %0; setc %1": "+m" (*addr), "=qm" (c): "Ir" (nr): "memory"); return ((int )((signed char )c) != 0); } } __inline static int test_and_set_bit_lock(long nr , unsigned long volatile *addr ) { int tmp ; { tmp = test_and_set_bit(nr, addr); return (tmp); } } __inline static int test_and_clear_bit(long nr , unsigned long volatile *addr ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %2, %0; setc %1": "+m" (*addr), "=qm" (c): "Ir" (nr): "memory"); return ((int )((signed char )c) != 0); } } __inline static int constant_test_bit(long nr , unsigned long const volatile *addr ) { { return ((int )((unsigned long )*(addr + (unsigned long )(nr >> 6)) >> ((int )nr & 63)) & 1); } } __inline static int variable_test_bit(long nr , unsigned long const volatile *addr ) { int oldbit ; { __asm__ volatile ("bt %2,%1\n\tsbb %0,%0": "=r" (oldbit): "m" (*((unsigned long *)addr)), "Ir" (nr)); return (oldbit); } } __inline static int ffs(int x ) { int r ; { __asm__ ("bsfl %1,%0": "=r" (r): "rm" (x), "0" (-1)); return (r + 1); } } extern unsigned 
long find_next_bit(unsigned long const * , unsigned long , unsigned long ) ; extern unsigned long find_first_bit(unsigned long const * , unsigned long ) ; extern unsigned long find_first_zero_bit(unsigned long const * , unsigned long ) ; __inline static __u16 __fswab16(__u16 val ) { { return ((__u16 )((int )((short )((int )val << 8)) | (int )((short )((int )val >> 8)))); } } extern unsigned long find_last_bit(unsigned long const * , unsigned long ) ; extern int printk(char const * , ...) ; extern void __dynamic_netdev_dbg(struct _ddebug * , struct net_device const * , char const * , ...) ; extern void print_hex_dump(char const * , char const * , int , int , int , void const * , size_t , bool ) ; extern int snprintf(char * , size_t , char const * , ...) ; extern enum system_states system_state ; void *ldv_err_ptr(long error ) ; void ldv_spin_lock(void) ; void ldv_spin_unlock(void) ; extern void *malloc(size_t ) ; extern void *calloc(size_t , size_t ) ; extern void *memset(void * , int , size_t ) ; extern int __VERIFIER_nondet_int(void) ; extern unsigned long __VERIFIER_nondet_ulong(void) ; extern void *__VERIFIER_nondet_pointer(void) ; extern void __VERIFIER_assume(int ) ; void *ldv_malloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = malloc(size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } void *ldv_zalloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = calloc(1UL, size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } void *ldv_init_zalloc(size_t size ) { void *p ; void *tmp ; { tmp = calloc(1UL, size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } void *ldv_memset(void *s , int c , size_t n ) { void *tmp ; { tmp = memset(s, c, 
n); return (tmp); } } int ldv_undef_int(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); return (tmp); } } void *ldv_undef_ptr(void) { void *tmp ; { tmp = __VERIFIER_nondet_pointer(); return (tmp); } } unsigned long ldv_undef_ulong(void) { unsigned long tmp ; { tmp = __VERIFIER_nondet_ulong(); return (tmp); } } __inline static void ldv_stop(void) { { LDV_STOP: ; goto LDV_STOP; } } __inline static long ldv__builtin_expect(long exp , long c ) { { return (exp); } } __inline static void INIT_LIST_HEAD(struct list_head *list ) { { list->next = list; list->prev = list; return; } } __inline static int hlist_empty(struct hlist_head const *h ) { { return ((unsigned long )h->first == (unsigned long )((struct hlist_node */* const */)0)); } } __inline static void __hlist_del(struct hlist_node *n ) { struct hlist_node *next ; struct hlist_node **pprev ; { next = n->next; pprev = n->pprev; *pprev = next; if ((unsigned long )next != (unsigned long )((struct hlist_node *)0)) { next->pprev = pprev; } else { } return; } } __inline static void hlist_del(struct hlist_node *n ) { { __hlist_del(n); n->next = (struct hlist_node *)-2401263026317557504L; n->pprev = (struct hlist_node **)-2401263026316508672L; return; } } extern void __bad_percpu_size(void) ; extern void __bad_size_call_parameter(void) ; extern unsigned long __per_cpu_offset[8192U] ; extern void warn_slowpath_fmt(char const * , int const , char const * , ...) 
; extern void warn_slowpath_null(char const * , int const ) ; extern unsigned long __phys_addr(unsigned long ) ; extern void *memcpy(void * , void const * , size_t ) ; extern void *memset(void * , int , size_t ) ; extern char *strcpy(char * , char const * ) ; extern size_t strlcpy(char * , char const * , size_t ) ; extern int __bitmap_weight(unsigned long const * , unsigned int ) ; __inline static int bitmap_weight(unsigned long const *src , unsigned int nbits ) { int tmp___0 ; { tmp___0 = __bitmap_weight(src, nbits); return (tmp___0); } } extern int nr_cpu_ids ; extern struct cpumask const * const cpu_possible_mask ; extern struct cpumask const * const cpu_online_mask ; __inline static unsigned int cpumask_check(unsigned int cpu ) { bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; { __ret_warn_once = (unsigned int )nr_cpu_ids <= cpu; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! __warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/cpumask.h", 117); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return (cpu); } } __inline static unsigned int cpumask_next(int n , struct cpumask const *srcp ) { unsigned long tmp ; { if (n != -1) { cpumask_check((unsigned int )n); } else { } tmp = find_next_bit((unsigned long const *)(& srcp->bits), (unsigned long )nr_cpu_ids, (unsigned long )(n + 1)); return ((unsigned int )tmp); } } __inline static unsigned int cpumask_weight(struct cpumask const *srcp ) { int tmp ; { tmp = bitmap_weight((unsigned long const *)(& srcp->bits), (unsigned int )nr_cpu_ids); return ((unsigned int )tmp); } } __inline static void *ERR_PTR(long error ) ; __inline static void prefetchw(void const *x ) { { __asm__ volatile ("661:\n\tprefetcht0 %P1\n662:\n.skip 
-(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 6*32+ 8)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\tprefetchw %P1\n6651:\n\t.popsection": : "i" (0), "m" (*((char const *)x))); return; } } extern void __cmpxchg_wrong_size(void) ; __inline static int atomic_read(atomic_t const *v ) { int __var ; { __var = 0; return ((int )*((int const volatile *)(& v->counter))); } } __inline static void atomic_set(atomic_t *v , int i ) { { v->counter = i; return; } } __inline static void atomic_inc(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; incl %0": "+m" (v->counter)); return; } } __inline static int atomic_cmpxchg(atomic_t *v , int old , int new ) { int __ret ; int __old ; int __new ; u8 volatile *__ptr ; u16 volatile *__ptr___0 ; u32 volatile *__ptr___1 ; u64 volatile *__ptr___2 ; { __old = old; __new = new; switch (4UL) { case 1UL: __ptr = (u8 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgb %2,%1": "=a" (__ret), "+m" (*__ptr): "q" (__new), "0" (__old): "memory"); goto ldv_5783; case 2UL: __ptr___0 = (u16 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgw %2,%1": "=a" (__ret), "+m" (*__ptr___0): "r" (__new), "0" (__old): "memory"); goto ldv_5783; case 4UL: __ptr___1 = (u32 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgl %2,%1": "=a" (__ret), "+m" (*__ptr___1): "r" (__new), "0" (__old): "memory"); goto ldv_5783; case 8UL: __ptr___2 = (u64 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - 
.\n.popsection\n671:\n\tlock; cmpxchgq %2,%1": "=a" (__ret), "+m" (*__ptr___2): "r" (__new), "0" (__old): "memory"); goto ldv_5783; default: __cmpxchg_wrong_size(); } ldv_5783: ; return (__ret); } } extern int __preempt_count ; __inline static int preempt_count(void) { int pfo_ret__ ; { switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (__preempt_count)); goto ldv_6106; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6106; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6106; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6106; default: __bad_percpu_size(); } ldv_6106: ; return (pfo_ret__ & 2147483647); } } __inline static void __preempt_count_add(int val ) { int pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (__preempt_count): "qi" (val)); } goto ldv_6163; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6163; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6163; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (val)); } goto ldv_6163; default: __bad_percpu_size(); } ldv_6163: ; return; } } __inline static void __preempt_count_sub(int val ) { int 
pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (__preempt_count): "qi" (- val)); } goto ldv_6175; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6175; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6175; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (- val)); } goto ldv_6175; default: __bad_percpu_size(); } ldv_6175: ; return; } } extern void __local_bh_disable_ip(unsigned long , unsigned int ) ; __inline static void local_bh_disable(void) { { __local_bh_disable_ip(0UL, 512U); return; } } extern void __local_bh_enable_ip(unsigned long , unsigned int ) ; __inline static void local_bh_enable(void) { { __local_bh_enable_ip(0UL, 512U); return; } } extern void lockdep_init_map(struct lockdep_map * , char const * , struct lock_class_key * , int ) ; extern void lock_acquire(struct lockdep_map * , unsigned int , int , int , int , struct lockdep_map * , unsigned long ) ; extern void lock_release(struct lockdep_map * , int , unsigned long ) ; extern void lockdep_rcu_suspicious(char const * , int const , char const * ) ; extern void __raw_spin_lock_init(raw_spinlock_t * , char const * , struct lock_class_key * ) ; extern void _raw_spin_lock(raw_spinlock_t * ) ; extern void 
_raw_spin_unlock(raw_spinlock_t * ) ; __inline static raw_spinlock_t *spinlock_check(spinlock_t *lock ) { { return (& lock->__annonCompField18.rlock); } } __inline static void ldv_spin_lock_5(spinlock_t *lock ) { { _raw_spin_lock(& lock->__annonCompField18.rlock); return; } } __inline static void spin_lock(spinlock_t *lock ) ; __inline static void ldv_spin_unlock_9(spinlock_t *lock ) { { _raw_spin_unlock(& lock->__annonCompField18.rlock); return; } } __inline static void spin_unlock(spinlock_t *lock ) ; extern unsigned long volatile jiffies ; __inline static void __rcu_read_lock(void) { { __preempt_count_add(1); __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock(void) { { __asm__ volatile ("": : : "memory"); __preempt_count_sub(1); return; } } extern bool rcu_is_watching(void) ; __inline static void rcu_lock_acquire(struct lockdep_map *map ) { { lock_acquire(map, 0U, 0, 2, 0, (struct lockdep_map *)0, 0UL); return; } } __inline static void rcu_lock_release(struct lockdep_map *map ) { { lock_release(map, 1, 0UL); return; } } extern struct lockdep_map rcu_lock_map ; extern int debug_lockdep_rcu_enabled(void) ; __inline static void rcu_read_lock(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { __rcu_read_lock(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 849, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 900, "rcu_read_unlock() used illegally while idle"); } else { } } else { } __rcu_read_unlock(); rcu_lock_release(& rcu_lock_map); return; } } extern int mod_timer(struct timer_list * , unsigned long ) ; int ldv_mod_timer_51(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; int ldv_mod_timer_58(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; extern int del_timer_sync(struct timer_list * ) ; int ldv_del_timer_sync_52(struct timer_list *ldv_func_arg1 ) ; extern void __init_work(struct work_struct * , int ) ; extern struct workqueue_struct *system_wq ; extern bool queue_work_on(int , struct workqueue_struct * , struct work_struct * ) ; bool ldv_queue_work_on_15(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_17(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; extern bool queue_delayed_work_on(int , struct workqueue_struct * , struct delayed_work * , unsigned long ) ; bool ldv_queue_delayed_work_on_16(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_19(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; extern void flush_workqueue(struct workqueue_struct * ) ; void ldv_flush_workqueue_18(struct workqueue_struct *ldv_func_arg1 ) ; extern bool cancel_work_sync(struct work_struct * ) ; bool ldv_cancel_work_sync_61(struct work_struct *ldv_func_arg1 ) ; __inline static bool queue_work(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_15(8192, wq, work); return (tmp); } } __inline static bool schedule_work(struct 
work_struct *work ) { bool tmp ; { tmp = queue_work(system_wq, work); return (tmp); } } extern pg_data_t *node_data[] ; __inline static unsigned int readl(void const volatile *addr ) { unsigned int ret ; { __asm__ volatile ("movl %1,%0": "=r" (ret): "m" (*((unsigned int volatile *)addr)): "memory"); return (ret); } } __inline static void writel(unsigned int val , void volatile *addr ) { { __asm__ volatile ("movl %0,%1": : "r" (val), "m" (*((unsigned int volatile *)addr)): "memory"); return; } } extern void *ioremap_nocache(resource_size_t , unsigned long ) ; __inline static void *ioremap(resource_size_t offset , unsigned long size ) { void *tmp ; { tmp = ioremap_nocache(offset, size); return (tmp); } } extern void iounmap(void volatile * ) ; extern int cpu_number ; extern int numa_node ; __inline static int numa_node_id(void) { int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (numa_node)); goto ldv_13675; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (numa_node)); goto ldv_13675; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (numa_node)); goto ldv_13675; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (numa_node)); goto ldv_13675; default: __bad_percpu_size(); } ldv_13675: pscr_ret__ = pfo_ret__; goto ldv_13681; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (numa_node)); goto ldv_13685; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (numa_node)); goto ldv_13685; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (numa_node)); goto ldv_13685; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (numa_node)); goto ldv_13685; default: __bad_percpu_size(); } ldv_13685: pscr_ret__ = pfo_ret_____0; goto ldv_13681; case 4UL: ; switch (4UL) { 
case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (numa_node)); goto ldv_13694; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (numa_node)); goto ldv_13694; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (numa_node)); goto ldv_13694; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (numa_node)); goto ldv_13694; default: __bad_percpu_size(); } ldv_13694: pscr_ret__ = pfo_ret_____1; goto ldv_13681; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (numa_node)); goto ldv_13703; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (numa_node)); goto ldv_13703; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (numa_node)); goto ldv_13703; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (numa_node)); goto ldv_13703; default: __bad_percpu_size(); } ldv_13703: pscr_ret__ = pfo_ret_____2; goto ldv_13681; default: __bad_size_call_parameter(); goto ldv_13681; } ldv_13681: ; return (pscr_ret__); } } __inline static int numa_mem_id(void) { int tmp ; { tmp = numa_node_id(); return (tmp); } } __inline static int gfp_zonelist(gfp_t flags ) { long tmp ; { tmp = ldv__builtin_expect((flags & 262144U) != 0U, 0L); if (tmp != 0L) { return (1); } else { } return (0); } } __inline static struct zonelist *node_zonelist(int nid , gfp_t flags ) { int tmp ; { tmp = gfp_zonelist(flags); return ((struct zonelist *)(& (node_data[nid])->node_zonelists) + (unsigned long )tmp); } } extern struct page *__alloc_pages_nodemask(gfp_t , unsigned int , struct zonelist * , nodemask_t * ) ; __inline static struct page *__alloc_pages(gfp_t gfp_mask , unsigned int order , struct zonelist *zonelist ) { struct page *tmp ; { tmp = __alloc_pages_nodemask(gfp_mask, order, zonelist, (nodemask_t *)0); return (tmp); } } __inline static struct page *alloc_pages_node(int nid , gfp_t gfp_mask , unsigned int order ) { struct zonelist *tmp ; struct page *tmp___0 ; { if (nid < 
0) { nid = numa_node_id(); } else { } tmp = node_zonelist(nid, gfp_mask); tmp___0 = __alloc_pages(gfp_mask, order, tmp); return (tmp___0); } } extern void __free_pages(struct page * , unsigned int ) ; __inline static char const *kobject_name(struct kobject const *kobj ) { { return ((char const *)kobj->name); } } extern void kfree(void const * ) ; void *ldv_kmem_cache_alloc_25(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_42(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; __inline static void *kcalloc(size_t n , size_t size , gfp_t flags ) ; __inline static void *kzalloc(size_t size , gfp_t flags ) ; void ldv_check_alloc_flags(gfp_t flags ) ; void ldv_check_alloc_nonatomic(void) ; struct work_struct *ldv_work_struct_9_2 ; int ldv_state_variable_20 ; struct ixgbe_hw *mbx_ops_generic_group0 ; struct ethtool_eeprom *ixgbe_ethtool_ops_group2 ; int ldv_irq_5_1 = 0; struct timer_list *ldv_timer_list_10_2 ; int ldv_irq_line_4_2 ; int ldv_irq_line_7_1 ; int ldv_irq_3_2 = 0; int ldv_work_9_3 ; int ldv_irq_6_1 = 0; int ldv_state_variable_14 ; int ldv_irq_line_6_2 ; int ldv_state_variable_37 ; int ldv_state_variable_17 ; void *ldv_irq_data_6_0 ; void *ldv_irq_data_2_3 ; int ldv_state_variable_19 ; int ldv_state_variable_27 ; int ldv_state_variable_9 ; int ldv_irq_line_7_2 ; struct inode *ixgbe_dbg_netdev_ops_fops_group1 ; int ldv_irq_7_3 = 0; struct pci_dev *ixgbe_driver_group1 ; void *ldv_irq_data_2_2 ; int ldv_state_variable_7 ; struct ixgbe_hw *eeprom_ops_82599_group0 ; struct ixgbe_hw *mac_ops_82599_group0 ; int ldv_irq_1_3 = 0; int ldv_irq_line_2_2 ; void *ldv_irq_data_5_2 ; struct pci_dev *ixgbe_err_handler_group0 ; void *ldv_irq_data_1_0 ; void *ldv_irq_data_3_0 ; int ldv_state_variable_26 ; struct file *ixgbe_dbg_reg_ops_fops_group2 ; int ldv_state_variable_28 ; struct ixgbe_hw *mac_ops_82598_group0 ; int ldv_timer_10_2 ; int LDV_IN_INTERRUPT = 1; int ldv_irq_1_1 = 0; void *ldv_irq_data_7_2 ; int ldv_work_8_3 ; int ldv_irq_line_3_1 ; 
/* LDV (Linux Driver Verification) environment-model state, generated for the
 * ixgbe driver.  Naming scheme (inferred from the harness prototypes later in
 * this file — NOTE(review): confirm against the generated ldv_main body):
 *  - ldv_state_variable_N   : state machine tracking modelled ops table N
 *  - ldv_irq_K_J            : registration state of IRQ slot K, handler J
 *                             (explicitly initialised to 0 = not registered)
 *  - ldv_irq_line_K_J / ldv_irq_data_K_J : saved line / cookie for that slot
 *  - ldv_work_*, ldv_work_struct_*       : modelled work items
 *  - ldv_timer_*, ldv_timer_list_*       : modelled timers
 *  - <ops>_groupN           : captured callback arguments (hw, netdev,
 *                             ethtool/dcbnl structs) replayed into the
 *                             driver's ops by the harness.
 * Tokens below are byte-identical to the generated original. */
struct net_device *ixgbe_netdev_ops_group1 ; int ldv_state_variable_31 ; struct ixgbe_hw *eeprom_ops_82598_group0 ; struct ethtool_cmd *ixgbe_ethtool_ops_group1 ; int ldv_irq_4_1 = 0; int ldv_state_variable_8 ; int ldv_state_variable_15 ; int ldv_irq_line_5_0 ; int ldv_irq_line_7_3 ; int ldv_work_8_0 ; int ldv_state_variable_21 ; int ldv_state_variable_33 ; struct work_struct *ldv_work_struct_8_0 ; void *ldv_irq_data_4_0 ; int ldv_irq_line_6_3 ; int ldv_irq_4_0 = 0; int ldv_irq_2_2 = 0; int ldv_irq_line_2_0 ; int ldv_irq_line_4_0 ; int ldv_irq_line_6_1 ; int ldv_irq_line_3_0 ; void *ldv_irq_data_7_0 ; void *ldv_irq_data_6_1 ; int ldv_timer_10_0 ; void *ldv_irq_data_3_3 ; int ldv_irq_line_3_2 ; struct net_device *ixgbe_ethtool_ops_group6 ; int ldv_state_variable_10 ; int ldv_irq_1_0 = 0; struct net_device *dcbnl_ops_group0 ; int ldv_irq_line_2_1 ; void *ldv_irq_data_6_2 ; int ldv_state_variable_2 ; int ldv_state_variable_25 ; int ldv_timer_10_1 ; void *ldv_irq_data_2_0 ; struct ethtool_rxnfc *ixgbe_ethtool_ops_group7 ; int ldv_state_variable_11 ; int ldv_irq_1_2 = 0; int ldv_irq_4_3 = 0; int ldv_state_variable_18 ; struct ethtool_pauseparam *ixgbe_ethtool_ops_group3 ; struct timer_list *ldv_timer_list_10_3 ; int ldv_irq_6_0 = 0; int ldv_irq_line_4_1 ; struct work_struct *ldv_work_struct_9_1 ; struct ixgbe_hw *eeprom_ops_X550_group0 ; int ldv_irq_line_5_3 ; int ldv_state_variable_32 ; struct ixgbe_hw *phy_ops_X550EM_x_group0 ; struct ieee_ets *dcbnl_ops_group3 ; int pci_counter ; int ldv_irq_line_6_0 ; int ldv_irq_7_2 = 0; int ldv_state_variable_30 ; int ldv_work_8_1 ; int ldv_state_variable_0 ; void *ldv_irq_data_5_3 ; int ldv_irq_2_0 = 0; int ldv_state_variable_12 ; struct ixgbe_hw *mac_ops_X550EM_x_group0 ; int ldv_irq_line_4_3 ; int ldv_state_variable_22 ; int ldv_state_variable_29 ; struct ethtool_wolinfo *ixgbe_ethtool_ops_group8 ; struct work_struct *ldv_work_struct_8_1 ; int ldv_work_9_0 ; struct ixgbe_hw *mac_ops_X540_group0 ; int ref_cnt ; int 
ldv_irq_line_1_1 ; struct file *ixgbe_dbg_netdev_ops_fops_group2 ; int ldv_irq_6_3 = 0; struct work_struct *ldv_work_struct_8_3 ; int ldv_state_variable_23 ; int ldv_irq_5_2 = 0; struct timer_list *ldv_timer_list_10_0 ; int ldv_irq_2_1 = 0; int ldv_irq_3_0 = 0; void *ldv_irq_data_2_1 ; struct ixgbe_hw *phy_ops_X540_group0 ; int ldv_state_variable_6 ; void *ldv_irq_data_7_1 ; void *ldv_irq_data_1_3 ; void *ldv_irq_data_5_0 ; struct ixgbe_hw *mac_ops_X550_group0 ; int ldv_irq_7_1 = 0; void *ldv_irq_data_4_1 ; int ldv_state_variable_38 ; struct ethtool_channels *ixgbe_ethtool_ops_group4 ; int ldv_state_variable_39 ; struct dcb_app *dcbnl_ops_group1 ; struct timer_list *ldv_timer_list_10_1 ; int ldv_state_variable_3 ; int ldv_irq_line_1_0 ; void *ldv_irq_data_3_2 ; void *ldv_irq_data_6_3 ; int ldv_state_variable_4 ; struct work_struct *ldv_work_struct_9_0 ; struct work_struct *ldv_work_struct_9_3 ; int ldv_irq_line_3_3 ; int ldv_state_variable_36 ; int ldv_work_9_2 ; int ldv_work_9_1 ; int ldv_state_variable_5 ; struct ixgbe_hw *phy_ops_X550_group0 ; int ldv_state_variable_13 ; int ldv_irq_3_1 = 0; int ldv_irq_line_7_0 ; int ldv_irq_line_5_2 ; void *ldv_irq_data_4_3 ; int ldv_irq_4_2 = 0; int ldv_state_variable_24 ; int ldv_irq_6_2 = 0; int ldv_state_variable_1 ; int ldv_irq_line_1_2 ; int ldv_irq_line_2_3 ; void *ldv_irq_data_1_1 ; struct ixgbe_hw *phy_ops_82598_group0 ; void *ldv_irq_data_4_2 ; struct ixgbe_hw *eeprom_ops_X550EM_x_group0 ; void *ldv_irq_data_3_1 ; void *ldv_irq_data_5_1 ; struct work_struct *ldv_work_struct_8_2 ; struct ixgbe_hw *eeprom_ops_X540_group0 ; struct ethtool_coalesce *ixgbe_ethtool_ops_group5 ; int ldv_state_variable_16 ; struct inode *ixgbe_dbg_reg_ops_fops_group1 ; void *ldv_irq_data_1_2 ; int ldv_irq_5_3 = 0; int ldv_irq_line_5_1 ; int ldv_irq_7_0 = 0; int ldv_irq_2_3 = 0; int ldv_irq_line_1_3 ; int ldv_irq_5_0 = 0; int ldv_work_8_2 ; struct ethtool_ringparam *ixgbe_ethtool_ops_group0 ; int ldv_state_variable_34 ; int ldv_timer_10_3 ; 
/* Remaining LDV harness globals, then prototypes for the generated
 * environment-model entry points: IRQ activate/disable/dispatch
 * (activate_suitable_irq_N / disable_suitable_irq_N / ldv_irq_N),
 * handler-registration checks (reg_check_N), workqueue and timer model
 * drivers (work_init_* / invoke_work_* / reg_timer_10 / ldv_timer_10),
 * and initializers that bind the driver's ops structures into the model. */
struct ieee_pfc *dcbnl_ops_group2 ; int ldv_irq_3_3 = 0; void *ldv_irq_data_7_3 ; int ldv_state_variable_35 ; struct ixgbe_hw *phy_ops_82599_group0 ; void activate_suitable_irq_4(int line , void *data ) ; int ldv_irq_3(int state , int line , void *data ) ; void disable_suitable_irq_2(int line , void *data ) ; void disable_suitable_irq_7(int line , void *data ) ; void activate_suitable_irq_3(int line , void *data ) ; void ldv_net_device_ops_38(void) ; int reg_check_1(irqreturn_t (*handler)(int , void * ) ) ; void choose_interrupt_4(void) ; void work_init_9(void) ; void invoke_work_8(void) ; void ldv_initialize_ixgbe_phy_operations_16(void) ; int reg_timer_10(struct timer_list *timer , void (*function)(unsigned long ) , unsigned long data ) ; void ldv_initialize_ixgbe_mbx_operations_26(void) ; void ldv_initialize_dcbnl_rtnl_ops_13(void) ; void ldv_timer_10(int state , struct timer_list *timer ) ; void ldv_initialize_ixgbe_eeprom_operations_24(void) ; void ldv_file_operations_12(void) ; void ldv_initialize_ixgbe_phy_operations_28(void) ; void activate_suitable_irq_2(int line , void *data ) ; void work_init_8(void) ; void activate_suitable_timer_10(struct timer_list *timer , unsigned long data ) ; void ldv_initialize_ethtool_ops_35(void) ; void call_and_disable_all_9(int state ) ; void choose_interrupt_1(void) ; int reg_check_2(irqreturn_t (*handler)(int , void * ) ) ; void ldv_initialize_ixgbe_eeprom_operations_18(void) ; void call_and_disable_work_8(struct work_struct *work ) ; void activate_suitable_irq_7(int line , void *data ) ; int reg_check_3(irqreturn_t (*handler)(int , void * ) ) ; void ldv_initialize_ixgbe_mac_operations_30(void) ; void disable_work_8(struct work_struct *work ) ; void activate_work_9(struct work_struct *work , int state ) ; int reg_check_7(irqreturn_t (*handler)(int , void * ) ) ; int ldv_irq_4(int state , int line , void *data ) ; void ldv_initialize_ixgbe_mac_operations_34(void) ; void activate_pending_timer_10(struct timer_list *timer , 
/* Continuation of the activate_pending_timer_10 prototype, the rest of the
 * harness prototypes, driver-core externs, and CIL-flattened kernel inlines
 * dev_name() (init_name if set, else kobject name), dev_to_node() and
 * set_dev_node() (NUMA node accessors). */
unsigned long data , int pending_flag ) ; void ldv_initialize_ixgbe_eeprom_operations_29(void) ; void ldv_initialize_ixgbe_eeprom_operations_19(void) ; void call_and_disable_all_8(int state ) ; void ldv_initialize_ixgbe_phy_operations_23(void) ; void disable_suitable_irq_1(int line , void *data ) ; void ldv_initialize_ixgbe_phy_operations_17(void) ; void activate_suitable_irq_1(int line , void *data ) ; void ldv_initialize_ixgbe_phy_operations_32(void) ; int reg_check_4(irqreturn_t (*handler)(int , void * ) ) ; void ldv_initialize_ixgbe_mac_operations_21(void) ; void activate_work_8(struct work_struct *work , int state ) ; int ldv_irq_2(int state , int line , void *data ) ; void ldv_pci_driver_36(void) ; void ldv_initialize_ixgbe_eeprom_operations_33(void) ; void choose_interrupt_2(void) ; void ldv_initialize_pci_error_handlers_37(void) ; void disable_suitable_timer_10(struct timer_list *timer ) ; void disable_work_9(struct work_struct *work ) ; void ldv_initialize_ixgbe_mac_operations_25(void) ; void disable_suitable_irq_4(int line , void *data ) ; void ldv_initialize_ixgbe_mac_operations_20(void) ; void disable_suitable_irq_3(int line , void *data ) ; void timer_init_10(void) ; int ldv_irq_1(int state , int line , void *data ) ; void choose_timer_10(void) ; void choose_interrupt_3(void) ; void ldv_file_operations_11(void) ; extern int driver_for_each_device(struct device_driver * , struct device * , void * , int (*)(struct device * , void * ) ) ; extern int device_set_wakeup_enable(struct device * , bool ) ; __inline static char const *dev_name(struct device const *dev ) { char const *tmp ; { if ((unsigned long )dev->init_name != (unsigned long )((char const */* const */)0)) { return ((char const *)dev->init_name); } else { } tmp = kobject_name(& dev->kobj); return (tmp); } } __inline static int dev_to_node(struct device *dev ) { { return (dev->numa_node); } } __inline static void set_dev_node(struct device *dev , int node ) { { dev->numa_node = node; return; } } 
/* Driver-data accessors (dev_get_drvdata/dev_set_drvdata), dev_* logging
 * externs, and PCI config-space wrappers that forward (dev->bus, dev->devfn)
 * to the pci_bus_* primitives; pcie_capability_set_word() is implemented as
 * clear-and-set with an empty clear mask. pci_channel_offline() tests
 * error_state != pci_channel_io_normal (1). */
__inline static void *dev_get_drvdata(struct device const *dev ) { { return ((void *)dev->driver_data); } } __inline static void dev_set_drvdata(struct device *dev , void *data ) { { dev->driver_data = data; return; } } extern void dev_err(struct device const * , char const * , ...) ; extern void dev_warn(struct device const * , char const * , ...) ; extern void _dev_info(struct device const * , char const * , ...) ; __inline static int pci_channel_offline(struct pci_dev *pdev ) { { return (pdev->error_state != 1U); } } extern void pci_dev_put(struct pci_dev * ) ; extern int pci_find_ext_capability(struct pci_dev * , int ) ; extern struct pci_dev *pci_get_device(unsigned int , unsigned int , struct pci_dev * ) ; extern int pci_bus_read_config_word(struct pci_bus * , unsigned int , int , u16 * ) ; extern int pci_bus_read_config_dword(struct pci_bus * , unsigned int , int , u32 * ) ; extern int pci_bus_write_config_word(struct pci_bus * , unsigned int , int , u16 ) ; __inline static int pci_read_config_word(struct pci_dev const *dev , int where , u16 *val ) { int tmp ; { tmp = pci_bus_read_config_word(dev->bus, dev->devfn, where, val); return (tmp); } } __inline static int pci_read_config_dword(struct pci_dev const *dev , int where , u32 *val ) { int tmp ; { tmp = pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); return (tmp); } } __inline static int pci_write_config_word(struct pci_dev const *dev , int where , u16 val ) { int tmp ; { tmp = pci_bus_write_config_word(dev->bus, dev->devfn, where, (int )val); return (tmp); } } extern int pcie_capability_read_word(struct pci_dev * , int , u16 * ) ; extern int pcie_capability_clear_and_set_word(struct pci_dev * , int , u16 , u16 ) ; __inline static int pcie_capability_set_word(struct pci_dev *dev , int pos , u16 set ) { int tmp ; { tmp = pcie_capability_clear_and_set_word(dev, pos, 0, (int )set); return (tmp); } } extern int pci_enable_device_mem(struct pci_dev * ) ; extern void pci_disable_device(struct 
/* Closes the pci_disable_device() extern begun above; remaining PCI power
 * management / region externs plus the LDV wrappers around pci driver
 * (un)registration, then the compound-page helpers: PageTail() tests flag
 * bit 15, compound_head_by_tail()/compound_head() resolve a tail page to
 * its head with likely/unlikely hints; page_count() begins here. */
pci_dev * ) ; extern void pci_set_master(struct pci_dev * ) ; extern int pci_wait_for_pending_transaction(struct pci_dev * ) ; extern int pcie_get_minimum_link(struct pci_dev * , enum pci_bus_speed * , enum pcie_link_width * ) ; extern int pci_select_bars(struct pci_dev * , unsigned long ) ; extern int pci_save_state(struct pci_dev * ) ; extern void pci_restore_state(struct pci_dev * ) ; extern int pci_set_power_state(struct pci_dev * , pci_power_t ) ; extern int pci_wake_from_d3(struct pci_dev * , bool ) ; extern int pci_prepare_to_sleep(struct pci_dev * ) ; extern int pci_request_selected_regions(struct pci_dev * , int , char const * ) ; extern void pci_release_selected_regions(struct pci_dev * , int ) ; extern int __pci_register_driver(struct pci_driver * , struct module * , char const * ) ; int ldv___pci_register_driver_64(struct pci_driver *ldv_func_arg1 , struct module *ldv_func_arg2 , char const *ldv_func_arg3 ) ; extern void pci_unregister_driver(struct pci_driver * ) ; void ldv_pci_unregister_driver_65(struct pci_driver *ldv_func_arg1 ) ; __inline static int PageTail(struct page const *page ) { int tmp ; { tmp = constant_test_bit(15L, (unsigned long const volatile *)(& page->flags)); return (tmp); } } __inline static struct page *compound_head_by_tail(struct page *tail ) { struct page *head ; int tmp ; long tmp___0 ; { head = tail->__annonCompField46.first_page; __asm__ volatile ("": : : "memory"); tmp = PageTail((struct page const *)tail); tmp___0 = ldv__builtin_expect(tmp != 0, 1L); if (tmp___0 != 0L) { return (head); } else { } return (tail); } } __inline static struct page *compound_head(struct page *page ) { struct page *tmp ; int tmp___0 ; long tmp___1 ; { tmp___0 = PageTail((struct page const *)page); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 0L); if (tmp___1 != 0L) { tmp = compound_head_by_tail(page); return (tmp); } else { } return (page); } } __inline static int page_count(struct page *page ) { struct page *tmp ; int tmp___0 ; { tmp = 
/* Finishes page_count() (atomic _count of the compound head), page_to_nid()
 * (NUMA node in the upper page-flag bits), lowmem_page_address()
 * (x86-64 direct-map address arithmetic), the DMA debug externs, and
 * get_dma_ops() (per-device ops or the global dma_ops fallback);
 * dma_map_single_attrs() begins at the end of this run. */
compound_head(page); tmp___0 = atomic_read((atomic_t const *)(& tmp->__annonCompField42.__annonCompField41.__annonCompField40._count)); return (tmp___0); } } __inline static int page_to_nid(struct page const *page ) { { return ((int )(page->flags >> 54)); } } __inline static void *lowmem_page_address(struct page const *page ) { { return ((void *)((unsigned long )((unsigned long long )(((long )page + 24189255811072L) / 64L) << 12) + 0xffff880000000000UL)); } } extern void pci_disable_msix(struct pci_dev * ) ; __inline static int valid_dma_direction(int dma_direction ) { { return ((dma_direction == 0 || dma_direction == 1) || dma_direction == 2); } } __inline static void kmemcheck_mark_initialized(void *address , unsigned int n ) { { return; } } extern void debug_dma_map_page(struct device * , struct page * , size_t , size_t , int , dma_addr_t , bool ) ; extern void debug_dma_mapping_error(struct device * , dma_addr_t ) ; extern void debug_dma_unmap_page(struct device * , dma_addr_t , size_t , int , bool ) ; extern void debug_dma_sync_single_range_for_cpu(struct device * , dma_addr_t , unsigned long , size_t , int ) ; extern void debug_dma_sync_single_range_for_device(struct device * , dma_addr_t , unsigned long , size_t , int ) ; extern struct dma_map_ops *dma_ops ; __inline static struct dma_map_ops *get_dma_ops(struct device *dev ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )dev == (unsigned long )((struct device *)0), 0L); if (tmp != 0L || (unsigned long )dev->archdata.dma_ops == (unsigned long )((struct dma_map_ops *)0)) { return (dma_ops); } else { return (dev->archdata.dma_ops); } } } __inline static dma_addr_t dma_map_single_attrs(struct device *dev , void *ptr , size_t size , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; dma_addr_t addr ; int tmp___0 ; long tmp___1 ; unsigned long tmp___2 ; unsigned long tmp___3 ; { tmp = get_dma_ops(dev); ops = tmp; kmemcheck_mark_initialized(ptr, 
/* dma_map_single_attrs()/dma_unmap_single_attrs() bodies: an invalid DMA
 * direction traps via the inline-asm BUG() (ud2 + __bug_table entry, CIL
 * renders it as a self-looping goto); the virtual address is converted to
 * (page, offset) via __phys_addr() before dispatching to ops->map_page /
 * ops->unmap_page, with the debug_dma_* hooks called afterwards.
 * dma_map_page() begins at the end of this run. */
(unsigned int )size); tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (19), "i" (12UL)); ldv_26816: ; goto ldv_26816; } else { } tmp___2 = __phys_addr((unsigned long )ptr); addr = (*(ops->map_page))(dev, (struct page *)-24189255811072L + (tmp___2 >> 12), (unsigned long )ptr & 4095UL, size, dir, attrs); tmp___3 = __phys_addr((unsigned long )ptr); debug_dma_map_page(dev, (struct page *)-24189255811072L + (tmp___3 >> 12), (unsigned long )ptr & 4095UL, size, (int )dir, addr, 1); return (addr); } } __inline static void dma_unmap_single_attrs(struct device *dev , dma_addr_t addr , size_t size , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (36), "i" (12UL)); ldv_26825: ; goto ldv_26825; } else { } if ((unsigned long )ops->unmap_page != (unsigned long )((void (*)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_page))(dev, addr, size, dir, attrs); } else { } debug_dma_unmap_page(dev, addr, size, (int )dir, 1); return; } } __inline static dma_addr_t dma_map_page(struct device *dev , struct page *page , size_t offset , size_t size , enum dma_data_direction dir ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; dma_addr_t addr ; void *tmp___0 ; int tmp___1 ; long tmp___2 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = 
/* dma_map_page()/dma_unmap_page() bodies: same invalid-direction BUG() trap,
 * then dispatch through ops->map_page / ops->unmap_page with NULL attrs and
 * notify the DMA debug layer; dma_sync_single_range_for_cpu() begins at the
 * end of this run. */
lowmem_page_address((struct page const *)page); kmemcheck_mark_initialized(tmp___0 + offset, (unsigned int )size); tmp___1 = valid_dma_direction((int )dir); tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (84), "i" (12UL)); ldv_26860: ; goto ldv_26860; } else { } addr = (*(ops->map_page))(dev, page, offset, size, dir, (struct dma_attrs *)0); debug_dma_map_page(dev, page, offset, size, (int )dir, addr, 0); return (addr); } } __inline static void dma_unmap_page(struct device *dev , dma_addr_t addr , size_t size , enum dma_data_direction dir ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (96), "i" (12UL)); ldv_26868: ; goto ldv_26868; } else { } if ((unsigned long )ops->unmap_page != (unsigned long )((void (*)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_page))(dev, addr, size, dir, (struct dma_attrs *)0); } else { } debug_dma_unmap_page(dev, addr, size, (int )dir, 0); return; } } __inline static void dma_sync_single_range_for_cpu(struct device *dev , dma_addr_t addr , unsigned long offset , size_t size , enum dma_data_direction dir ) { struct dma_map_ops const *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = (struct dma_map_ops const *)tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile 
/* Sync-for-cpu / sync-for-device helpers: call the optional
 * ops->sync_single_for_cpu / ops->sync_single_for_device hooks at
 * addr + offset when present, then the debug hooks; dma_mapping_error()
 * consults ops->mapping_error when the backend provides one (fallback on
 * the next line). */
("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (134), "i" (12UL)); ldv_26893: ; goto ldv_26893; } else { } if ((unsigned long )ops->sync_single_for_cpu != (unsigned long )((void (*/* const */)(struct device * , dma_addr_t , size_t , enum dma_data_direction ))0)) { (*(ops->sync_single_for_cpu))(dev, addr + (unsigned long long )offset, size, dir); } else { } debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, (int )dir); return; } } __inline static void dma_sync_single_range_for_device(struct device *dev , dma_addr_t addr , unsigned long offset , size_t size , enum dma_data_direction dir ) { struct dma_map_ops const *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = (struct dma_map_ops const *)tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (148), "i" (12UL)); ldv_26902: ; goto ldv_26902; } else { } if ((unsigned long )ops->sync_single_for_device != (unsigned long )((void (*/* const */)(struct device * , dma_addr_t , size_t , enum dma_data_direction ))0)) { (*(ops->sync_single_for_device))(dev, addr + (unsigned long long )offset, size, dir); } else { } debug_dma_sync_single_range_for_device(dev, addr, offset, size, (int )dir); return; } } __inline static int dma_mapping_error(struct device *dev , dma_addr_t dma_addr ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; { tmp = get_dma_ops(dev); ops = tmp; debug_dma_mapping_error(dev, dma_addr); if ((unsigned long )ops->mapping_error != (unsigned long )((int (*)(struct device * , dma_addr_t ))0)) { tmp___0 = (*(ops->mapping_error))(dev, dma_addr); return 
(tmp___0); } else { } return (dma_addr == 0ULL); } } extern int dma_supported(struct device * , u64 ) ; extern int dma_set_mask(struct device * , u64 ) ; extern void *dma_alloc_attrs(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ) ; extern void dma_free_attrs(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ) ; __inline static int dma_set_coherent_mask(struct device *dev , u64 mask ) { int tmp ; { tmp = dma_supported(dev, mask); if (tmp == 0) { return (-5); } else { } dev->coherent_dma_mask = mask; return (0); } } __inline static int dma_set_mask_and_coherent(struct device *dev , u64 mask ) { int rc ; int tmp ; { tmp = dma_set_mask(dev, mask); rc = tmp; if (rc == 0) { dma_set_coherent_mask(dev, mask); } else { } return (rc); } } __inline static void *pci_get_drvdata(struct pci_dev *pdev ) { void *tmp ; { tmp = dev_get_drvdata((struct device const *)(& pdev->dev)); return (tmp); } } __inline static void pci_set_drvdata(struct pci_dev *pdev , void *data ) { { dev_set_drvdata(& pdev->dev, data); return; } } __inline static char const *pci_name(struct pci_dev const *pdev ) { char const *tmp ; { tmp = dev_name(& pdev->dev); return (tmp); } } extern int pci_sriov_set_totalvfs(struct pci_dev * , u16 ) ; __inline static int pci_pcie_cap(struct pci_dev *dev ) { { return ((int )dev->pcie_cap); } } __inline static bool pci_is_pcie(struct pci_dev *dev ) { int tmp ; { tmp = pci_pcie_cap(dev); return (tmp != 0); } } __inline static u16 pcie_caps_reg(struct pci_dev const *dev ) { { return ((u16 )dev->pcie_flags_reg); } } __inline static int pci_pcie_type(struct pci_dev const *dev ) { u16 tmp ; { tmp = pcie_caps_reg(dev); return (((int )tmp & 240) >> 4); } } __inline static struct device_node *pci_device_to_OF_node(struct pci_dev const *pdev ) { { return ((unsigned long )pdev != (unsigned long )((struct pci_dev const *)0) ? 
(struct device_node *)pdev->dev.of_node : (struct device_node *)0); } } extern void __const_udelay(unsigned long ) ; extern void msleep(unsigned int ) ; extern void usleep_range(unsigned long , unsigned long ) ; __inline static void dql_queued(struct dql *dql , unsigned int count ) { long tmp ; { tmp = ldv__builtin_expect(count > 268435455U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/dynamic_queue_limits.h"), "i" (74), "i" (12UL)); ldv_28226: ; goto ldv_28226; } else { } dql->last_obj_cnt = count; __asm__ volatile ("": : : "memory"); dql->num_queued = dql->num_queued + count; return; } } __inline static int dql_avail(struct dql const *dql ) { unsigned int __var ; unsigned int __var___0 ; { __var = 0U; __var___0 = 0U; return ((int )((unsigned int )*((unsigned int const volatile *)(& dql->adj_limit)) - (unsigned int )*((unsigned int const volatile *)(& dql->num_queued)))); } } extern void dql_completed(struct dql * , unsigned int ) ; extern void dql_reset(struct dql * ) ; extern int net_ratelimit(void) ; __inline static __sum16 csum_fold(__wsum sum ) { { __asm__ (" addl %1,%0\n adcl $0xffff,%0": "=r" (sum): "r" (sum << 16), "0" (sum & 4294901760U)); return ((__sum16 )(~ sum >> 16)); } } __inline static __wsum csum_tcpudp_nofold(__be32 saddr , __be32 daddr , unsigned short len , unsigned short proto , __wsum sum ) { { __asm__ (" addl %1, %0\n adcl %2, %0\n adcl %3, %0\n adcl $0, %0\n": "=r" (sum): "g" (daddr), "g" (saddr), "g" (((int )len + (int )proto) << 8), "0" (sum)); return (sum); } } __inline static __sum16 csum_tcpudp_magic(__be32 saddr , __be32 daddr , unsigned short len , unsigned short proto , __wsum sum ) { __wsum tmp ; __sum16 tmp___0 ; { tmp = csum_tcpudp_nofold(saddr, daddr, (int )len, (int )proto, sum); tmp___0 = csum_fold(tmp); return (tmp___0); } } extern __sum16 csum_ipv6_magic(struct in6_addr const * , struct 
/* csum_ipv6_magic extern tail, skb fragment size helpers, the LDV-wrapped
 * skb clone/copy/expand prototypes, and small sk_buff inlines: hash setting,
 * end/tail pointer arithmetic, skb_header_cloned() dataref accounting
 * (NOTE(review): the byte-offset test at skb+142 is CIL's flattening of the
 * cloned bitfield — confirm against the original skbuff.h if editing),
 * and the nonlinear/headlen predicates. */
in6_addr const * , __u32 , unsigned short , __wsum ) ; __inline static unsigned int skb_frag_size(skb_frag_t const *frag ) { { return ((unsigned int )frag->size); } } __inline static void skb_frag_size_sub(skb_frag_t *frag , int delta ) { { frag->size = frag->size - (__u32 )delta; return; } } extern void consume_skb(struct sk_buff * ) ; struct sk_buff *ldv_skb_clone_33(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_41(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_35(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_31(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_39(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_40(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; extern int skb_pad(struct sk_buff * , int ) ; __inline static void skb_set_hash(struct sk_buff *skb , __u32 hash , enum pkt_hash_types type ) { { skb->l4_hash = (unsigned int )type == 3U; skb->sw_hash = 0U; skb->hash = hash; return; } } __inline static unsigned char *skb_end_pointer(struct sk_buff const *skb ) { { return ((unsigned char *)skb->head + (unsigned long )skb->end); } } __inline static struct sk_buff *skb_get(struct sk_buff *skb ) { { atomic_inc(& skb->users); return (skb); } } __inline static int skb_header_cloned(struct sk_buff const *skb ) { int dataref ; unsigned char *tmp ; { if ((unsigned int )*((unsigned char *)skb + 142UL) == 0U) { return (0); } else { } tmp = skb_end_pointer(skb); dataref = atomic_read((atomic_t const *)(& ((struct skb_shared_info *)tmp)->dataref)); dataref = (dataref & 65535) - (dataref >> 16); return (dataref != 1); } } __inline static bool skb_is_nonlinear(struct sk_buff const *skb ) { { return ((unsigned int )skb->data_len != 0U); } } __inline static unsigned int skb_headlen(struct sk_buff const *skb ) { { 
/* skb_headlen() tail (len minus paged data_len), __skb_put() — BUGs if the
 * skb is nonlinear, then advances tail/len — pskb_may_pull() which pulls
 * tail data in via __pskb_pull_tail() when the linear head is short, and
 * the headroom / transport-header pointer helpers. */
return ((unsigned int )skb->len - (unsigned int )skb->data_len); } } extern void skb_add_rx_frag(struct sk_buff * , int , struct page * , int , int , unsigned int ) ; __inline static unsigned char *skb_tail_pointer(struct sk_buff const *skb ) { { return ((unsigned char *)skb->head + (unsigned long )skb->tail); } } __inline static unsigned char *__skb_put(struct sk_buff *skb , unsigned int len ) { unsigned char *tmp ; unsigned char *tmp___0 ; bool tmp___1 ; long tmp___2 ; { tmp___0 = skb_tail_pointer((struct sk_buff const *)skb); tmp = tmp___0; tmp___1 = skb_is_nonlinear((struct sk_buff const *)skb); tmp___2 = ldv__builtin_expect((long )tmp___1, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/skbuff.h"), "i" (1696), "i" (12UL)); ldv_35162: ; goto ldv_35162; } else { } skb->tail = skb->tail + len; skb->len = skb->len + len; return (tmp); } } extern unsigned char *__pskb_pull_tail(struct sk_buff * , int ) ; __inline static int pskb_may_pull(struct sk_buff *skb , unsigned int len ) { unsigned int tmp ; long tmp___0 ; long tmp___1 ; unsigned int tmp___2 ; unsigned char *tmp___3 ; { tmp = skb_headlen((struct sk_buff const *)skb); tmp___0 = ldv__builtin_expect(tmp >= len, 1L); if (tmp___0 != 0L) { return (1); } else { } tmp___1 = ldv__builtin_expect(skb->len < len, 0L); if (tmp___1 != 0L) { return (0); } else { } tmp___2 = skb_headlen((struct sk_buff const *)skb); tmp___3 = __pskb_pull_tail(skb, (int )(len - tmp___2)); return ((unsigned long )tmp___3 != (unsigned long )((unsigned char *)0U)); } } __inline static unsigned int skb_headroom(struct sk_buff const *skb ) { { return ((unsigned int )((long )skb->data) - (unsigned int )((long )skb->head)); } } __inline static unsigned char *skb_transport_header(struct sk_buff const *skb ) { { return ((unsigned char *)skb->head + (unsigned long )skb->transport_header); } } __inline static 
/* Network/transport header offset arithmetic, the LDV __netdev_alloc_skb
 * wrapper prototypes, napi/page allocators with CIL-expanded gfp masks
 * (napi_alloc_skb() passes GFP_ATOMIC == 32U; __dev_alloc_pages() ORs in
 * __GFP_COLD|__GFP_COMP|__GFP_MEMALLOC per the kernel's dev_alloc helpers —
 * TODO confirm exact mask meaning against gfp.h), and the skb fragment
 * page/address accessors. */
unsigned char *skb_network_header(struct sk_buff const *skb ) { { return ((unsigned char *)skb->head + (unsigned long )skb->network_header); } } __inline static int skb_transport_offset(struct sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_transport_header(skb); return ((int )((unsigned int )((long )tmp) - (unsigned int )((long )skb->data))); } } __inline static u32 skb_network_header_len(struct sk_buff const *skb ) { { return ((u32 )((int )skb->transport_header - (int )skb->network_header)); } } __inline static int skb_network_offset(struct sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_network_header(skb); return ((int )((unsigned int )((long )tmp) - (unsigned int )((long )skb->data))); } } struct sk_buff *ldv___netdev_alloc_skb_36(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_37(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_38(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; extern struct sk_buff *__napi_alloc_skb(struct napi_struct * , unsigned int , gfp_t ) ; __inline static struct sk_buff *napi_alloc_skb(struct napi_struct *napi , unsigned int length ) { struct sk_buff *tmp ; { tmp = __napi_alloc_skb(napi, length, 32U); return (tmp); } } __inline static struct page *__dev_alloc_pages(gfp_t gfp_mask , unsigned int order ) { struct page *tmp ; { gfp_mask = gfp_mask | 24832U; tmp = alloc_pages_node(-1, gfp_mask, order); return (tmp); } } __inline static struct page *dev_alloc_pages(unsigned int order ) { struct page *tmp ; { tmp = __dev_alloc_pages(32U, order); return (tmp); } } __inline static struct page *skb_frag_page(skb_frag_t const *frag ) { { return ((struct page *)frag->page.p); } } __inline static void *skb_frag_address(skb_frag_t const *frag ) { struct page *tmp ; void *tmp___0 ; { tmp = skb_frag_page(frag); tmp___0 = lowmem_page_address((struct page const 
/* skb_frag_address() tail, skb_frag_dma_map() (DMA-maps the frag's page at
 * page_offset + offset), __skb_cow()/skb_cow_head() — reallocate headroom
 * via the LDV pskb_expand_head wrapper, rounding the delta with the CIL
 * expansion of max(NET_SKB_PAD-style constants 32/64) — skb_put_padto()
 * zero-extends short frames with skb_pad(), and __skb_header_pointer()
 * begins at the end of this run. */
*)tmp); return (tmp___0 + (unsigned long )frag->page_offset); } } __inline static dma_addr_t skb_frag_dma_map(struct device *dev , skb_frag_t const *frag , size_t offset , size_t size , enum dma_data_direction dir ) { struct page *tmp ; dma_addr_t tmp___0 ; { tmp = skb_frag_page(frag); tmp___0 = dma_map_page(dev, tmp, (size_t )frag->page_offset + offset, size, dir); return (tmp___0); } } __inline static int __skb_cow(struct sk_buff *skb , unsigned int headroom , int cloned ) { int delta ; unsigned int tmp ; unsigned int tmp___0 ; int _max1 ; int _max2 ; int _max1___0 ; int _max2___0 ; int tmp___1 ; { delta = 0; tmp___0 = skb_headroom((struct sk_buff const *)skb); if (tmp___0 < headroom) { tmp = skb_headroom((struct sk_buff const *)skb); delta = (int )(headroom - tmp); } else { } if (delta != 0 || cloned != 0) { _max1 = 32; _max2 = 64; _max1___0 = 32; _max2___0 = 64; tmp___1 = ldv_pskb_expand_head_39(skb, (((_max1 > _max2 ? _max1 : _max2) + -1) + delta) & - (_max1___0 > _max2___0 ? _max1___0 : _max2___0), 0, 32U); return (tmp___1); } else { } return (0); } } __inline static int skb_cow_head(struct sk_buff *skb , unsigned int headroom ) { int tmp ; int tmp___0 ; { tmp = skb_header_cloned((struct sk_buff const *)skb); tmp___0 = __skb_cow(skb, headroom, tmp); return (tmp___0); } } __inline static int skb_put_padto(struct sk_buff *skb , unsigned int len ) { unsigned int size ; int tmp ; long tmp___0 ; { size = skb->len; tmp___0 = ldv__builtin_expect(size < len, 0L); if (tmp___0 != 0L) { len = len - size; tmp = skb_pad(skb, (int )len); if (tmp != 0) { return (-12); } else { } __skb_put(skb, len); } else { } return (0); } } extern int skb_copy_bits(struct sk_buff const * , int , void * , int ) ; __inline static void *__skb_header_pointer(struct sk_buff const *skb , int offset , int len , void *data , int hlen , void *buffer ) { int tmp ; { if (hlen - offset >= len) { return (data + (unsigned long )offset); } else { } if ((unsigned long )skb == (unsigned long )((struct 
/* __skb_header_pointer() tail — returns in-place linear data when possible,
 * else copies into the caller's buffer via skb_copy_bits() — the
 * skb_header_pointer() convenience wrapper, linear-data memcpy helper, TX
 * timestamping (sw_tx_timestamp() reports a software timestamp only when
 * tx_flags bit 1 is set and bit 2 is clear), and the RX-queue
 * record/get/recorded inlines (queue_mapping stored +1 so 0 means
 * "unrecorded"); skb_is_gso() tests shared-info gso_size. */
sk_buff const *)0)) { return ((void *)0); } else { tmp = skb_copy_bits(skb, offset, buffer, len); if (tmp < 0) { return ((void *)0); } else { } } return (buffer); } } __inline static void *skb_header_pointer(struct sk_buff const *skb , int offset , int len , void *buffer ) { unsigned int tmp ; void *tmp___0 ; { tmp = skb_headlen(skb); tmp___0 = __skb_header_pointer(skb, offset, len, (void *)skb->data, (int )tmp, buffer); return (tmp___0); } } __inline static void skb_copy_to_linear_data(struct sk_buff *skb , void const *from , unsigned int const len ) { { memcpy((void *)skb->data, from, (size_t )len); return; } } extern void skb_clone_tx_timestamp(struct sk_buff * ) ; extern void skb_tstamp_tx(struct sk_buff * , struct skb_shared_hwtstamps * ) ; __inline static void sw_tx_timestamp(struct sk_buff *skb ) { unsigned char *tmp ; unsigned char *tmp___0 ; { tmp = skb_end_pointer((struct sk_buff const *)skb); if (((int )((struct skb_shared_info *)tmp)->tx_flags & 2) != 0) { tmp___0 = skb_end_pointer((struct sk_buff const *)skb); if (((int )((struct skb_shared_info *)tmp___0)->tx_flags & 4) == 0) { skb_tstamp_tx(skb, (struct skb_shared_hwtstamps *)0); } else { } } else { } return; } } __inline static void skb_tx_timestamp(struct sk_buff *skb ) { { skb_clone_tx_timestamp(skb); sw_tx_timestamp(skb); return; } } __inline static void skb_record_rx_queue(struct sk_buff *skb , u16 rx_queue ) { { skb->queue_mapping = (unsigned int )rx_queue + 1U; return; } } __inline static u16 skb_get_rx_queue(struct sk_buff const *skb ) { { return ((unsigned int )((u16 )skb->queue_mapping) + 65535U); } } __inline static bool skb_rx_queue_recorded(struct sk_buff const *skb ) { { return ((unsigned int )((unsigned short )skb->queue_mapping) != 0U); } } __inline static bool skb_is_gso(struct sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_end_pointer(skb); return ((unsigned int )((struct skb_shared_info *)tmp)->gso_size != 0U); } } __inline static bool skb_is_gso_v6(struct sk_buff const 
/* skb_is_gso_v6() (gso_type bit 4 == TCPV6 — presumably; confirm against
 * skbuff.h), a no-op checksum assert stub, u64_stats no-op stubs (the
 * seqcount is compiled out on this configuration), and the request_irq()
 * inline over request_threaded_irq() plus the LDV request/free_irq wrapper
 * prototypes the model intercepts. */
*skb ) { unsigned char *tmp ; { tmp = skb_end_pointer(skb); return (((int )((struct skb_shared_info *)tmp)->gso_type & 16) != 0); } } __inline static void skb_checksum_none_assert(struct sk_buff const *skb ) { { return; } } __inline static void u64_stats_init(struct u64_stats_sync *syncp ) { { return; } } __inline static unsigned int u64_stats_fetch_begin_irq(struct u64_stats_sync const *syncp ) { { return (0U); } } __inline static bool u64_stats_fetch_retry_irq(struct u64_stats_sync const *syncp , unsigned int start ) { { return (0); } } extern void synchronize_irq(unsigned int ) ; extern int request_threaded_irq(unsigned int , irqreturn_t (*)(int , void * ) , irqreturn_t (*)(int , void * ) , unsigned long , char const * , void * ) ; __inline static int request_irq(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) { int tmp ; { tmp = request_threaded_irq(irq, handler, (irqreturn_t (*)(int , void * ))0, flags, name, dev); return (tmp); } } __inline static int ldv_request_irq_43(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; __inline static int ldv_request_irq_44(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; __inline static int ldv_request_irq_46(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; __inline static int ldv_request_irq_47(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; extern void free_irq(unsigned int , void * ) ; void ldv_free_irq_45(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ; void ldv_free_irq_48(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ; void ldv_free_irq_49(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ; void ldv_free_irq_50(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ; extern int 
/* irq affinity extern tail, if_mii() ifreq cast, NAPI scheduling inlines:
 * napi_schedule_prep() test-and-sets state bit 0 (SCHED) unless bit 1
 * (DISABLE) is pending, napi_schedule() then queues via __napi_schedule();
 * napi_complete() is a stub in this model; napi_enable() BUGs if SCHED was
 * not already set before clearing it; netdev_get_prio_tc_map() reads the
 * 4-bit priority→TC table. */
irq_set_affinity_hint(unsigned int , struct cpumask const * ) ; __inline static struct mii_ioctl_data *if_mii(struct ifreq *rq ) { { return ((struct mii_ioctl_data *)(& rq->ifr_ifru)); } } extern u8 dcb_ieee_getapp_mask(struct net_device * , struct dcb_app * ) ; extern void __napi_schedule(struct napi_struct * ) ; __inline static bool napi_disable_pending(struct napi_struct *n ) { int tmp ; { tmp = constant_test_bit(1L, (unsigned long const volatile *)(& n->state)); return (tmp != 0); } } __inline static bool napi_schedule_prep(struct napi_struct *n ) { bool tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = napi_disable_pending(n); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { tmp___1 = test_and_set_bit(0L, (unsigned long volatile *)(& n->state)); if (tmp___1 == 0) { tmp___2 = 1; } else { tmp___2 = 0; } } else { tmp___2 = 0; } return ((bool )tmp___2); } } __inline static void napi_schedule(struct napi_struct *n ) { bool tmp ; { tmp = napi_schedule_prep(n); if ((int )tmp) { __napi_schedule(n); } else { } return; } } __inline static void napi_complete(struct napi_struct *n ) { { return; } } extern void napi_disable(struct napi_struct * ) ; __inline static void napi_enable(struct napi_struct *n ) { int tmp ; long tmp___0 ; { tmp = constant_test_bit(0L, (unsigned long const volatile *)(& n->state)); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/netdevice.h"), "i" (507), "i" (12UL)); ldv_43734: ; goto ldv_43734; } else { } __asm__ volatile ("": : : "memory"); clear_bit(0L, (unsigned long volatile *)(& n->state)); return; } } __inline static int netdev_get_prio_tc_map(struct net_device const *dev , u32 prio ) { { return ((int )dev->prio_tc_map[prio & 15U]); } } __inline static int netdev_set_prio_tc_map(struct net_device *dev , u8 prio , u8 tc ) { { if ((int 
/* Traffic-class map setters (reject tc >= num_tc), netdev_reset_tc()
 * zeroing the tc_to_txq/prio_tc_map tables, netdev_set/get_num_tc (max 16),
 * per-queue lookup into dev->_tx, netdev_priv() at the CIL-computed fixed
 * offset 3008, LDV free_netdev wrappers, and the start/wake-all-TX-queue
 * loops over num_tx_queues (CIL renders the for-loops as labelled gotos). */
)dev->num_tc <= (int )tc) { return (-22); } else { } dev->prio_tc_map[(int )prio & 15] = (unsigned int )tc & 15U; return (0); } } __inline static void netdev_reset_tc(struct net_device *dev ) { { dev->num_tc = 0U; memset((void *)(& dev->tc_to_txq), 0, 64UL); memset((void *)(& dev->prio_tc_map), 0, 16UL); return; } } __inline static int netdev_set_num_tc(struct net_device *dev , u8 num_tc ) { { if ((unsigned int )num_tc > 16U) { return (-22); } else { } dev->num_tc = num_tc; return (0); } } __inline static int netdev_get_num_tc(struct net_device *dev ) { { return ((int )dev->num_tc); } } __inline static struct netdev_queue *netdev_get_tx_queue(struct net_device const *dev , unsigned int index ) { { return ((struct netdev_queue *)dev->_tx + (unsigned long )index); } } __inline static void *netdev_priv(struct net_device const *dev ) { { return ((void *)dev + 3008U); } } extern void free_netdev(struct net_device * ) ; void ldv_free_netdev_60(struct net_device *dev ) ; void ldv_free_netdev_63(struct net_device *dev ) ; extern void netif_schedule_queue(struct netdev_queue * ) ; __inline static void netif_tx_start_queue(struct netdev_queue *dev_queue ) { { clear_bit(0L, (unsigned long volatile *)(& dev_queue->state)); return; } } __inline static void netif_tx_start_all_queues(struct net_device *dev ) { unsigned int i ; struct netdev_queue *txq ; struct netdev_queue *tmp ; { i = 0U; goto ldv_44808; ldv_44807: tmp = netdev_get_tx_queue((struct net_device const *)dev, i); txq = tmp; netif_tx_start_queue(txq); i = i + 1U; ldv_44808: ; if (dev->num_tx_queues > i) { goto ldv_44807; } else { } return; } } extern void netif_tx_wake_queue(struct netdev_queue * ) ; __inline static void netif_tx_wake_all_queues(struct net_device *dev ) { unsigned int i ; struct netdev_queue *txq ; struct netdev_queue *tmp ; { i = 0U; goto ldv_44821; ldv_44820: tmp = netdev_get_tx_queue((struct net_device const *)dev, i); txq = tmp; netif_tx_wake_queue(txq); i = i + 1U; ldv_44821: ; if 
(dev->num_tx_queues > i) { goto ldv_44820; } else { } return; } } __inline static void netif_tx_stop_queue(struct netdev_queue *dev_queue ) { { set_bit(0L, (unsigned long volatile *)(& dev_queue->state)); return; } } extern void netif_tx_stop_all_queues(struct net_device * ) ; __inline static bool netif_tx_queue_stopped(struct netdev_queue const *dev_queue ) { int tmp ; { tmp = constant_test_bit(0L, (unsigned long const volatile *)(& dev_queue->state)); return (tmp != 0); } } __inline static bool netif_xmit_stopped(struct netdev_queue const *dev_queue ) { { return (((unsigned long )dev_queue->state & 3UL) != 0UL); } } __inline static void netdev_tx_sent_queue(struct netdev_queue *dev_queue , unsigned int bytes ) { int tmp ; long tmp___0 ; int tmp___1 ; long tmp___2 ; { dql_queued(& dev_queue->dql, bytes); tmp = dql_avail((struct dql const *)(& dev_queue->dql)); tmp___0 = ldv__builtin_expect(tmp >= 0, 1L); if (tmp___0 != 0L) { return; } else { } set_bit(1L, (unsigned long volatile *)(& dev_queue->state)); __asm__ volatile ("mfence": : : "memory"); tmp___1 = dql_avail((struct dql const *)(& dev_queue->dql)); tmp___2 = ldv__builtin_expect(tmp___1 >= 0, 0L); if (tmp___2 != 0L) { clear_bit(1L, (unsigned long volatile *)(& dev_queue->state)); } else { } return; } } __inline static void netdev_tx_completed_queue(struct netdev_queue *dev_queue , unsigned int pkts , unsigned int bytes ) { long tmp ; int tmp___0 ; int tmp___1 ; { tmp = ldv__builtin_expect(bytes == 0U, 0L); if (tmp != 0L) { return; } else { } dql_completed(& dev_queue->dql, bytes); __asm__ volatile ("mfence": : : "memory"); tmp___0 = dql_avail((struct dql const *)(& dev_queue->dql)); if (tmp___0 < 0) { return; } else { } tmp___1 = test_and_clear_bit(1L, (unsigned long volatile *)(& dev_queue->state)); if (tmp___1 != 0) { netif_schedule_queue(dev_queue); } else { } return; } } __inline static void netdev_tx_reset_queue(struct netdev_queue *q ) { { clear_bit(1L, (unsigned long volatile *)(& q->state)); 
dql_reset(& q->dql); return; } } __inline static bool netif_running(struct net_device const *dev ) { int tmp ; { tmp = constant_test_bit(0L, (unsigned long const volatile *)(& dev->state)); return (tmp != 0); } } __inline static void netif_start_subqueue(struct net_device *dev , u16 queue_index ) { struct netdev_queue *txq ; struct netdev_queue *tmp ; { tmp = netdev_get_tx_queue((struct net_device const *)dev, (unsigned int )queue_index); txq = tmp; netif_tx_start_queue(txq); return; } } __inline static void netif_stop_subqueue(struct net_device *dev , u16 queue_index ) { struct netdev_queue *txq ; struct netdev_queue *tmp ; { tmp = netdev_get_tx_queue((struct net_device const *)dev, (unsigned int )queue_index); txq = tmp; netif_tx_stop_queue(txq); return; } } __inline static bool __netif_subqueue_stopped(struct net_device const *dev , u16 queue_index ) { struct netdev_queue *txq ; struct netdev_queue *tmp ; bool tmp___0 ; { tmp = netdev_get_tx_queue(dev, (unsigned int )queue_index); txq = tmp; tmp___0 = netif_tx_queue_stopped((struct netdev_queue const *)txq); return (tmp___0); } } extern void netif_wake_subqueue(struct net_device * , u16 ) ; extern int netif_set_xps_queue(struct net_device * , struct cpumask const * , u16 ) ; extern int netif_set_real_num_tx_queues(struct net_device * , unsigned int ) ; extern int netif_set_real_num_rx_queues(struct net_device * , unsigned int ) ; extern void __dev_kfree_skb_any(struct sk_buff * , enum skb_free_reason ) ; __inline static void dev_kfree_skb_any(struct sk_buff *skb ) { { __dev_kfree_skb_any(skb, 1); return; } } __inline static void dev_consume_skb_any(struct sk_buff *skb ) { { __dev_kfree_skb_any(skb, 0); return; } } extern int netif_receive_skb_sk(struct sock * , struct sk_buff * ) ; __inline static int netif_receive_skb(struct sk_buff *skb ) { int tmp ; { tmp = netif_receive_skb_sk(skb->sk, skb); return (tmp); } } extern gro_result_t napi_gro_receive(struct napi_struct * , struct sk_buff * ) ; extern void 
napi_gro_flush(struct napi_struct * , bool ) ; __inline static bool netif_carrier_ok(struct net_device const *dev ) { int tmp ; { tmp = constant_test_bit(2L, (unsigned long const volatile *)(& dev->state)); return (tmp == 0); } } extern void netif_carrier_on(struct net_device * ) ; extern void netif_carrier_off(struct net_device * ) ; extern void netif_device_detach(struct net_device * ) ; extern void netif_device_attach(struct net_device * ) ; __inline static u32 netif_msg_init(int debug_value , int default_msg_enable_bits ) { { if (debug_value < 0 || (unsigned int )debug_value > 31U) { return ((u32 )default_msg_enable_bits); } else { } if (debug_value == 0) { return (0U); } else { } return ((u32 )((1 << debug_value) + -1)); } } __inline static void __netif_tx_lock(struct netdev_queue *txq , int cpu ) { { spin_lock(& txq->_xmit_lock); txq->xmit_lock_owner = cpu; return; } } __inline static void __netif_tx_unlock(struct netdev_queue *txq ) { { txq->xmit_lock_owner = -1; spin_unlock(& txq->_xmit_lock); return; } } __inline static void netif_tx_disable(struct net_device *dev ) { unsigned int i ; int cpu ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; struct netdev_queue *txq ; struct netdev_queue *tmp ; { local_bh_disable(); __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_45383; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_45383; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_45383; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_45383; default: __bad_percpu_size(); } ldv_45383: pscr_ret__ = pfo_ret__; goto ldv_45389; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_45393; case 2UL: __asm__ ("movw %%gs:%1,%0": 
"=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_45393; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_45393; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_45393; default: __bad_percpu_size(); } ldv_45393: pscr_ret__ = pfo_ret_____0; goto ldv_45389; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_45402; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_45402; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_45402; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_45402; default: __bad_percpu_size(); } ldv_45402: pscr_ret__ = pfo_ret_____1; goto ldv_45389; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_45411; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_45411; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_45411; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_45411; default: __bad_percpu_size(); } ldv_45411: pscr_ret__ = pfo_ret_____2; goto ldv_45389; default: __bad_size_call_parameter(); goto ldv_45389; } ldv_45389: cpu = pscr_ret__; i = 0U; goto ldv_45421; ldv_45420: tmp = netdev_get_tx_queue((struct net_device const *)dev, i); txq = tmp; __netif_tx_lock(txq, cpu); netif_tx_stop_queue(txq); __netif_tx_unlock(txq); i = i + 1U; ldv_45421: ; if (dev->num_tx_queues > i) { goto ldv_45420; } else { } local_bh_enable(); return; } } extern int register_netdev(struct net_device * ) ; int ldv_register_netdev_59(struct net_device *dev ) ; extern void unregister_netdev(struct net_device * ) ; void ldv_unregister_netdev_57(struct net_device *dev ) ; void ldv_unregister_netdev_62(struct net_device *dev ) ; extern int 
dev_addr_add(struct net_device * , unsigned char const * , unsigned char ) ; extern int dev_addr_del(struct net_device * , unsigned char const * , unsigned char ) ; extern struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device * , struct list_head ** ) ; extern void netdev_rss_key_fill(void * , size_t ) ; __inline static void netif_set_gso_max_size(struct net_device *dev , unsigned int size ) { { dev->gso_max_size = size; return; } } __inline static bool netif_is_macvlan(struct net_device *dev ) { { return ((dev->priv_flags & 2097152U) != 0U); } } extern void netdev_crit(struct net_device const * , char const * , ...) ; extern void netdev_err(struct net_device const * , char const * , ...) ; extern void netdev_warn(struct net_device const * , char const * , ...) ; extern void netdev_info(struct net_device const * , char const * , ...) ; void *ldv_vzalloc_54(unsigned long ldv_func_arg1 ) ; void *ldv_vzalloc_56(unsigned long ldv_func_arg1 ) ; void *ldv_vzalloc_node_53(unsigned long ldv_func_arg1 , int ldv_func_arg2 ) ; void *ldv_vzalloc_node_55(unsigned long ldv_func_arg1 , int ldv_func_arg2 ) ; extern void vfree(void const * ) ; __inline static struct iphdr *ip_hdr(struct sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_network_header(skb); return ((struct iphdr *)tmp); } } extern void rtnl_lock(void) ; extern void rtnl_unlock(void) ; extern int ndo_dflt_fdb_add(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * , u16 , u16 ) ; extern int ndo_dflt_bridge_getlink(struct sk_buff * , u32 , u32 , struct net_device * , u16 , u32 , u32 , int , u32 , int (*)(struct sk_buff * , struct net_device * , u32 ) ) ; extern struct nlattr *nla_find(struct nlattr const * , int , int ) ; __inline static void *nlmsg_data(struct nlmsghdr const *nlh ) { { return ((void *)nlh + 16U); } } __inline static int nlmsg_len(struct nlmsghdr const *nlh ) { { return ((int )((unsigned int )nlh->nlmsg_len - 16U)); } } __inline static struct nlattr 
*nlmsg_attrdata(struct nlmsghdr const *nlh , int hdrlen ) { unsigned char *data ; void *tmp ; { tmp = nlmsg_data(nlh); data = (unsigned char *)tmp; return ((struct nlattr *)(data + ((unsigned long )((unsigned int )hdrlen + 3U) & 4294967292UL))); } } __inline static int nlmsg_attrlen(struct nlmsghdr const *nlh , int hdrlen ) { int tmp ; { tmp = nlmsg_len(nlh); return ((int )((unsigned int )tmp - (((unsigned int )hdrlen + 3U) & 4294967292U))); } } __inline static struct nlattr *nlmsg_find_attr(struct nlmsghdr const *nlh , int hdrlen , int attrtype ) { int tmp ; struct nlattr *tmp___0 ; struct nlattr *tmp___1 ; { tmp = nlmsg_attrlen(nlh, hdrlen); tmp___0 = nlmsg_attrdata(nlh, hdrlen); tmp___1 = nla_find((struct nlattr const *)tmp___0, tmp, attrtype); return (tmp___1); } } __inline static int nla_type(struct nlattr const *nla ) { { return ((int )nla->nla_type & -49153); } } __inline static void *nla_data(struct nlattr const *nla ) { { return ((void *)nla + 4U); } } __inline static int nla_len(struct nlattr const *nla ) { { return ((int )nla->nla_len + -4); } } __inline static int nla_ok(struct nlattr const *nla , int remaining ) { { return ((remaining > 3 && (unsigned int )((unsigned short )nla->nla_len) > 3U) && (int )nla->nla_len <= remaining); } } __inline static struct nlattr *nla_next(struct nlattr const *nla , int *remaining ) { int totlen ; { totlen = ((int )nla->nla_len + 3) & -4; *remaining = *remaining - totlen; return ((struct nlattr *)nla + (unsigned long )totlen); } } __inline static u16 nla_get_u16(struct nlattr const *nla ) { void *tmp ; { tmp = nla_data(nla); return (*((u16 *)tmp)); } } __inline static struct tcphdr *tcp_hdr(struct sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_transport_header(skb); return ((struct tcphdr *)tmp); } } __inline static unsigned int tcp_hdrlen(struct sk_buff const *skb ) { struct tcphdr *tmp ; { tmp = tcp_hdr(skb); return ((unsigned int )((int )tmp->doff * 4)); } } __inline static struct ipv6hdr *ipv6_hdr(struct 
sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_network_header(skb); return ((struct ipv6hdr *)tmp); } } extern u32 eth_get_headlen(void * , unsigned int ) ; extern __be16 eth_type_trans(struct sk_buff * , struct net_device * ) ; extern int eth_validate_addr(struct net_device * ) ; extern struct net_device *alloc_etherdev_mqs(int , unsigned int , unsigned int ) ; static u8 const eth_reserved_addr_base[6U] = { 1U, 128U, 194U, 0U, 0U, 0U}; __inline static bool is_link_local_ether_addr(u8 const *addr ) { __be16 *a ; __be16 const *b ; __be16 m ; { a = (__be16 *)addr; b = (__be16 const *)(& eth_reserved_addr_base); m = 61695U; return ((((unsigned int )*((u32 const *)addr) ^ (unsigned int )*((u32 const *)b)) | (unsigned int )(((int )*(a + 2UL) ^ (int )((unsigned short )*(b + 2UL))) & (int )m)) == 0U); } } __inline static bool is_zero_ether_addr(u8 const *addr ) { { return (((unsigned int )*((u32 const *)addr) | (unsigned int )*((u16 const *)addr + 4U)) == 0U); } } __inline static bool is_multicast_ether_addr(u8 const *addr ) { u32 a ; { a = *((u32 const *)addr); return ((a & 1U) != 0U); } } __inline static bool is_unicast_ether_addr(u8 const *addr ) { bool tmp ; int tmp___0 ; { tmp = is_multicast_ether_addr(addr); if ((int )tmp != 0) { tmp___0 = 0; } else { tmp___0 = 1; } return ((bool )tmp___0); } } __inline static bool is_valid_ether_addr(u8 const *addr ) { bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; int tmp___3 ; { tmp = is_multicast_ether_addr(addr); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { tmp___1 = is_zero_ether_addr(addr); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { tmp___3 = 1; } else { tmp___3 = 0; } } else { tmp___3 = 0; } return ((bool )tmp___3); } } __inline static void eth_zero_addr(u8 *addr ) { { memset((void *)addr, 0, 6UL); return; } } __inline static void ether_addr_copy(u8 *dst , u8 const *src ) { { *((u32 *)dst) = *((u32 const *)src); *((u16 *)dst + 4U) = *((u16 const *)src + 4U); return; } 
} __inline static bool ether_addr_equal(u8 const *addr1 , u8 const *addr2 ) { u32 fold ; { fold = ((unsigned int )*((u32 const *)addr1) ^ (unsigned int )*((u32 const *)addr2)) | (unsigned int )((int )((unsigned short )*((u16 const *)addr1 + 4U)) ^ (int )((unsigned short )*((u16 const *)addr2 + 4U))); return (fold == 0U); } } __inline static int eth_skb_pad(struct sk_buff *skb ) { int tmp ; { tmp = skb_put_padto(skb, 60U); return (tmp); } } __inline static void __vlan_hwaccel_put_tag(struct sk_buff *skb , __be16 vlan_proto , u16 vlan_tci ) { { skb->vlan_proto = vlan_proto; skb->vlan_tci = (__u16 )((unsigned int )vlan_tci | 4096U); return; } } __inline static __be16 __vlan_get_protocol(struct sk_buff *skb , __be16 type , int *depth ) { unsigned int vlan_depth ; int __ret_warn_on ; long tmp ; long tmp___0 ; struct vlan_hdr *vh ; int tmp___1 ; long tmp___2 ; { vlan_depth = (unsigned int )skb->mac_len; if ((unsigned int )type == 129U || (unsigned int )type == 43144U) { if (vlan_depth != 0U) { __ret_warn_on = vlan_depth <= 3U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/if_vlan.h", 492); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (0U); } else { } vlan_depth = vlan_depth - 4U; } else { vlan_depth = 14U; } ldv_54386: tmp___1 = pskb_may_pull(skb, vlan_depth + 4U); tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L); if (tmp___2 != 0L) { return (0U); } else { } vh = (struct vlan_hdr *)skb->data + (unsigned long )vlan_depth; type = vh->h_vlan_encapsulated_proto; vlan_depth = vlan_depth + 4U; if ((unsigned int )type == 129U || (unsigned int )type == 43144U) { goto ldv_54386; } else { } } else { } if ((unsigned long )depth != (unsigned long )((int *)0)) { *depth = (int )vlan_depth; } else { } return (type); } } __inline static __be16 vlan_get_protocol(struct sk_buff *skb ) { __be16 tmp ; { tmp = __vlan_get_protocol(skb, (int )skb->protocol, (int *)0); return (tmp); } } 
/* Externs pulled in by CIL: VXLAN RX-port discovery, device-tree MAC lookup,
 * PCIe Advanced Error Reporting control, and MDIO ioctl forwarding. */
extern void vxlan_get_rx_port(struct net_device * ) ; extern void const *of_get_mac_address(struct device_node * ) ; extern int pci_enable_pcie_error_reporting(struct pci_dev * ) ; extern int pci_disable_pcie_error_reporting(struct pci_dev * ) ; extern int pci_cleanup_aer_uncorrect_error_status(struct pci_dev * ) ; extern int mdio_mii_ioctl(struct mdio_if_info const * , struct mii_ioctl_data * , int ) ;
/* ixgbe hardware-layer entry points defined in other translation units of the driver. */
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw ) ; s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw , u8 *pba_num , u32 pba_num_size ) ; enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status ) ; enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status ) ; s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw , u16 offset , u16 *data ) ; bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw ) ; bool ixgbe_mng_enabled(struct ixgbe_hw *hw ) ; u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw , u32 reg ) ; void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw , u32 reg , u16 value ) ;
/* ixgbe_removed: returns true when @addr is NULL. Used to detect that the
 * device's register mapping (hw->hw_addr) has been cleared; presumably this
 * happens on surprise device removal — confirm against ixgbe_remove_adapter
 * in the full driver source. The comparison is wrapped in
 * ldv__builtin_expect(..., 0L), i.e. removal is the unlikely path. */
__inline static bool ixgbe_removed(void *addr ) { long tmp ;
{
tmp = ldv__builtin_expect((unsigned long )addr == (unsigned long )((void *)0), 0L);
return (tmp != 0L);
} }
/* ixgbe_write_reg: write 32-bit @value to device register at byte offset @reg.
 * Re-reads hw->hw_addr through a volatile pointer each call (the `__var`
 * local is a CIL artifact of the ACCESS_ONCE macro) and silently drops the
 * write when ixgbe_removed() says the mapping is gone, so writes after
 * device removal are harmless no-ops. */
__inline static void ixgbe_write_reg(struct ixgbe_hw *hw , u32 reg , u32 value ) { u8 *reg_addr ; u8 *__var ; bool tmp ;
{
__var = (u8 *)0U;
/* volatile re-load of the base address: must observe a concurrent clear */
reg_addr = *((u8 * volatile *)(& hw->hw_addr));
tmp = ixgbe_removed((void *)reg_addr);
if ((int )tmp) {
return;
} else {
}
writel(value, (void volatile *)reg_addr + (unsigned long )reg);
return;
} }
/* More driver-internal prototypes: MMIO read and DCB (data-center bridging)
 * configuration helpers. The trailing declaration continues on the next line. */
u32 ixgbe_read_reg(struct ixgbe_hw *hw , u32 reg ) ; u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg , int direction , u8 up___0 ) ; s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw , struct ixgbe_dcb_config *dcb_config , int max_frame , u8 direction ) ; s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw , struct ieee_ets *ets , int max_frame ) ; s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw , u8
pfc_en , u8 *prio_tc ) ; s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw , struct ixgbe_dcb_config *dcb_config ) ; extern void dca_register_notify(struct notifier_block * ) ; extern void dca_unregister_notify(struct notifier_block * ) ; extern int dca_add_requester(struct device * ) ; extern int dca_remove_requester(struct device * ) ; extern u8 dca3_get_tag(struct device * , int ) ; __inline static void skb_mark_napi_id(struct sk_buff *skb , struct napi_struct *napi ) { { skb->__annonCompField83.napi_id = napi->napi_id; return; } } __inline static unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring ) { int tmp ; { tmp = constant_test_bit(6L, (unsigned long const volatile *)(& ring->state)); if (tmp != 0) { return (4096U); } else { } return (2048U); } } __inline static unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring ) { int tmp ; { tmp = constant_test_bit(6L, (unsigned long const volatile *)(& ring->state)); if (tmp != 0) { return (1U); } else { } return (0U); } } __inline static void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector ) { { atomic_set(& q_vector->state, 0); return; } } __inline static bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector ) { int rc ; int tmp ; { tmp = atomic_cmpxchg(& q_vector->state, 0, 1); rc = tmp; if (rc != 0) { (q_vector->tx.ring)->stats.yields = (q_vector->tx.ring)->stats.yields + 1ULL; } else { } return (rc == 0); } } __inline static void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector ) { int __ret_warn_on ; int tmp ; long tmp___0 ; { tmp = atomic_read((atomic_t const *)(& q_vector->state)); __ret_warn_on = tmp != 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("/home/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/net/ethernet/intel/ixgbe/ixgbe.h", 429); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((unsigned long )q_vector->napi.gro_list != (unsigned long )((struct sk_buff *)0)) { napi_gro_flush(& q_vector->napi, 
/* (tail of ixgbe_qv_unlock_napi, begun on the previous line: flush GRO,
 * then release the lock by resetting the q_vector state to 0/idle) */
0); } else { } atomic_set(& q_vector->state, 0); return; } }
/* ixgbe_qv_lock_poll: try to take the q_vector for busy-poll by CAS-ing
 * state 0 (idle) -> 2 (poll owner). On contention (state was nonzero) a
 * yield is counted on the TX ring stats and false is returned. Returns
 * true only when the CAS won. */
__inline static bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector ) { int rc ; int tmp ;
{
tmp = atomic_cmpxchg(& q_vector->state, 0, 2);
rc = tmp;
if (rc != 0) {
/* lost the race with NAPI or another poller; account the yield */
(q_vector->tx.ring)->stats.yields = (q_vector->tx.ring)->stats.yields + 1ULL;
} else {
}
return (rc == 0);
} }
/* ixgbe_qv_unlock_poll: release a busy-poll lock. WARNs (expanded here to
 * warn_slowpath_null at ixgbe.h:454) if the state is not 2, i.e. the caller
 * did not actually hold the poll lock, then unconditionally resets the
 * state to idle. */
__inline static void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector ) { int __ret_warn_on ; int tmp ; long tmp___0 ;
{
tmp = atomic_read((atomic_t const *)(& q_vector->state));
__ret_warn_on = tmp != 2;
tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp___0 != 0L) {
warn_slowpath_null("/home/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/net/ethernet/intel/ixgbe/ixgbe.h", 454);
} else {
}
ldv__builtin_expect(__ret_warn_on != 0, 0L);
atomic_set(& q_vector->state, 0);
return;
} }
/* ixgbe_qv_busy_polling: non-mutating check — true while state == 2
 * (a busy-poller currently owns the vector). */
__inline static bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector ) { int tmp ;
{
tmp = atomic_read((atomic_t const *)(& q_vector->state));
return (tmp == 2);
} }
/* ixgbe_qv_disable: CAS state 0 -> 3 (disabled). Succeeds (returns true)
 * only from idle; if NAPI or a poller holds the vector the caller must
 * retry — no yield accounting here, unlike the lock helpers above. */
__inline static bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector ) { int rc ; int tmp ;
{
tmp = atomic_cmpxchg(& q_vector->state, 0, 3);
rc = tmp;
return (rc == 0);
} }
/* ixgbe_test_staterr: mask the advanced RX descriptor's writeback
 * status/error field with @stat_err_bits; nonzero result means at least one
 * of the requested bits is set. Operates on little-endian descriptor data. */
__inline static __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc , u32 const stat_err_bits ) {
{
return (rx_desc->wb.upper.status_error & (__le32 )stat_err_bits);
} }
/* ixgbe_desc_unused: number of free descriptors in @ring, computed from the
 * next_to_clean / next_to_use cursors with u16 wraparound (the +65535U is
 * the CIL expansion of "- 1" in 16-bit arithmetic). Body continues on the
 * next line. */
__inline static u16 ixgbe_desc_unused(struct ixgbe_ring *ring ) { u16 ntc ; u16 ntu ;
{
ntc = ring->next_to_clean;
ntu = ring->next_to_use;
return (((((int )ntc <= (int )ntu ?
ring->count : 0U) + (unsigned int )ntc) - (unsigned int )ntu) + 65535U); } } __inline static u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter ) { { switch ((unsigned int )adapter->hw.mac.type) { case 1U: ; case 2U: ; case 3U: ; return (16U); case 4U: ; case 5U: ; return (64U); default: ; return (0U); } } } struct ixgbe_info ixgbe_82598_info ; struct ixgbe_info ixgbe_82599_info ; struct ixgbe_info ixgbe_X540_info ; struct ixgbe_info ixgbe_X550_info ; struct ixgbe_info ixgbe_X550EM_x_info ; struct dcbnl_rtnl_ops const dcbnl_ops ; char ixgbe_driver_name[6U] ; char const ixgbe_driver_version[8U] ; char ixgbe_default_device_descr[39U] ; void ixgbe_up(struct ixgbe_adapter *adapter ) ; void ixgbe_down(struct ixgbe_adapter *adapter ) ; void ixgbe_reinit_locked(struct ixgbe_adapter *adapter ) ; void ixgbe_reset(struct ixgbe_adapter *adapter ) ; void ixgbe_set_ethtool_ops(struct net_device *netdev ) ; int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring ) ; int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring ) ; void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring ) ; void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring ) ; void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter , struct ixgbe_ring *ring ) ; void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter , struct ixgbe_ring *ring ) ; void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter , struct ixgbe_ring *ring ) ; void ixgbe_update_stats(struct ixgbe_adapter *adapter ) ; int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter ) ; int ixgbe_wol_supported(struct ixgbe_adapter *adapter , u16 device_id , u16 subdevice_id ) ; void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter ) ; int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter , u8 *addr , u16 queue ) ; int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter , u8 *addr , u16 queue ) ; void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter ) ; netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb , struct 
ixgbe_adapter *adapter , struct ixgbe_ring *tx_ring ) ; void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring , struct ixgbe_tx_buffer *tx_buffer ) ; void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring , u16 cleaned_count ) ; void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector ) ; int ixgbe_poll(struct napi_struct *napi , int budget ) ; s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw ) ; s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw , u32 fdirctrl ) ; s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw , u32 fdirctrl ) ; s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw , union ixgbe_atr_hash_dword input , union ixgbe_atr_hash_dword common , u8 queue ) ; s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw , union ixgbe_atr_input *input_mask ) ; s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw , union ixgbe_atr_input *input , u16 soft_id , u8 queue ) ; void ixgbe_set_rx_mode(struct net_device *netdev ) ; void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter ) ; int ixgbe_setup_tc(struct net_device *dev , u8 tc ) ; void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring , u32 vlan_macip_lens , u32 fcoe_sof_eof , u32 type_tucmd , u32 mss_l4len_idx ) ; void ixgbe_do_reset(struct net_device *netdev ) ; void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter ) ; int ixgbe_sysfs_init(struct ixgbe_adapter *adapter ) ; void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter ) ; int ixgbe_fso(struct ixgbe_ring *tx_ring , struct ixgbe_tx_buffer *first , u8 *hdr_len ) ; int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter , union ixgbe_adv_rx_desc *rx_desc , struct sk_buff *skb ) ; int ixgbe_fcoe_ddp_get(struct net_device *netdev , u16 xid , struct scatterlist *sgl , unsigned int sgc ) ; int ixgbe_fcoe_ddp_target(struct net_device *netdev , u16 xid , struct scatterlist *sgl , unsigned int sgc ) ; int ixgbe_fcoe_ddp_put(struct net_device *netdev , u16 xid ) ; int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter ) ; 
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter ) ; int ixgbe_fcoe_enable(struct net_device *netdev ) ; int ixgbe_fcoe_disable(struct net_device *netdev ) ; int ixgbe_fcoe_get_wwn(struct net_device *netdev , u64 *wwn , int type ) ; int ixgbe_fcoe_get_hbainfo(struct net_device *netdev , struct netdev_fcoe_hbainfo *info ) ; u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter ) ; void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter ) ; void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter ) ; void ixgbe_dbg_init(void) ; void ixgbe_dbg_exit(void) ; __inline static struct netdev_queue *txring_txq(struct ixgbe_ring const *ring ) { struct netdev_queue *tmp ; { tmp = netdev_get_tx_queue((struct net_device const *)ring->netdev, (unsigned int )ring->queue_index); return (tmp); } } void ixgbe_ptp_init(struct ixgbe_adapter *adapter ) ; void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter ) ; void ixgbe_ptp_stop(struct ixgbe_adapter *adapter ) ; void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter ) ; void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter ) ; void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter , struct sk_buff *skb ) ; int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter , struct ifreq *ifr ) ; int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter , struct ifreq *ifr ) ; void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter ) ; void ixgbe_ptp_reset(struct ixgbe_adapter *adapter ) ; void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter , u32 eicr ) ; void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter ) ; u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter ) ; void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter ) ; void ixgbe_msg_task(struct ixgbe_adapter *adapter ) ; int ixgbe_vf_configuration(struct pci_dev *pdev , unsigned int event_mask ) ; void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter ) ; void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter ) ; int 
ixgbe_ndo_set_vf_mac(struct net_device *netdev , int vf , u8 *mac ) ; int ixgbe_ndo_set_vf_vlan(struct net_device *netdev , int vf , u16 vlan , u8 qos ) ; int ixgbe_ndo_set_vf_bw(struct net_device *netdev , int vf , int min_tx_rate , int max_tx_rate ) ; int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev , int vf , bool setting ) ; int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev , int vf , bool setting ) ; int ixgbe_ndo_get_vf_config(struct net_device *netdev , int vf , struct ifla_vf_info *ivi ) ; void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter ) ; int ixgbe_disable_sriov(struct ixgbe_adapter *adapter ) ; void ixgbe_enable_sriov(struct ixgbe_adapter *adapter ) ; int ixgbe_pci_sriov_configure(struct pci_dev *dev , int num_vfs ) ; char ixgbe_driver_name[6U] = { 'i', 'x', 'g', 'b', 'e', '\000'}; static char const ixgbe_driver_string[47U] = { 'I', 'n', 't', 'e', 'l', '(', 'R', ')', ' ', '1', '0', ' ', 'G', 'i', 'g', 'a', 'b', 'i', 't', ' ', 'P', 'C', 'I', ' ', 'E', 'x', 'p', 'r', 'e', 's', 's', ' ', 'N', 'e', 't', 'w', 'o', 'r', 'k', ' ', 'D', 'r', 'i', 'v', 'e', 'r', '\000'}; char ixgbe_default_device_descr[39U] = { 'I', 'n', 't', 'e', 'l', '(', 'R', ')', ' ', '1', '0', ' ', 'G', 'i', 'g', 'a', 'b', 'i', 't', ' ', 'N', 'e', 't', 'w', 'o', 'r', 'k', ' ', 'C', 'o', 'n', 'n', 'e', 'c', 't', 'i', 'o', 'n', '\000'}; char const ixgbe_driver_version[8U] = { '4', '.', '0', '.', '1', '-', 'k', '\000'}; static char const ixgbe_copyright[43U] = { 'C', 'o', 'p', 'y', 'r', 'i', 'g', 'h', 't', ' ', '(', 'c', ')', ' ', '1', '9', '9', '9', '-', '2', '0', '1', '4', ' ', 'I', 'n', 't', 'e', 'l', ' ', 'C', 'o', 'r', 'p', 'o', 'r', 'a', 't', 'i', 'o', 'n', '.', '\000'}; static char const ixgbe_overheat_msg[153U] = { 'N', 'e', 't', 'w', 'o', 'r', 'k', ' ', 'a', 'd', 'a', 'p', 't', 'e', 'r', ' ', 'h', 'a', 's', ' ', 'b', 'e', 'e', 'n', ' ', 's', 't', 'o', 'p', 'p', 'e', 'd', ' ', 'b', 'e', 'c', 'a', 'u', 's', 'e', ' ', 'i', 't', ' ', 'h', 'a', 's', ' ', 'o', 
'v', 'e', 'r', ' ', 'h', 'e', 'a', 't', 'e', 'd', '.', ' ', 'R', 'e', 's', 't', 'a', 'r', 't', ' ', 't', 'h', 'e', ' ', 'c', 'o', 'm', 'p', 'u', 't', 'e', 'r', '.', ' ', 'I', 'f', ' ', 't', 'h', 'e', ' ', 'p', 'r', 'o', 'b', 'l', 'e', 'm', ' ', 'p', 'e', 'r', 's', 'i', 's', 't', 's', ',', ' ', 'p', 'o', 'w', 'e', 'r', ' ', 'o', 'f', 'f', ' ', 't', 'h', 'e', ' ', 's', 'y', 's', 't', 'e', 'm', ' ', 'a', 'n', 'd', ' ', 'r', 'e', 'p', 'l', 'a', 'c', 'e', ' ', 't', 'h', 'e', ' ', 'a', 'd', 'a', 'p', 't', 'e', 'r', '\000'}; static struct ixgbe_info const *ixgbe_info_tbl[5U] = { (struct ixgbe_info const *)(& ixgbe_82598_info), (struct ixgbe_info const *)(& ixgbe_82599_info), (struct ixgbe_info const *)(& ixgbe_X540_info), (struct ixgbe_info const *)(& ixgbe_X550_info), (struct ixgbe_info const *)(& ixgbe_X550EM_x_info)}; static struct pci_device_id const ixgbe_pci_tbl[35U] = { {32902U, 4278U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4294U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4295U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4296U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5387U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4317U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4332U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4337U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4321U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4340U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4315U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5384U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4343U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 4348U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 5399U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 4347U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 5383U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 5396U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 4345U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 5418U, 4294967295U, 4294967295U, 0U, 0U, 
1UL}, {32902U, 5417U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 5404U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 4344U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 5416U, 4294967295U, 4294967295U, 0U, 0U, 2UL}, {32902U, 5453U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 5455U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 5464U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 5463U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 5450U, 4294967295U, 4294967295U, 0U, 0U, 1UL}, {32902U, 5472U, 4294967295U, 4294967295U, 0U, 0U, 2UL}, {32902U, 5475U, 4294967295U, 4294967295U, 0U, 0U, 3UL}, {32902U, 5546U, 4294967295U, 4294967295U, 0U, 0U, 4UL}, {32902U, 5547U, 4294967295U, 4294967295U, 0U, 0U, 4UL}, {32902U, 5549U, 4294967295U, 4294967295U, 0U, 0U, 4UL}, {0U, 0U, 0U, 0U, 0U, 0U, 0UL}}; struct pci_device_id const __mod_pci__ixgbe_pci_tbl_device_table[35U] ; static int ixgbe_notify_dca(struct notifier_block *nb , unsigned long event , void *p ) ; static struct notifier_block dca_notifier = {& ixgbe_notify_dca, (struct notifier_block *)0, 0}; static unsigned int max_vfs ; static unsigned int allow_unsupported_sfp ; static int debug = -1; static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw , struct pci_dev *pdev ) ; static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter , u32 reg , u16 *value ) { struct pci_dev *parent_dev ; struct pci_bus *parent_bus ; bool tmp ; int tmp___0 ; bool tmp___1 ; { parent_bus = ((adapter->pdev)->bus)->parent; if ((unsigned long )parent_bus == (unsigned long )((struct pci_bus *)0)) { return (-1); } else { } parent_dev = parent_bus->self; if ((unsigned long )parent_dev == (unsigned long )((struct pci_dev *)0)) { return (-1); } else { } tmp = pci_is_pcie(parent_dev); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (-1); } else { } pcie_capability_read_word(parent_dev, (int )reg, value); if ((unsigned int )*value == 65535U) { tmp___1 = ixgbe_check_cfg_remove(& adapter->hw, 
parent_dev); if ((int )tmp___1) { return (-1); } else { } } else { } return (0); } } static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u16 link_status ; int err ; { hw = & adapter->hw; link_status = 0U; hw->bus.type = 3; err = ixgbe_read_pci_cfg_word_parent(adapter, 18U, & link_status); if (err != 0) { return (err); } else { } hw->bus.width = ixgbe_convert_bus_width((int )link_status); hw->bus.speed = ixgbe_convert_bus_speed((int )link_status); return (0); } } __inline static bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw ) { { switch ((int )hw->device_id) { case 5450: ; case 5464: ; return (1); default: ; return (0); } } } static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter , int expected_gts ) { int max_gts ; enum pci_bus_speed speed ; enum pcie_link_width width ; struct pci_dev *pdev ; bool tmp ; int tmp___0 ; { max_gts = 0; speed = 255; width = 255; tmp = ixgbe_pcie_from_parent(& adapter->hw); if ((int )tmp) { pdev = (((adapter->pdev)->bus)->parent)->self; } else { pdev = adapter->pdev; } tmp___0 = pcie_get_minimum_link(pdev, & speed, & width); if ((tmp___0 != 0 || (unsigned int )speed == 255U) || (unsigned int )width == 255U) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "Unable to determine PCI Express bandwidth.\n"); return; } else { } switch ((unsigned int )speed) { case 20U: max_gts = (int )((unsigned int )width * 2U); goto ldv_57265; case 21U: max_gts = (int )((unsigned int )width * 4U); goto ldv_57265; case 22U: max_gts = (int )((unsigned int )width * 8U); goto ldv_57265; default: dev_warn((struct device const *)(& (adapter->pdev)->dev), "Unable to determine PCI Express bandwidth.\n"); return; } ldv_57265: _dev_info((struct device const *)(& (adapter->pdev)->dev), "PCI Express bandwidth of %dGT/s available\n", max_gts); _dev_info((struct device const *)(& (adapter->pdev)->dev), "(Speed:%s, Width: x%d, Encoding Loss:%s)\n", (unsigned int )speed != 22U ? ((unsigned int )speed != 21U ? 
((unsigned int )speed == 20U ? (char *)"2.5GT/s" : (char *)"Unknown") : (char *)"5.0GT/s") : (char *)"8.0GT/s", (unsigned int )width, (unsigned int )speed != 20U ? ((unsigned int )speed != 21U ? ((unsigned int )speed == 22U ? (char *)"<2%" : (char *)"Unknown") : (char *)"20%") : (char *)"20%"); if (max_gts < expected_gts) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "This is not sufficient for optimal performance of this card.\n"); dev_warn((struct device const *)(& (adapter->pdev)->dev), "For optimal performance, at least %dGT/s of bandwidth is required.\n", expected_gts); dev_warn((struct device const *)(& (adapter->pdev)->dev), "A slot with more lanes and/or higher speed is suggested.\n"); } else { } return; } } static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter ) { int tmp ; int tmp___0 ; int tmp___1 ; { tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp == 0) { tmp___0 = constant_test_bit(4L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 == 0) { tmp___1 = test_and_set_bit(5L, (unsigned long volatile *)(& adapter->state)); if (tmp___1 == 0) { schedule_work(& adapter->service_task); } else { } } else { } } else { } return; } } static void ixgbe_remove_adapter(struct ixgbe_hw *hw ) { struct ixgbe_adapter *adapter ; int tmp ; { adapter = (struct ixgbe_adapter *)hw->back; if ((unsigned long )hw->hw_addr == (unsigned long )((u8 *)0U)) { return; } else { } hw->hw_addr = (u8 *)0U; dev_err((struct device const *)(& (adapter->pdev)->dev), "Adapter removed\n"); tmp = constant_test_bit(6L, (unsigned long const volatile *)(& adapter->state)); if (tmp != 0) { ixgbe_service_event_schedule(adapter); } else { } return; } } static void ixgbe_check_remove(struct ixgbe_hw *hw , u32 reg ) { u32 value ; { if (reg == 8U) { ixgbe_remove_adapter(hw); return; } else { } value = ixgbe_read_reg(hw, 8U); if (value == 4294967295U) { ixgbe_remove_adapter(hw); } else { } return; } } u32 
ixgbe_read_reg(struct ixgbe_hw *hw , u32 reg ) { u8 *reg_addr ; u8 *__var ; u32 value ; bool tmp ; long tmp___0 ; { __var = (u8 *)0U; reg_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ixgbe_removed((void *)reg_addr); if ((int )tmp) { return (4294967295U); } else { } value = readl((void const volatile *)reg_addr + (unsigned long )reg); tmp___0 = ldv__builtin_expect(value == 4294967295U, 0L); if (tmp___0 != 0L) { ixgbe_check_remove(hw, reg); } else { } return (value); } } static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw , struct pci_dev *pdev ) { u16 value ; { pci_read_config_word((struct pci_dev const *)pdev, 0, & value); if ((unsigned int )value == 65535U) { ixgbe_remove_adapter(hw); return (1); } else { } return (0); } } u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw , u32 reg ) { struct ixgbe_adapter *adapter ; u16 value ; bool tmp ; bool tmp___0 ; { adapter = (struct ixgbe_adapter *)hw->back; tmp = ixgbe_removed((void *)hw->hw_addr); if ((int )tmp) { return (65535U); } else { } pci_read_config_word((struct pci_dev const *)adapter->pdev, (int )reg, & value); if ((unsigned int )value == 65535U) { tmp___0 = ixgbe_check_cfg_remove(hw, adapter->pdev); if ((int )tmp___0) { return (65535U); } else { } } else { } return (value); } } static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw , u32 reg ) { struct ixgbe_adapter *adapter ; u32 value ; bool tmp ; bool tmp___0 ; { adapter = (struct ixgbe_adapter *)hw->back; tmp = ixgbe_removed((void *)hw->hw_addr); if ((int )tmp) { return (4294967295U); } else { } pci_read_config_dword((struct pci_dev const *)adapter->pdev, (int )reg, & value); if (value == 4294967295U) { tmp___0 = ixgbe_check_cfg_remove(hw, adapter->pdev); if ((int )tmp___0) { return (4294967295U); } else { } } else { } return (value); } } void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw , u32 reg , u16 value ) { struct ixgbe_adapter *adapter ; bool tmp ; { adapter = (struct ixgbe_adapter *)hw->back; tmp = ixgbe_removed((void *)hw->hw_addr); if 
((int )tmp) { return; } else { } pci_write_config_word((struct pci_dev const *)adapter->pdev, (int )reg, (int )value); return; } } static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter ) { int tmp ; long tmp___0 ; { tmp = constant_test_bit(5L, (unsigned long const volatile *)(& adapter->state)); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c"), "i" (421), "i" (12UL)); ldv_57315: ; goto ldv_57315; } else { } __asm__ volatile ("": : : "memory"); clear_bit(5L, (unsigned long volatile *)(& adapter->state)); return; } } static struct ixgbe_reg_info const ixgbe_reg_info_tbl[19U] = { {0U, (char *)"CTRL"}, {8U, (char *)"STATUS"}, {24U, (char *)"CTRL_EXT"}, {2048U, (char *)"EICR"}, {8448U, (char *)"SRRCTL"}, {8704U, (char *)"DRXCTL"}, {4104U, (char *)"RDLEN"}, {4112U, (char *)"RDH"}, {4120U, (char *)"RDT"}, {4136U, (char *)"RXDCTL"}, {4096U, (char *)"RDBAL"}, {4100U, (char *)"RDBAH"}, {24576U, (char *)"TDBAL"}, {24580U, (char *)"TDBAH"}, {24584U, (char *)"TDLEN"}, {24592U, (char *)"TDH"}, {24600U, (char *)"TDT"}, {24616U, (char *)"TXDCTL"}, {0U, (char *)0}}; static void ixgbe_regdump(struct ixgbe_hw *hw , struct ixgbe_reg_info *reginfo ) { int i ; int j ; char rname[16U] ; u32 regs[64U] ; u32 tmp ; { i = 0; j = 0; switch (reginfo->ofs) { case 8448U: i = 0; goto ldv_57330; ldv_57329: regs[i] = ixgbe_read_reg(hw, (u32 )(i <= 15 ? (i + 2112) * 4 : (i <= 63 ? i * 64 + 4116 : (i + -64) * 64 + 53268))); i = i + 1; ldv_57330: ; if (i <= 63) { goto ldv_57329; } else { } goto ldv_57332; case 8704U: i = 0; goto ldv_57335; ldv_57334: regs[i] = ixgbe_read_reg(hw, (u32 )(i <= 15 ? 
(i + 2176) * 4 : (i <= 63 ? i * 64 + 4108 : (i + -64) * 64 + 53260))); i = i + 1; ldv_57335: ; if (i <= 63) { goto ldv_57334; } else { } goto ldv_57332; case 4104U: i = 0; goto ldv_57339; ldv_57338: regs[i] = ixgbe_read_reg(hw, (u32 )(i <= 63 ? i * 64 + 4104 : (i + -64) * 64 + 53256)); i = i + 1; ldv_57339: ; if (i <= 63) { goto ldv_57338; } else { } goto ldv_57332; case 4112U: i = 0; goto ldv_57343; ldv_57342: regs[i] = ixgbe_read_reg(hw, (u32 )(i <= 63 ? i * 64 + 4112 : (i + -64) * 64 + 53264)); i = i + 1; ldv_57343: ; if (i <= 63) { goto ldv_57342; } else { } goto ldv_57332; case 4120U: i = 0; goto ldv_57347; ldv_57346: regs[i] = ixgbe_read_reg(hw, (u32 )(i <= 63 ? i * 64 + 4120 : (i + -64) * 64 + 53272)); i = i + 1; ldv_57347: ; if (i <= 63) { goto ldv_57346; } else { } goto ldv_57332; case 4136U: i = 0; goto ldv_57351; ldv_57350: regs[i] = ixgbe_read_reg(hw, (u32 )(i <= 63 ? i * 64 + 4136 : (i + -64) * 64 + 53288)); i = i + 1; ldv_57351: ; if (i <= 63) { goto ldv_57350; } else { } goto ldv_57332; case 4096U: i = 0; goto ldv_57355; ldv_57354: regs[i] = ixgbe_read_reg(hw, (u32 )(i <= 63 ? (i + 64) * 64 : (i + 768) * 64)); i = i + 1; ldv_57355: ; if (i <= 63) { goto ldv_57354; } else { } goto ldv_57332; case 4100U: i = 0; goto ldv_57359; ldv_57358: regs[i] = ixgbe_read_reg(hw, (u32 )(i <= 63 ? 
i * 64 + 4100 : (i + -64) * 64 + 53252)); i = i + 1; ldv_57359: ; if (i <= 63) { goto ldv_57358; } else { } goto ldv_57332; case 24576U: i = 0; goto ldv_57363; ldv_57362: regs[i] = ixgbe_read_reg(hw, (u32 )((i + 384) * 64)); i = i + 1; ldv_57363: ; if (i <= 63) { goto ldv_57362; } else { } goto ldv_57332; case 24580U: i = 0; goto ldv_57367; ldv_57366: regs[i] = ixgbe_read_reg(hw, (u32 )(i * 64 + 24580)); i = i + 1; ldv_57367: ; if (i <= 63) { goto ldv_57366; } else { } goto ldv_57332; case 24584U: i = 0; goto ldv_57371; ldv_57370: regs[i] = ixgbe_read_reg(hw, (u32 )(i * 64 + 24584)); i = i + 1; ldv_57371: ; if (i <= 63) { goto ldv_57370; } else { } goto ldv_57332; case 24592U: i = 0; goto ldv_57375; ldv_57374: regs[i] = ixgbe_read_reg(hw, (u32 )(i * 64 + 24592)); i = i + 1; ldv_57375: ; if (i <= 63) { goto ldv_57374; } else { } goto ldv_57332; case 24600U: i = 0; goto ldv_57379; ldv_57378: regs[i] = ixgbe_read_reg(hw, (u32 )(i * 64 + 24600)); i = i + 1; ldv_57379: ; if (i <= 63) { goto ldv_57378; } else { } goto ldv_57332; case 24616U: i = 0; goto ldv_57383; ldv_57382: regs[i] = ixgbe_read_reg(hw, (u32 )(i * 64 + 24616)); i = i + 1; ldv_57383: ; if (i <= 63) { goto ldv_57382; } else { } goto ldv_57332; default: tmp = ixgbe_read_reg(hw, reginfo->ofs); printk("\016ixgbe: %-15s %08x\n", reginfo->name, tmp); return; } ldv_57332: i = 0; goto ldv_57390; ldv_57389: snprintf((char *)(& rname), 16UL, "%s[%d-%d]", reginfo->name, i * 8, i * 8 + 7); printk("\vixgbe: %-15s", (char *)(& rname)); j = 0; goto ldv_57387; ldv_57386: printk(" %08x", regs[i * 8 + j]); j = j + 1; ldv_57387: ; if (j <= 7) { goto ldv_57386; } else { } printk("\n"); i = i + 1; ldv_57390: ; if (i <= 7) { goto ldv_57389; } else { } return; } } static void ixgbe_dump(struct ixgbe_adapter *adapter ) { struct net_device *netdev ; struct ixgbe_hw *hw ; struct ixgbe_reg_info *reginfo ; int n ; struct ixgbe_ring *tx_ring ; struct ixgbe_tx_buffer *tx_buffer ; union ixgbe_adv_tx_desc *tx_desc ; struct my_u0 *u0 ; 
struct ixgbe_ring *rx_ring ; union ixgbe_adv_rx_desc *rx_desc ; struct ixgbe_rx_buffer *rx_buffer_info ; u32 staterr ; int i ; bool tmp ; int tmp___0 ; unsigned int tmp___1 ; void *tmp___2 ; { netdev = adapter->netdev; hw = & adapter->hw; n = 0; i = 0; if (((int )adapter->msg_enable & 8192) == 0) { return; } else { } if ((unsigned long )netdev != (unsigned long )((struct net_device *)0)) { _dev_info((struct device const *)(& (adapter->pdev)->dev), "Net device Info\n"); printk("\016ixgbe: Device Name state trans_start last_rx\n"); printk("\016ixgbe: %-15s %016lX %016lX %016lX\n", (char *)(& netdev->name), netdev->state, netdev->trans_start, netdev->last_rx); } else { } _dev_info((struct device const *)(& (adapter->pdev)->dev), "Register Dump\n"); printk("\016ixgbe: Register Name Value\n"); reginfo = (struct ixgbe_reg_info *)(& ixgbe_reg_info_tbl); goto ldv_57412; ldv_57411: ixgbe_regdump(hw, reginfo); reginfo = reginfo + 1; ldv_57412: ; if ((unsigned long )reginfo->name != (unsigned long )((char *)0)) { goto ldv_57411; } else { } if ((unsigned long )netdev == (unsigned long )((struct net_device *)0)) { return; } else { tmp = netif_running((struct net_device const *)netdev); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } } _dev_info((struct device const *)(& (adapter->pdev)->dev), "TX Rings Summary\n"); printk("\016ixgbe: %s %s %s %s\n", (char *)"Queue [NTU] [NTC] [bi(ntc)->dma ]", (char *)"leng", (char *)"ntw", (char *)"timestamp"); n = 0; goto ldv_57415; ldv_57414: tx_ring = adapter->tx_ring[n]; tx_buffer = tx_ring->__annonCompField118.tx_buffer_info + (unsigned long )tx_ring->next_to_clean; printk("\016ixgbe: %5d %5X %5X %016llX %08X %p %016llX\n", n, (int )tx_ring->next_to_use, (int )tx_ring->next_to_clean, tx_buffer->dma, tx_buffer->len, tx_buffer->next_to_watch, (unsigned long long )tx_buffer->time_stamp); n = n + 1; ldv_57415: ; if (adapter->num_tx_queues > n) { goto ldv_57414; } else { } if (((int )adapter->msg_enable & 
1024) == 0) { goto rx_ring_summary; } else { } _dev_info((struct device const *)(& (adapter->pdev)->dev), "TX Rings Dump\n"); n = 0; goto ldv_57422; ldv_57421: tx_ring = adapter->tx_ring[n]; printk("\016ixgbe: ------------------------------------\n"); printk("\016ixgbe: TX QUEUE INDEX = %d\n", (int )tx_ring->queue_index); printk("\016ixgbe: ------------------------------------\n"); printk("\016ixgbe: %s%s %s %s %s %s\n", (char *)"T [desc] [address 63:0 ] ", (char *)"[PlPOIdStDDt Ln] [bi->dma ] ", (char *)"leng", (char *)"ntw", (char *)"timestamp", (char *)"bi->skb"); i = 0; goto ldv_57419; ldv_57418: tx_desc = (union ixgbe_adv_tx_desc *)tx_ring->desc + (unsigned long )i; tx_buffer = tx_ring->__annonCompField118.tx_buffer_info + (unsigned long )i; u0 = (struct my_u0 *)tx_desc; if (tx_buffer->len != 0U) { printk("\016ixgbe: T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p", i, u0->a, u0->b, tx_buffer->dma, tx_buffer->len, tx_buffer->next_to_watch, (unsigned long long )tx_buffer->time_stamp, tx_buffer->skb); if ((int )tx_ring->next_to_use == i && (int )tx_ring->next_to_clean == i) { printk(" NTC/U\n"); } else if ((int )tx_ring->next_to_use == i) { printk(" NTU\n"); } else if ((int )tx_ring->next_to_clean == i) { printk(" NTC\n"); } else { printk("\n"); } if (((int )adapter->msg_enable & 4096) != 0 && (unsigned long )tx_buffer->skb != (unsigned long )((struct sk_buff *)0)) { print_hex_dump("\016", "", 1, 16, 1, (void const *)(tx_buffer->skb)->data, (size_t )tx_buffer->len, 1); } else { } } else { } i = i + 1; ldv_57419: ; if ((unsigned long )tx_ring->desc != (unsigned long )((void *)0) && (int )tx_ring->count > i) { goto ldv_57418; } else { } n = n + 1; ldv_57422: ; if (adapter->num_tx_queues > n) { goto ldv_57421; } else { } rx_ring_summary: _dev_info((struct device const *)(& (adapter->pdev)->dev), "RX Rings Summary\n"); printk("\016ixgbe: Queue [NTU] [NTC]\n"); n = 0; goto ldv_57425; ldv_57424: rx_ring = adapter->rx_ring[n]; printk("\016ixgbe: %5d %5X %5X\n", 
n, (int )rx_ring->next_to_use, (int )rx_ring->next_to_clean); n = n + 1; ldv_57425: ; if (adapter->num_rx_queues > n) { goto ldv_57424; } else { } if (((int )adapter->msg_enable & 2048) == 0) { return; } else { } _dev_info((struct device const *)(& (adapter->pdev)->dev), "RX Rings Dump\n"); n = 0; goto ldv_57431; ldv_57430: rx_ring = adapter->rx_ring[n]; printk("\016ixgbe: ------------------------------------\n"); printk("\016ixgbe: RX QUEUE INDEX = %d\n", (int )rx_ring->queue_index); printk("\016ixgbe: ------------------------------------\n"); printk("\016ixgbe: %s%s%s", (char *)"R [desc] [ PktBuf A0] ", (char *)"[ HeadBuf DD] [bi->dma ] [bi->skb ] ", (char *)"<-- Adv Rx Read format\n"); printk("\016ixgbe: %s%s%s", (char *)"RWB[desc] [PcsmIpSHl PtRs] ", (char *)"[vl er S cks ln] ---------------- [bi->skb ] ", (char *)"<-- Adv Rx Write-Back format\n"); i = 0; goto ldv_57428; ldv_57427: rx_buffer_info = rx_ring->__annonCompField118.rx_buffer_info + (unsigned long )i; rx_desc = (union ixgbe_adv_rx_desc *)rx_ring->desc + (unsigned long )i; u0 = (struct my_u0 *)rx_desc; staterr = rx_desc->wb.upper.status_error; if ((int )staterr & 1) { printk("\016ixgbe: RWB[0x%03X] %016llX %016llX ---------------- %p", i, u0->a, u0->b, rx_buffer_info->skb); } else { printk("\016ixgbe: R [0x%03X] %016llX %016llX %016llX %p", i, u0->a, u0->b, rx_buffer_info->dma, rx_buffer_info->skb); if (((int )adapter->msg_enable & 4096) != 0 && rx_buffer_info->dma != 0ULL) { tmp___1 = ixgbe_rx_bufsz(rx_ring); tmp___2 = lowmem_page_address((struct page const *)rx_buffer_info->page); print_hex_dump("\016", "", 1, 16, 1, (void const *)tmp___2 + (unsigned long )rx_buffer_info->page_offset, (size_t )tmp___1, 1); } else { } } if ((int )rx_ring->next_to_use == i) { printk(" NTU\n"); } else if ((int )rx_ring->next_to_clean == i) { printk(" NTC\n"); } else { printk("\n"); } i = i + 1; ldv_57428: ; if ((int )rx_ring->count > i) { goto ldv_57427; } else { } n = n + 1; ldv_57431: ; if (adapter->num_rx_queues > 
/* NOTE(review): CIL/LDV machine-generated code. Several functions were
   flattened onto this one physical line; the span below is re-wrapped for
   readability with comments added -- the token stream is unchanged. */
/* Tail of ixgbe_dump(): closing goto-loop over the RX rings, then the
   function epilogue. */
n) { goto ldv_57430; } else { } return; } }
/* Clear bit 28 (0x10000000) of the register at offset 0x18 -- presumably
   CTRL_EXT.DRV_LOAD, releasing the driver/firmware handshake; confirm
   against the 82599 register map. */
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter ) { u32 ctrl_ext ; {
ctrl_ext = ixgbe_read_reg(& adapter->hw, 24U);
/* 4026531839U == 0xEFFFFFFF, i.e. ~(1 << 28) */
ixgbe_write_reg(& adapter->hw, 24U, ctrl_ext & 4026531839U);
return; } }
/* Counterpart of the above: set bit 28 (268435456U == 0x10000000) in the
   same register to signal that the driver has taken control. */
static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter ) { u32 ctrl_ext ; {
ctrl_ext = ixgbe_read_reg(& adapter->hw, 24U);
ixgbe_write_reg(& adapter->hw, 24U, ctrl_ext | 268435456U);
return; } }
/* Program an interrupt vector allocation (IVAR-style) entry for a queue.
   direction == -1 selects the "other causes" entry; otherwise it looks
   like 0 = RX and 1 = TX from the index arithmetic -- TODO confirm.
   ORing 0x80 (128U) into msix_vector marks the entry valid. */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter , s8 direction , u8 queue , u8 msix_vector ) { u32 ivar ; u32 index ; struct ixgbe_hw *hw ; {
hw = & adapter->hw;
switch ((unsigned int )hw->mac.type) {
/* mac.type 1: four 8-bit entries packed per 32-bit register at byte
   offset (index + 576) * 4. */
case 1U:
msix_vector = (u8 )((unsigned int )msix_vector | 128U);
if ((int )direction == -1) { direction = 0; } else { }
index = (u32 )(((int )direction * 64 + (int )queue) >> 2) & 31U;
ivar = ixgbe_read_reg(hw, (index + 576U) * 4U);
/* Replace only the 8-bit lane selected by (queue & 3). */
ivar = (u32 )(~ (255 << ((int )queue & 3) * 8)) & ivar;
ivar = (u32 )((int )msix_vector << ((int )queue & 3) * 8) | ivar;
ixgbe_write_reg(hw, (index + 576U) * 4U, ivar);
goto ldv_57451;
/* mac.type 2..5 share a single layout. */
case 2U: ; case 3U: ; case 4U: ; case 5U: ;
if ((int )direction == -1) {
/* "Other causes" vector lives in the register at offset 2560 (0xA00). */
msix_vector = (u8 )((unsigned int )msix_vector | 128U);
index = (u32 )(((int )queue & 1) * 8);
ivar = ixgbe_read_reg(& adapter->hw, 2560U);
ivar = (u32 )(~ (255 << (int )index)) & ivar;
ivar = (u32 )((int )msix_vector << (int )index) | ivar;
ixgbe_write_reg(& adapter->hw, 2560U, ivar);
goto ldv_57451;
} else {
/* Two queues per register; (queue & 1, direction) select the byte. */
msix_vector = (u8 )((unsigned int )msix_vector | 128U);
index = (u32 )((((int )queue & 1) * 2 + (int )direction) * 8);
ivar = ixgbe_read_reg(hw, (u32 )((((int )queue >> 1) + 576) * 4));
ivar = (u32 )(~ (255 << (int )index)) & ivar;
ivar = (u32 )((int )msix_vector << (int )index) | ivar;
ixgbe_write_reg(hw, (u32 )((((int )queue >> 1) + 576) * 4), ivar);
goto ldv_57451;
}
default: ;
goto ldv_57451;
}
ldv_57451: ;
return; } }
/* Head of ixgbe_irq_rearm_queues(); its body continues on the next
   physical line of the generated file. */
__inline static void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter , u64 qmask ) { u32 mask ; { 
/* NOTE(review): CIL/LDV machine-generated code, re-wrapped for readability
   with comments added; the token stream is unchanged. */
/* Body of ixgbe_irq_rearm_queues() (header is on the previous physical
   line): write the queue mask to interrupt-cause-set-style registers --
   presumably EICS; confirm against the 82599 register map. */
switch ((unsigned int )adapter->hw.mac.type) {
case 1U:
/* mac.type 1 takes only a 16-bit mask, at offset 2056 (0x808). */
mask = (u32 )qmask & 65535U;
ixgbe_write_reg(& adapter->hw, 2056U, mask);
goto ldv_57463;
case 2U: ; case 3U: ; case 4U: ; case 5U:
/* 64-bit mask split across offsets 2704/2708 (0xA90/0xA94). */
mask = (u32 )qmask;
ixgbe_write_reg(& adapter->hw, 2704U, mask);
mask = (u32 )(qmask >> 32);
ixgbe_write_reg(& adapter->hw, 2708U, mask);
goto ldv_57463;
default: ;
goto ldv_57463;
}
ldv_57463: ;
return; } }
/* Free one TX buffer slot: release its skb (if any) and DMA-unmap its
   data.  A slot that owns an skb was mapped as a single region
   (dma_unmap_single_attrs), otherwise as a page (dma_unmap_page). */
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring , struct ixgbe_tx_buffer *tx_buffer ) { {
if ((unsigned long )tx_buffer->skb != (unsigned long )((struct sk_buff *)0)) {
dev_kfree_skb_any(tx_buffer->skb);
if (tx_buffer->len != 0U) {
dma_unmap_single_attrs(ring->dev, tx_buffer->dma, (size_t )tx_buffer->len, 1, (struct dma_attrs *)0);
} else { }
} else
if (tx_buffer->len != 0U) {
dma_unmap_page(ring->dev, tx_buffer->dma, (size_t )tx_buffer->len, 1);
} else { }
/* Reset bookkeeping so the slot can be reused. */
tx_buffer->next_to_watch = (union ixgbe_adv_tx_desc *)0;
tx_buffer->skb = (struct sk_buff *)0;
tx_buffer->len = 0U;
return; } }
/* Accumulate link-level flow-control XOFF frames received and, when any
   were seen, clear state bit 3 on every TX ring (re-arming the TX hang
   check -- see ixgbe_check_tx_hang, which sets the same bit). */
static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; struct ixgbe_hw_stats *hwstats ; int i ; u32 data ; {
hw = & adapter->hw;
hwstats = & adapter->stats;
/* Only meaningful when flow-control current_mode is 1 or 3. */
if ((unsigned int )hw->fc.current_mode != 3U && (unsigned int )hw->fc.current_mode != 1U) {
return;
} else { }
/* XOFF-received counter register differs by MAC type. */
switch ((unsigned int )hw->mac.type) {
case 1U:
data = ixgbe_read_reg(hw, 53096U);
goto ldv_57481;
default:
data = ixgbe_read_reg(hw, 16808U);
}
ldv_57481:
hwstats->lxoffrxc = hwstats->lxoffrxc + (u64 )data;
if (data == 0U) {
return;
} else { }
i = 0;
goto ldv_57484;
ldv_57483:
clear_bit(3L, (unsigned long volatile *)(& (adapter->tx_ring[i])->state));
i = i + 1;
ldv_57484: ;
if (adapter->num_tx_queues > i) {
goto ldv_57483;
} else { }
return; } }
/* Head of ixgbe_update_xoff_received(); the declaration list and body
   continue on the following physical lines. */
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; struct ixgbe_hw_stats *hwstats ; u32 xoff[8U] ; unsigned int tmp ; u8 tc ; int i ; bool pfc_en ; u32 pxoffrxc ; int tmp___0 ; struct ixgbe_ring 
/* NOTE(review): CIL/LDV machine-generated code, re-wrapped for readability
   with comments added; the token stream is unchanged. */
/* Body of ixgbe_update_xoff_received() (its locals are declared at the
   end of the previous physical line): count per-priority XOFF (PFC)
   frames and fold them into per-traffic-class totals. */
*tx_ring ; {
hw = & adapter->hw;
hwstats = & adapter->stats;
/* CIL expansion of zero-initializing xoff[8]. */
xoff[0] = 0U;
tmp = 1U;
while (1) {
if (tmp >= 8U) {
break;
} else { }
xoff[tmp] = 0U;
tmp = tmp + 1U;
}
pfc_en = adapter->dcb_cfg.pfc_mode_enable;
if ((unsigned long )adapter->ixgbe_ieee_pfc != (unsigned long )((struct ieee_pfc *)0)) {
/* IEEE PFC config, when present, can also enable PFC accounting. */
pfc_en = ((int )pfc_en | ((unsigned int )(adapter->ixgbe_ieee_pfc)->pfc_en != 0U)) != 0;
} else { }
/* Without flags bit 0x1000 (DCB path, judging by the pfc checks here)
   or PFC, fall back to link-level flow-control accounting. */
if ((adapter->flags & 4096U) == 0U || ! pfc_en) {
ixgbe_update_xoff_rx_lfc(adapter);
return;
} else { }
i = 0;
goto ldv_57500;
ldv_57499: ;
/* Per-priority XOFF-received counter; register base differs by MAC type. */
switch ((unsigned int )hw->mac.type) {
case 1U:
pxoffrxc = ixgbe_read_reg(hw, (u32 )((i + 13256) * 4));
goto ldv_57497;
default:
pxoffrxc = ixgbe_read_reg(hw, (u32 )((i + 4184) * 4));
}
ldv_57497:
hwstats->pxoffrxc[i] = hwstats->pxoffrxc[i] + (u64 )pxoffrxc;
/* Map priority i to its traffic class and accumulate. */
tmp___0 = netdev_get_prio_tc_map((struct net_device const *)adapter->netdev, (u32 )i);
tc = (u8 )tmp___0;
xoff[(int )tc] = xoff[(int )tc] + pxoffrxc;
i = i + 1;
ldv_57500: ;
/* Loop bound: the ternary completes on the next physical line
   (8 priorities when flags bit 0x1000 is set, else 1). */
if (((adapter->flags & 4096U) != 0U ? 
/* NOTE(review): CIL/LDV machine-generated code, re-wrapped for readability
   with comments added; the token stream is unchanged. */
/* Tail of ixgbe_update_xoff_received(): "8" completes the ternary begun
   on the previous physical line; then clear state bit 3 (hang-check
   armed) on every TX ring whose traffic class received XOFF frames. */
8 : 1) > i) {
goto ldv_57499;
} else { }
i = 0;
goto ldv_57504;
ldv_57503:
tx_ring = adapter->tx_ring[i];
tc = tx_ring->dcb_tc;
if (xoff[(int )tc] != 0U) {
clear_bit(3L, (unsigned long volatile *)(& tx_ring->state));
} else { }
i = i + 1;
ldv_57504: ;
if (adapter->num_tx_queues > i) {
goto ldv_57503;
} else { }
return; } }
/* Completed-TX progress indicator for hang detection: just the ring's
   packet counter. */
static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring ) { {
return (ring->stats.packets);
} }
/* Number of descriptors still pending on a TX ring, from the hardware
   head (byte offset reg_idx*0x40 + 24592 == 0x6010) and tail
   (reg_idx*0x40 + 24600 == 0x6018) pointers.  Rings owned by an
   l2_accel_priv (forwarding/offload) context resolve the adapter through
   that owner rather than netdev_priv(). */
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring ) { struct ixgbe_adapter *adapter ; struct ixgbe_hw *hw ; u32 head ; u32 tail ; void *tmp ; {
if ((unsigned long )ring->l2_accel_priv != (unsigned long )((struct ixgbe_fwd_adapter *)0)) {
adapter = (ring->l2_accel_priv)->real_adapter;
} else {
tmp = netdev_priv((struct net_device const *)ring->netdev);
adapter = (struct ixgbe_adapter *)tmp;
}
hw = & adapter->hw;
head = ixgbe_read_reg(hw, (u32 )((int )ring->reg_idx * 64 + 24592));
tail = ixgbe_read_reg(hw, (u32 )((int )ring->reg_idx * 64 + 24600));
/* The wrapped-difference computation completes on the next physical
   line: tail - head, or count + tail - head when head has wrapped. */
if (head != tail) {
return ((u64 )(head < tail ? 
tail - head : ((u32 )ring->count + tail) - head)); } else { } return (0ULL); } } __inline static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring ) { u32 tx_done ; u64 tmp ; u32 tx_done_old ; u32 tx_pending ; u64 tmp___0 ; int tmp___1 ; { tmp = ixgbe_get_tx_completed(tx_ring); tx_done = (u32 )tmp; tx_done_old = (u32 )tx_ring->__annonCompField121.tx_stats.tx_done_old; tmp___0 = ixgbe_get_tx_pending(tx_ring); tx_pending = (u32 )tmp___0; clear_bit(2L, (unsigned long volatile *)(& tx_ring->state)); if (tx_done_old == tx_done && tx_pending != 0U) { tmp___1 = test_and_set_bit(3L, (unsigned long volatile *)(& tx_ring->state)); return (tmp___1 != 0); } else { } tx_ring->__annonCompField121.tx_stats.tx_done_old = (u64 )tx_done; clear_bit(3L, (unsigned long volatile *)(& tx_ring->state)); return (0); } } static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter ) { int tmp ; { tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp == 0) { adapter->flags2 = adapter->flags2 | 64U; if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "initiating reset due to tx timeout\n"); } else { } ixgbe_service_event_schedule(adapter); } else { } return; } } static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector , struct ixgbe_ring *tx_ring ) { struct ixgbe_adapter *adapter ; struct ixgbe_tx_buffer *tx_buffer ; union ixgbe_adv_tx_desc *tx_desc ; unsigned int total_bytes ; unsigned int total_packets ; unsigned int budget ; unsigned int i ; int tmp ; union ixgbe_adv_tx_desc *eop_desc ; long tmp___0 ; long tmp___1 ; long tmp___2 ; struct ixgbe_hw *hw ; u32 tmp___3 ; u32 tmp___4 ; int tmp___5 ; bool tmp___6 ; struct netdev_queue *tmp___7 ; bool tmp___8 ; int tmp___9 ; bool tmp___10 ; u16 tmp___11 ; int tmp___12 ; long tmp___13 ; { adapter = q_vector->adapter; total_bytes = 0U; total_packets = 0U; budget = (unsigned int )q_vector->tx.work_limit; i = (unsigned int )tx_ring->next_to_clean; tmp = 
constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp != 0) { return (1); } else { } tx_buffer = tx_ring->__annonCompField118.tx_buffer_info + (unsigned long )i; tx_desc = (union ixgbe_adv_tx_desc *)tx_ring->desc + (unsigned long )i; i = i - (unsigned int )tx_ring->count; ldv_57541: eop_desc = tx_buffer->next_to_watch; if ((unsigned long )eop_desc == (unsigned long )((union ixgbe_adv_tx_desc *)0)) { goto ldv_57537; } else { } if ((eop_desc->wb.status & 1U) == 0U) { goto ldv_57537; } else { } tx_buffer->next_to_watch = (union ixgbe_adv_tx_desc *)0; total_bytes = tx_buffer->bytecount + total_bytes; total_packets = (unsigned int )tx_buffer->gso_segs + total_packets; dev_consume_skb_any(tx_buffer->skb); dma_unmap_single_attrs(tx_ring->dev, tx_buffer->dma, (size_t )tx_buffer->len, 1, (struct dma_attrs *)0); tx_buffer->skb = (struct sk_buff *)0; tx_buffer->len = 0U; goto ldv_57539; ldv_57538: tx_buffer = tx_buffer + 1; tx_desc = tx_desc + 1; i = i + 1U; tmp___0 = ldv__builtin_expect(i == 0U, 0L); if (tmp___0 != 0L) { i = i - (unsigned int )tx_ring->count; tx_buffer = tx_ring->__annonCompField118.tx_buffer_info; tx_desc = (union ixgbe_adv_tx_desc *)tx_ring->desc; } else { } if (tx_buffer->len != 0U) { dma_unmap_page(tx_ring->dev, tx_buffer->dma, (size_t )tx_buffer->len, 1); tx_buffer->len = 0U; } else { } ldv_57539: ; if ((unsigned long )tx_desc != (unsigned long )eop_desc) { goto ldv_57538; } else { } tx_buffer = tx_buffer + 1; tx_desc = tx_desc + 1; i = i + 1U; tmp___1 = ldv__builtin_expect(i == 0U, 0L); if (tmp___1 != 0L) { i = i - (unsigned int )tx_ring->count; tx_buffer = tx_ring->__annonCompField118.tx_buffer_info; tx_desc = (union ixgbe_adv_tx_desc *)tx_ring->desc; } else { } __builtin_prefetch((void const *)tx_desc); budget = budget - 1U; tmp___2 = ldv__builtin_expect(budget != 0U, 1L); if (tmp___2 != 0L) { goto ldv_57541; } else { } ldv_57537: i = (unsigned int )tx_ring->count + i; tx_ring->next_to_clean = (u16 )i; u64_stats_init(& 
tx_ring->syncp); tx_ring->stats.bytes = tx_ring->stats.bytes + (u64 )total_bytes; tx_ring->stats.packets = tx_ring->stats.packets + (u64 )total_packets; u64_stats_init(& tx_ring->syncp); q_vector->tx.total_bytes = q_vector->tx.total_bytes + total_bytes; q_vector->tx.total_packets = q_vector->tx.total_packets + total_packets; tmp___5 = constant_test_bit(2L, (unsigned long const volatile *)(& tx_ring->state)); if (tmp___5 != 0) { tmp___6 = ixgbe_check_tx_hang(tx_ring); if ((int )tmp___6) { hw = & adapter->hw; if ((int )adapter->msg_enable & 1) { tmp___3 = ixgbe_read_reg(hw, (u32 )((int )tx_ring->reg_idx * 64 + 24600)); tmp___4 = ixgbe_read_reg(hw, (u32 )((int )tx_ring->reg_idx * 64 + 24592)); netdev_err((struct net_device const *)adapter->netdev, "Detected Tx Unit Hang\n Tx Queue <%d>\n TDH, TDT <%x>, <%x>\n next_to_use <%x>\n next_to_clean <%x>\ntx_buffer_info[next_to_clean]\n time_stamp <%lx>\n jiffies <%lx>\n", (int )tx_ring->queue_index, tmp___4, tmp___3, (int )tx_ring->next_to_use, i, (tx_ring->__annonCompField118.tx_buffer_info + (unsigned long )i)->time_stamp, jiffies); } else { } netif_stop_subqueue(tx_ring->netdev, (int )tx_ring->queue_index); if (((int )adapter->msg_enable & 2) != 0) { netdev_info((struct net_device const *)adapter->netdev, "tx hang %d detected on queue %d, resetting adapter\n", adapter->tx_timeout_count + 1U, (int )tx_ring->queue_index); } else { } ixgbe_tx_timeout_reset(adapter); return (1); } else { } } else { } tmp___7 = txring_txq((struct ixgbe_ring const *)tx_ring); netdev_tx_completed_queue(tmp___7, total_packets, total_bytes); if (total_packets != 0U) { tmp___10 = netif_carrier_ok((struct net_device const *)tx_ring->netdev); if ((int )tmp___10) { tmp___11 = ixgbe_desc_unused(tx_ring); if ((unsigned int )tmp___11 > 41U) { tmp___12 = 1; } else { tmp___12 = 0; } } else { tmp___12 = 0; } } else { tmp___12 = 0; } tmp___13 = ldv__builtin_expect((long )tmp___12, 0L); if (tmp___13 != 0L) { __asm__ volatile ("mfence": : : "memory"); tmp___8 
= __netif_subqueue_stopped((struct net_device const *)tx_ring->netdev, (int )tx_ring->queue_index); if ((int )tmp___8) { tmp___9 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___9 == 0) { netif_wake_subqueue(tx_ring->netdev, (int )tx_ring->queue_index); tx_ring->__annonCompField121.tx_stats.restart_queue = tx_ring->__annonCompField121.tx_stats.restart_queue + 1ULL; } else { } } else { } } else { } return (budget != 0U); } } static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter , struct ixgbe_ring *tx_ring , int cpu ) { struct ixgbe_hw *hw ; u32 txctrl ; u8 tmp ; u16 reg_offset ; { hw = & adapter->hw; tmp = dca3_get_tag(tx_ring->dev, cpu); txctrl = (u32 )tmp; switch ((unsigned int )hw->mac.type) { case 1U: reg_offset = (unsigned int )((u16 )((int )tx_ring->reg_idx + 7296)) * 4U; goto ldv_57552; case 2U: ; case 3U: reg_offset = (unsigned int )((u16 )tx_ring->reg_idx) * 64U + 24588U; txctrl = txctrl << 24; goto ldv_57552; default: ; return; } ldv_57552: txctrl = txctrl | 8736U; ixgbe_write_reg(hw, (u32 )reg_offset, txctrl); return; } } static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter , struct ixgbe_ring *rx_ring , int cpu ) { struct ixgbe_hw *hw ; u32 rxctrl ; u8 tmp ; u8 reg_idx ; { hw = & adapter->hw; tmp = dca3_get_tag(rx_ring->dev, cpu); rxctrl = (u32 )tmp; reg_idx = rx_ring->reg_idx; switch ((unsigned int )hw->mac.type) { case 2U: ; case 3U: rxctrl = rxctrl << 24; goto ldv_57566; default: ; goto ldv_57566; } ldv_57566: rxctrl = rxctrl | 544U; ixgbe_write_reg(hw, (u32 )((unsigned int )reg_idx <= 15U ? ((int )reg_idx + 2176) * 4 : ((unsigned int )reg_idx <= 63U ? 
(int )reg_idx * 64 + 4108 : ((int )reg_idx + -64) * 64 + 53260)), rxctrl); return; } } static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector ) { struct ixgbe_adapter *adapter ; struct ixgbe_ring *ring ; int cpu ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; { adapter = q_vector->adapter; __preempt_count_add(1); __asm__ volatile ("": : : "memory"); __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_57579; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_57579; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_57579; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_57579; default: __bad_percpu_size(); } ldv_57579: pscr_ret__ = pfo_ret__; goto ldv_57585; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_57589; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_57589; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_57589; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_57589; default: __bad_percpu_size(); } ldv_57589: pscr_ret__ = pfo_ret_____0; goto ldv_57585; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_57598; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_57598; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_57598; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_57598; default: __bad_percpu_size(); } ldv_57598: pscr_ret__ = pfo_ret_____1; goto ldv_57585; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb 
%%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_57607; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_57607; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_57607; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_57607; default: __bad_percpu_size(); } ldv_57607: pscr_ret__ = pfo_ret_____2; goto ldv_57585; default: __bad_size_call_parameter(); goto ldv_57585; } ldv_57585: cpu = pscr_ret__; if (q_vector->cpu == cpu) { goto out_no_update; } else { } ring = q_vector->tx.ring; goto ldv_57618; ldv_57617: ixgbe_update_tx_dca(adapter, ring, cpu); ring = ring->next; ldv_57618: ; if ((unsigned long )ring != (unsigned long )((struct ixgbe_ring *)0)) { goto ldv_57617; } else { } ring = q_vector->rx.ring; goto ldv_57621; ldv_57620: ixgbe_update_rx_dca(adapter, ring, cpu); ring = ring->next; ldv_57621: ; if ((unsigned long )ring != (unsigned long )((struct ixgbe_ring *)0)) { goto ldv_57620; } else { } q_vector->cpu = cpu; out_no_update: __asm__ volatile ("": : : "memory"); __preempt_count_sub(1); return; } } static void ixgbe_setup_dca(struct ixgbe_adapter *adapter ) { int i ; { if ((adapter->flags & 256U) == 0U) { return; } else { } ixgbe_write_reg(& adapter->hw, 69748U, 2U); i = 0; goto ldv_57628; ldv_57627: (adapter->q_vector[i])->cpu = -1; ixgbe_update_dca(adapter->q_vector[i]); i = i + 1; ldv_57628: ; if (adapter->num_q_vectors > i) { goto ldv_57627; } else { } return; } } static int __ixgbe_notify_dca(struct device *dev , void *data ) { struct ixgbe_adapter *adapter ; void *tmp ; unsigned long event ; int tmp___0 ; { tmp = dev_get_drvdata((struct device const *)dev); adapter = (struct ixgbe_adapter *)tmp; event = *((unsigned long *)data); if ((adapter->flags & 512U) == 0U) { return (0); } else { } switch (event) { case 1UL: ; if ((adapter->flags & 256U) != 0U) { goto ldv_57637; } else { } tmp___0 = dca_add_requester(dev); if (tmp___0 == 0) 
{ adapter->flags = adapter->flags | 256U; ixgbe_setup_dca(adapter); goto ldv_57637; } else { } case 2UL: ; if ((adapter->flags & 256U) != 0U) { dca_remove_requester(dev); adapter->flags = adapter->flags & 4294967039U; ixgbe_write_reg(& adapter->hw, 69748U, 1U); } else { } goto ldv_57637; } ldv_57637: ; return (0); } } __inline static void ixgbe_rx_hash(struct ixgbe_ring *ring , union ixgbe_adv_rx_desc *rx_desc , struct sk_buff *skb ) { { if (((ring->netdev)->features & 8589934592ULL) != 0ULL) { skb_set_hash(skb, rx_desc->wb.lower.hi_dword.rss, 2); } else { } return; } } __inline static bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring , union ixgbe_adv_rx_desc *rx_desc ) { __le16 pkt_info ; int tmp ; { pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; tmp = constant_test_bit(6L, (unsigned long const volatile *)(& ring->state)); return ((bool )(tmp != 0 && ((int )pkt_info & 112) == 32)); } } __inline static void ixgbe_rx_checksum(struct ixgbe_ring *ring , union ixgbe_adv_rx_desc *rx_desc , struct sk_buff *skb ) { __le16 pkt_info ; __le16 hdr_info ; bool encap_pkt ; __le32 tmp ; __le32 tmp___0 ; __le32 tmp___1 ; int tmp___2 ; __le32 tmp___3 ; __le32 tmp___4 ; __le32 tmp___5 ; { pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; encap_pkt = 0; skb_checksum_none_assert((struct sk_buff const *)skb); if (((ring->netdev)->features & 17179869184ULL) == 0ULL) { return; } else { } if (((int )pkt_info & 2048) != 0 && (int )hdr_info & 1) { encap_pkt = 1; skb->encapsulation = 1U; skb->ip_summed = 0U; } else { } tmp = ixgbe_test_staterr(rx_desc, 64U); if (tmp != 0U) { tmp___0 = ixgbe_test_staterr(rx_desc, 2147483648U); if (tmp___0 != 0U) { ring->__annonCompField121.rx_stats.csum_err = ring->__annonCompField121.rx_stats.csum_err + 1ULL; return; } else { } } else { } tmp___1 = ixgbe_test_staterr(rx_desc, 32U); if (tmp___1 == 0U) { return; } else { } tmp___3 = ixgbe_test_staterr(rx_desc, 1073741824U); if (tmp___3 != 0U) { 
if (((int )pkt_info & 512) != 0) { tmp___2 = constant_test_bit(5L, (unsigned long const volatile *)(& ring->state)); if (tmp___2 != 0) { return; } else { } } else { } ring->__annonCompField121.rx_stats.csum_err = ring->__annonCompField121.rx_stats.csum_err + 1ULL; return; } else { } skb->ip_summed = 1U; if ((int )encap_pkt) { tmp___4 = ixgbe_test_staterr(rx_desc, 256U); if (tmp___4 == 0U) { return; } else { } tmp___5 = ixgbe_test_staterr(rx_desc, 67108864U); if (tmp___5 != 0U) { ring->__annonCompField121.rx_stats.csum_err = ring->__annonCompField121.rx_stats.csum_err + 1ULL; return; } else { } skb->csum_level = 1U; } else { } return; } } static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring , struct ixgbe_rx_buffer *bi ) { struct page *page ; dma_addr_t dma ; long tmp ; unsigned int tmp___0 ; long tmp___1 ; unsigned int tmp___2 ; unsigned int tmp___3 ; int tmp___4 ; { page = bi->page; tmp = ldv__builtin_expect((unsigned long )page != (unsigned long )((struct page *)0), 1L); if (tmp != 0L) { return (1); } else { } tmp___0 = ixgbe_rx_pg_order(rx_ring); page = dev_alloc_pages(tmp___0); tmp___1 = ldv__builtin_expect((unsigned long )page == (unsigned long )((struct page *)0), 0L); if (tmp___1 != 0L) { rx_ring->__annonCompField121.rx_stats.alloc_rx_page_failed = rx_ring->__annonCompField121.rx_stats.alloc_rx_page_failed + 1ULL; return (0); } else { } tmp___2 = ixgbe_rx_pg_order(rx_ring); dma = dma_map_page(rx_ring->dev, page, 0UL, 4096UL << (int )tmp___2, 2); tmp___4 = dma_mapping_error(rx_ring->dev, dma); if (tmp___4 != 0) { tmp___3 = ixgbe_rx_pg_order(rx_ring); __free_pages(page, tmp___3); rx_ring->__annonCompField121.rx_stats.alloc_rx_page_failed = rx_ring->__annonCompField121.rx_stats.alloc_rx_page_failed + 1ULL; return (0); } else { } bi->dma = dma; bi->page = page; bi->page_offset = 0U; return (1); } } void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring , u16 cleaned_count ) { union ixgbe_adv_rx_desc *rx_desc ; struct ixgbe_rx_buffer *bi ; u16 i ; bool 
tmp ; int tmp___0 ; long tmp___1 ; { i = rx_ring->next_to_use; if ((unsigned int )cleaned_count == 0U) { return; } else { } rx_desc = (union ixgbe_adv_rx_desc *)rx_ring->desc + (unsigned long )i; bi = rx_ring->__annonCompField118.rx_buffer_info + (unsigned long )i; i = (int )i - (int )rx_ring->count; ldv_57671: tmp = ixgbe_alloc_mapped_page(rx_ring, bi); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_57670; } else { } rx_desc->read.pkt_addr = bi->dma + (dma_addr_t )bi->page_offset; rx_desc = rx_desc + 1; bi = bi + 1; i = (u16 )((int )i + 1); tmp___1 = ldv__builtin_expect((unsigned int )i == 0U, 0L); if (tmp___1 != 0L) { rx_desc = (union ixgbe_adv_rx_desc *)rx_ring->desc; bi = rx_ring->__annonCompField118.rx_buffer_info; i = (int )i - (int )rx_ring->count; } else { } rx_desc->wb.upper.status_error = 0U; cleaned_count = (u16 )((int )cleaned_count - 1); if ((unsigned int )cleaned_count != 0U) { goto ldv_57671; } else { } ldv_57670: i = (int )rx_ring->count + (int )i; if ((int )rx_ring->next_to_use != (int )i) { rx_ring->next_to_use = i; rx_ring->__annonCompField120.next_to_alloc = i; __asm__ volatile ("sfence": : : "memory"); writel((unsigned int )i, (void volatile *)rx_ring->tail); } else { } return; } } static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring , struct sk_buff *skb ) { u16 hdr_len ; unsigned int tmp ; unsigned char *tmp___0 ; unsigned char *tmp___1 ; { tmp = skb_headlen((struct sk_buff const *)skb); hdr_len = (u16 )tmp; tmp___0 = skb_end_pointer((struct sk_buff const *)skb); ((struct skb_shared_info *)tmp___0)->gso_size = (unsigned short )((((skb->len - (unsigned int )hdr_len) + (unsigned int )((struct ixgbe_cb *)(& skb->cb))->append_cnt) - 1U) / (unsigned int )((struct ixgbe_cb *)(& skb->cb))->append_cnt); tmp___1 = skb_end_pointer((struct sk_buff const *)skb); ((struct skb_shared_info *)tmp___1)->gso_type = 1U; return; } } static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring , struct sk_buff *skb ) { { if 
((unsigned int )((struct ixgbe_cb *)(& skb->cb))->append_cnt == 0U) { return; } else { } rx_ring->__annonCompField121.rx_stats.rsc_count = rx_ring->__annonCompField121.rx_stats.rsc_count + (u64 )((struct ixgbe_cb *)(& skb->cb))->append_cnt; rx_ring->__annonCompField121.rx_stats.rsc_flush = rx_ring->__annonCompField121.rx_stats.rsc_flush + 1ULL; ixgbe_set_rsc_gso_size(rx_ring, skb); ((struct ixgbe_cb *)(& skb->cb))->append_cnt = 0U; return; } } static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring , union ixgbe_adv_rx_desc *rx_desc , struct sk_buff *skb ) { struct net_device *dev ; __le32 tmp ; long tmp___0 ; u16 vid ; __le32 tmp___1 ; { dev = rx_ring->netdev; ixgbe_update_rsc_stats(rx_ring, skb); ixgbe_rx_hash(rx_ring, rx_desc, skb); ixgbe_rx_checksum(rx_ring, rx_desc, skb); tmp = ixgbe_test_staterr(rx_desc, 65536U); tmp___0 = ldv__builtin_expect(tmp != 0U, 0L); if (tmp___0 != 0L) { ixgbe_ptp_rx_hwtstamp((rx_ring->q_vector)->adapter, skb); } else { } if ((dev->features & 256ULL) != 0ULL) { tmp___1 = ixgbe_test_staterr(rx_desc, 8U); if (tmp___1 != 0U) { vid = rx_desc->wb.upper.vlan; __vlan_hwaccel_put_tag(skb, 129, (int )vid); } else { } } else { } skb_record_rx_queue(skb, (int )rx_ring->queue_index); skb->protocol = eth_type_trans(skb, dev); return; } } static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector , struct sk_buff *skb ) { bool tmp ; { tmp = ixgbe_qv_busy_polling(q_vector); if ((int )tmp) { netif_receive_skb(skb); } else { napi_gro_receive(& q_vector->napi, skb); } return; } } static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring , union ixgbe_adv_rx_desc *rx_desc , struct sk_buff *skb ) { u32 ntc ; __le32 rsc_enabled ; u32 rsc_cnt ; long tmp ; int tmp___0 ; __le32 tmp___1 ; long tmp___2 ; { ntc = (u32 )((int )rx_ring->next_to_clean + 1); ntc = (u32 )rx_ring->count > ntc ? 
ntc : 0U; rx_ring->next_to_clean = (u16 )ntc; __builtin_prefetch((void const *)rx_ring->desc + (unsigned long )ntc); tmp___0 = constant_test_bit(4L, (unsigned long const volatile *)(& rx_ring->state)); if (tmp___0 != 0) { rsc_enabled = rx_desc->wb.lower.lo_dword.data & 1966080U; tmp = ldv__builtin_expect(rsc_enabled != 0U, 0L); if (tmp != 0L) { rsc_cnt = rsc_enabled; rsc_cnt = rsc_cnt >> 17; ((struct ixgbe_cb *)(& skb->cb))->append_cnt = (unsigned int )((int )((struct ixgbe_cb *)(& skb->cb))->append_cnt + (int )((u16 )rsc_cnt)) - 1U; ntc = rx_desc->wb.upper.status_error; ntc = ntc & 1048560U; ntc = ntc >> 4; } else { } } else { } tmp___1 = ixgbe_test_staterr(rx_desc, 2U); tmp___2 = ldv__builtin_expect(tmp___1 != 0U, 1L); if (tmp___2 != 0L) { return (0); } else { } (rx_ring->__annonCompField118.rx_buffer_info + (unsigned long )ntc)->skb = skb; rx_ring->__annonCompField121.rx_stats.non_eop_descs = rx_ring->__annonCompField121.rx_stats.non_eop_descs + 1ULL; return (1); } } static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring , struct sk_buff *skb ) { struct skb_frag_struct *frag ; unsigned char *tmp ; unsigned char *va ; unsigned int pull_len ; void *tmp___0 ; { tmp = skb_end_pointer((struct sk_buff const *)skb); frag = (struct skb_frag_struct *)(& ((struct skb_shared_info *)tmp)->frags); tmp___0 = skb_frag_address((skb_frag_t const *)frag); va = (unsigned char *)tmp___0; pull_len = eth_get_headlen((void *)va, 256U); skb_copy_to_linear_data(skb, (void const *)va, (pull_len + 7U) & 4294967288U); skb_frag_size_sub(frag, (int )pull_len); frag->page_offset = frag->page_offset + pull_len; skb->data_len = skb->data_len - pull_len; skb->tail = skb->tail + pull_len; return; } } static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring , struct sk_buff *skb ) { unsigned int tmp ; struct skb_frag_struct *frag ; unsigned char *tmp___0 ; unsigned int tmp___1 ; long tmp___2 ; { tmp___2 = ldv__builtin_expect((long )((struct ixgbe_cb *)(& skb->cb))->page_released, 0L); if 
(tmp___2 != 0L) { tmp = ixgbe_rx_pg_order(rx_ring); dma_unmap_page(rx_ring->dev, ((struct ixgbe_cb *)(& skb->cb))->dma, 4096UL << (int )tmp, 2); ((struct ixgbe_cb *)(& skb->cb))->page_released = 0; } else { tmp___0 = skb_end_pointer((struct sk_buff const *)skb); frag = (struct skb_frag_struct *)(& ((struct skb_shared_info *)tmp___0)->frags); tmp___1 = ixgbe_rx_bufsz(rx_ring); dma_sync_single_range_for_cpu(rx_ring->dev, ((struct ixgbe_cb *)(& skb->cb))->dma, (unsigned long )frag->page_offset, (size_t )tmp___1, 2); } ((struct ixgbe_cb *)(& skb->cb))->dma = 0ULL; return; } } static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring , union ixgbe_adv_rx_desc *rx_desc , struct sk_buff *skb ) { struct net_device *netdev ; __le32 tmp ; long tmp___0 ; bool tmp___1 ; bool tmp___2 ; int tmp___3 ; { netdev = rx_ring->netdev; tmp = ixgbe_test_staterr(rx_desc, 989855744U); tmp___0 = ldv__builtin_expect((long )(tmp != 0U && (netdev->features & 274877906944ULL) == 0ULL), 0L); if (tmp___0 != 0L) { dev_kfree_skb_any(skb); return (1); } else { } tmp___1 = skb_is_nonlinear((struct sk_buff const *)skb); if ((int )tmp___1) { ixgbe_pull_tail(rx_ring, skb); } else { } tmp___2 = ixgbe_rx_is_fcoe(rx_ring, rx_desc); if ((int )tmp___2) { return (0); } else { } tmp___3 = eth_skb_pad(skb); if (tmp___3 != 0) { return (1); } else { } return (0); } } static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring , struct ixgbe_rx_buffer *old_buff ) { struct ixgbe_rx_buffer *new_buff ; u16 nta ; unsigned int tmp ; { nta = rx_ring->__annonCompField120.next_to_alloc; new_buff = rx_ring->__annonCompField118.rx_buffer_info + (unsigned long )nta; nta = (u16 )((int )nta + 1); rx_ring->__annonCompField120.next_to_alloc = (int )rx_ring->count > (int )nta ? 
nta : 0U; *new_buff = *old_buff; tmp = ixgbe_rx_bufsz(rx_ring); dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, (unsigned long )new_buff->page_offset, (size_t )tmp, 2); return; } } __inline static bool ixgbe_page_is_reserved(struct page *page ) { int tmp ; int tmp___0 ; { tmp = page_to_nid((struct page const *)page); tmp___0 = numa_mem_id(); return ((bool )(tmp != tmp___0 || (int )page->__annonCompField42.__annonCompField37.pfmemalloc)); } } static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring , struct ixgbe_rx_buffer *rx_buffer , union ixgbe_adv_rx_desc *rx_desc , struct sk_buff *skb ) { struct page *page ; unsigned int size ; unsigned int truesize ; unsigned int tmp ; unsigned char *va ; void *tmp___0 ; unsigned char *tmp___1 ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; unsigned int tmp___5 ; bool tmp___6 ; int tmp___7 ; unsigned char *tmp___8 ; bool tmp___9 ; long tmp___10 ; int tmp___11 ; long tmp___12 ; { page = rx_buffer->page; size = (unsigned int )rx_desc->wb.upper.length; tmp = ixgbe_rx_bufsz(rx_ring); truesize = tmp; if (size <= 256U) { tmp___6 = skb_is_nonlinear((struct sk_buff const *)skb); if (tmp___6) { tmp___7 = 0; } else { tmp___7 = 1; } if (tmp___7) { tmp___0 = lowmem_page_address((struct page const *)page); va = (unsigned char *)tmp___0 + (unsigned long )rx_buffer->page_offset; tmp___1 = __skb_put(skb, size); memcpy((void *)tmp___1, (void const *)va, (size_t )(size + 7U) & 4294967288UL); tmp___2 = ixgbe_page_is_reserved(page); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } tmp___4 = ldv__builtin_expect((long )tmp___3, 1L); if (tmp___4 != 0L) { return (1); } else { } tmp___5 = ixgbe_rx_pg_order(rx_ring); __free_pages(page, tmp___5); return (0); } else { } } else { } tmp___8 = skb_end_pointer((struct sk_buff const *)skb); skb_add_rx_frag(skb, (int )((struct skb_shared_info *)tmp___8)->nr_frags, page, (int )rx_buffer->page_offset, (int )size, truesize); tmp___9 = ixgbe_page_is_reserved(page); tmp___10 = 
ldv__builtin_expect((long )tmp___9, 0L); if (tmp___10 != 0L) { return (0); } else { } tmp___11 = page_count(page); tmp___12 = ldv__builtin_expect(tmp___11 != 1, 0L); if (tmp___12 != 0L) { return (0); } else { } rx_buffer->page_offset = rx_buffer->page_offset ^ truesize; atomic_inc(& page->__annonCompField42.__annonCompField41.__annonCompField40._count); return (1); } } static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring , union ixgbe_adv_rx_desc *rx_desc ) { struct ixgbe_rx_buffer *rx_buffer ; struct sk_buff *skb ; struct page *page ; void *page_addr ; void *tmp ; long tmp___0 ; __le32 tmp___1 ; long tmp___2 ; __le32 tmp___3 ; unsigned int tmp___4 ; long tmp___5 ; unsigned int tmp___6 ; bool tmp___7 ; { rx_buffer = rx_ring->__annonCompField118.rx_buffer_info + (unsigned long )rx_ring->next_to_clean; page = rx_buffer->page; prefetchw((void const *)page); skb = rx_buffer->skb; tmp___5 = ldv__builtin_expect((unsigned long )skb == (unsigned long )((struct sk_buff *)0), 1L); if (tmp___5 != 0L) { tmp = lowmem_page_address((struct page const *)page); page_addr = tmp + (unsigned long )rx_buffer->page_offset; __builtin_prefetch((void const *)page_addr); __builtin_prefetch((void const *)page_addr + 64U); skb = napi_alloc_skb(& (rx_ring->q_vector)->napi, 256U); tmp___0 = ldv__builtin_expect((unsigned long )skb == (unsigned long )((struct sk_buff *)0), 0L); if (tmp___0 != 0L) { rx_ring->__annonCompField121.rx_stats.alloc_rx_buff_failed = rx_ring->__annonCompField121.rx_stats.alloc_rx_buff_failed + 1ULL; return ((struct sk_buff *)0); } else { } prefetchw((void const *)skb->data); tmp___1 = ixgbe_test_staterr(rx_desc, 2U); tmp___2 = ldv__builtin_expect(tmp___1 != 0U, 1L); if (tmp___2 != 0L) { goto dma_sync; } else { } ((struct ixgbe_cb *)(& skb->cb))->dma = rx_buffer->dma; } else { tmp___3 = ixgbe_test_staterr(rx_desc, 2U); if (tmp___3 != 0U) { ixgbe_dma_sync_frag(rx_ring, skb); } else { } dma_sync: tmp___4 = ixgbe_rx_bufsz(rx_ring); 
dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, (unsigned long )rx_buffer->page_offset, (size_t )tmp___4, 2); rx_buffer->skb = (struct sk_buff *)0; } tmp___7 = ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb); if ((int )tmp___7) { ixgbe_reuse_rx_page(rx_ring, rx_buffer); } else if (((struct ixgbe_cb *)(& skb->cb))->dma == rx_buffer->dma) { ((struct ixgbe_cb *)(& skb->cb))->page_released = 1; } else { tmp___6 = ixgbe_rx_pg_order(rx_ring); dma_unmap_page(rx_ring->dev, rx_buffer->dma, 4096UL << (int )tmp___6, 2); } rx_buffer->page = (struct page *)0; return (skb); } } static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector , struct ixgbe_ring *rx_ring , int const budget ) { unsigned int total_rx_bytes ; unsigned int total_rx_packets ; struct ixgbe_adapter *adapter ; int ddp_bytes ; unsigned int mss ; u16 cleaned_count ; u16 tmp ; union ixgbe_adv_rx_desc *rx_desc ; struct sk_buff *skb ; bool tmp___0 ; bool tmp___1 ; bool tmp___2 ; long tmp___3 ; { total_rx_bytes = 0U; total_rx_packets = 0U; adapter = q_vector->adapter; mss = 0U; tmp = ixgbe_desc_unused(rx_ring); cleaned_count = tmp; goto ldv_57760; ldv_57761: ; if ((unsigned int )cleaned_count > 15U) { ixgbe_alloc_rx_buffers(rx_ring, (int )cleaned_count); cleaned_count = 0U; } else { } rx_desc = (union ixgbe_adv_rx_desc *)rx_ring->desc + (unsigned long )rx_ring->next_to_clean; if (rx_desc->wb.upper.status_error == 0U) { goto ldv_57759; } else { } __asm__ volatile ("": : : "memory"); skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); if ((unsigned long )skb == (unsigned long )((struct sk_buff *)0)) { goto ldv_57759; } else { } cleaned_count = (u16 )((int )cleaned_count + 1); tmp___0 = ixgbe_is_non_eop(rx_ring, rx_desc, skb); if ((int )tmp___0) { goto ldv_57760; } else { } tmp___1 = ixgbe_cleanup_headers(rx_ring, rx_desc, skb); if ((int )tmp___1) { goto ldv_57760; } else { } total_rx_bytes = skb->len + total_rx_bytes; ixgbe_process_skb_fields(rx_ring, rx_desc, skb); tmp___2 = ixgbe_rx_is_fcoe(rx_ring, 
rx_desc); if ((int )tmp___2) { ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); if (ddp_bytes > 0) { if (mss == 0U) { mss = (rx_ring->netdev)->mtu - 46U; if (mss > 512U) { mss = mss & 4294966784U; } else { } } else { } total_rx_bytes = total_rx_bytes + (unsigned int )ddp_bytes; total_rx_packets = (((unsigned int )ddp_bytes + mss) - 1U) / mss + total_rx_packets; } else { } if (ddp_bytes == 0) { dev_kfree_skb_any(skb); goto ldv_57760; } else { } } else { } skb_mark_napi_id(skb, & q_vector->napi); ixgbe_rx_skb(q_vector, skb); total_rx_packets = total_rx_packets + 1U; ldv_57760: tmp___3 = ldv__builtin_expect((unsigned int )budget > total_rx_packets, 1L); if (tmp___3 != 0L) { goto ldv_57761; } else { } ldv_57759: u64_stats_init(& rx_ring->syncp); rx_ring->stats.packets = rx_ring->stats.packets + (u64 )total_rx_packets; rx_ring->stats.bytes = rx_ring->stats.bytes + (u64 )total_rx_bytes; u64_stats_init(& rx_ring->syncp); q_vector->rx.total_packets = q_vector->rx.total_packets + total_rx_packets; q_vector->rx.total_bytes = q_vector->rx.total_bytes + total_rx_bytes; return ((int )total_rx_packets); } } static int ixgbe_low_latency_recv(struct napi_struct *napi ) { struct ixgbe_q_vector *q_vector ; struct napi_struct const *__mptr ; struct ixgbe_adapter *adapter ; struct ixgbe_ring *ring ; int found ; int tmp ; bool tmp___0 ; int tmp___1 ; { __mptr = (struct napi_struct const *)napi; q_vector = (struct ixgbe_q_vector *)__mptr + 0xffffffffffffffc0UL; adapter = q_vector->adapter; found = 0; tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp != 0) { return (-1); } else { } tmp___0 = ixgbe_qv_lock_poll(q_vector); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (-2); } else { } ring = q_vector->rx.ring; goto ldv_57773; ldv_57772: found = ixgbe_clean_rx_irq(q_vector, ring, 4); if (found != 0) { ring->stats.cleaned = ring->stats.cleaned + (u64 )found; } else { ring->stats.misses = ring->stats.misses + 1ULL; } if 
(found != 0) { goto ldv_57771; } else { } ring = ring->next; ldv_57773: ; if ((unsigned long )ring != (unsigned long )((struct ixgbe_ring *)0)) { goto ldv_57772; } else { } ldv_57771: ixgbe_qv_unlock_poll(q_vector); return (found); } } static void ixgbe_configure_msix(struct ixgbe_adapter *adapter ) { struct ixgbe_q_vector *q_vector ; int v_idx ; u32 mask ; u32 eitrsel ; struct ixgbe_ring *ring ; { if (adapter->num_vfs > 32U) { eitrsel = (u32 )((1 << (int )(adapter->num_vfs - 32U)) + -1); ixgbe_write_reg(& adapter->hw, 2196U, eitrsel); } else { } v_idx = 0; goto ldv_57789; ldv_57788: q_vector = adapter->q_vector[v_idx]; ring = q_vector->rx.ring; goto ldv_57783; ldv_57782: ixgbe_set_ivar(adapter, 0, (int )ring->reg_idx, (int )((u8 )v_idx)); ring = ring->next; ldv_57783: ; if ((unsigned long )ring != (unsigned long )((struct ixgbe_ring *)0)) { goto ldv_57782; } else { } ring = q_vector->tx.ring; goto ldv_57786; ldv_57785: ixgbe_set_ivar(adapter, 1, (int )ring->reg_idx, (int )((u8 )v_idx)); ring = ring->next; ldv_57786: ; if ((unsigned long )ring != (unsigned long )((struct ixgbe_ring *)0)) { goto ldv_57785; } else { } ixgbe_write_eitr(q_vector); v_idx = v_idx + 1; ldv_57789: ; if (adapter->num_q_vectors > v_idx) { goto ldv_57788; } else { } switch ((unsigned int )adapter->hw.mac.type) { case 1U: ixgbe_set_ivar(adapter, -1, 97, (int )((u8 )v_idx)); goto ldv_57792; case 2U: ; case 3U: ; case 4U: ; case 5U: ixgbe_set_ivar(adapter, -1, 1, (int )((u8 )v_idx)); goto ldv_57792; default: ; goto ldv_57792; } ldv_57792: ixgbe_write_reg(& adapter->hw, (u32 )(v_idx <= 23 ? 
(v_idx + 520) * 4 : (v_idx + 18600) * 4), 1950U); mask = 3222339583U; mask = mask & 2145910783U; ixgbe_write_reg(& adapter->hw, 2064U, mask); return; } } static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector , struct ixgbe_ring_container *ring_container ) { int bytes ; int packets ; u32 timepassed_us ; u64 bytes_perint ; u8 itr_setting ; { bytes = (int )ring_container->total_bytes; packets = (int )ring_container->total_packets; itr_setting = ring_container->itr; if (packets == 0) { return; } else { } timepassed_us = (u32 )((int )q_vector->itr >> 2); if (timepassed_us == 0U) { return; } else { } bytes_perint = (u64 )((u32 )bytes / timepassed_us); switch ((int )itr_setting) { case 0: ; if (bytes_perint > 10ULL) { itr_setting = 1U; } else { } goto ldv_57813; case 1: ; if (bytes_perint > 20ULL) { itr_setting = 2U; } else if (bytes_perint <= 10ULL) { itr_setting = 0U; } else { } goto ldv_57813; case 2: ; if (bytes_perint <= 20ULL) { itr_setting = 1U; } else { } goto ldv_57813; } ldv_57813: ring_container->total_bytes = 0U; ring_container->total_packets = 0U; ring_container->itr = itr_setting; return; } } void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector ) { struct ixgbe_adapter *adapter ; struct ixgbe_hw *hw ; int v_idx ; u32 itr_reg ; { adapter = q_vector->adapter; hw = & adapter->hw; v_idx = (int )q_vector->v_idx; itr_reg = (u32 )q_vector->itr & 4088U; switch ((unsigned int )adapter->hw.mac.type) { case 1U: itr_reg = (itr_reg << 16) | itr_reg; goto ldv_57824; case 2U: ; case 3U: ; case 4U: ; case 5U: itr_reg = itr_reg | 2147483648U; goto ldv_57824; default: ; goto ldv_57824; } ldv_57824: ixgbe_write_reg(hw, (u32 )(v_idx <= 23 ? 
(v_idx + 520) * 4 : (v_idx + 18600) * 4), itr_reg); return; } } static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector ) { u32 new_itr ; u8 current_itr ; u8 _max1 ; u8 _max2 ; { new_itr = (u32 )q_vector->itr; ixgbe_update_itr(q_vector, & q_vector->tx); ixgbe_update_itr(q_vector, & q_vector->rx); _max1 = q_vector->rx.itr; _max2 = q_vector->tx.itr; current_itr = (u8 )((int )_max1 > (int )_max2 ? _max1 : _max2); switch ((int )current_itr) { case 0: new_itr = 40U; goto ldv_57839; case 1: new_itr = 200U; goto ldv_57839; case 2: new_itr = 500U; goto ldv_57839; default: ; goto ldv_57839; } ldv_57839: ; if ((u32 )q_vector->itr != new_itr) { new_itr = (((u32 )q_vector->itr * new_itr) * 10U) / (new_itr * 9U + (u32 )q_vector->itr); q_vector->itr = (u16 )new_itr; ixgbe_write_eitr(q_vector); } else { } return; } } static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 eicr ; int tmp ; u32 speed ; bool link_up ; s32 tmp___0 ; { hw = & adapter->hw; eicr = adapter->interrupt_event; tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp != 0) { return; } else { } if ((adapter->flags2 & 4U) == 0U && (adapter->flags2 & 8U) == 0U) { return; } else { } adapter->flags2 = adapter->flags2 & 4294967287U; switch ((int )hw->device_id) { case 5404: ; if ((eicr & 16777216U) == 0U && (eicr & 1048576U) == 0U) { return; } else { } if ((eicr & 1048576U) == 0U && (unsigned long )hw->mac.ops.check_link != (unsigned long )((s32 (*)(struct ixgbe_hw * , ixgbe_link_speed * , bool * , bool ))0)) { link_up = 0; (*(hw->mac.ops.check_link))(hw, & speed, & link_up, 0); if ((int )link_up) { return; } else { } } else { } tmp___0 = (*(hw->phy.ops.check_overtemp))(hw); if (tmp___0 != -26) { return; } else { } goto ldv_57851; default: ; if ((unsigned int )adapter->hw.mac.type > 2U) { return; } else { } if (((u32 )*(hw->mvals + 11UL) & eicr) == 0U) { return; } else { } goto ldv_57851; } ldv_57851: ; if ((int )adapter->msg_enable & 
/* NOTE(review): CIL-generated (LDV harness) code; comments added only, all
 * code tokens preserved. */
/* Tail of ixgbe_check_overtemp_subtask(): if msg_enable bit 0 is set, log
 * the shared overheat message, then clear the latched interrupt_event. */
1) {
netdev_crit((struct net_device const *)adapter->netdev, "%s\n", (char const *)(& ixgbe_overheat_msg));
} else {
}
adapter->interrupt_event = 0U;
return;
}
}
/* ixgbe_check_fan_failure(): if the adapter advertises fan-fail detection
 * (adapter->flags mask 32768U -- presumably IXGBE_FLAG_FAN_FAIL_CAPABLE,
 * TODO confirm) and the matching GPI cause (hw->mvals[12]) is set in @eicr,
 * log a critical message and ack the cause by writing EICR (reg 2048U). */
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter , u32 eicr ) {
struct ixgbe_hw *hw ;
{
hw = & adapter->hw;
if ((adapter->flags & 32768U) != 0U && ((u32 )*(hw->mvals + 12UL) & eicr) != 0U) {
if (((int )adapter->msg_enable & 2) != 0) {
netdev_crit((struct net_device const *)adapter->netdev, "Fan has stopped, replace the adapter\n");
} else {
}
ixgbe_write_reg(hw, 2048U, *(hw->mvals + 12UL)); /* ack cause in EICR */
} else {
}
return;
}
}
/* ixgbe_check_overtemp_event(): on an over-temperature interrupt cause,
 * latch @eicr into adapter->interrupt_event, set flags2 bit 8U and kick the
 * service task (mac.type 2U); for mac.type 3U only log.  Disabled adapters
 * (flags2 bit 4U clear), other MACs, and a down adapter (state bit 2) are
 * ignored. */
static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter , u32 eicr ) {
struct ixgbe_hw *hw ;
int tmp ;
{
hw = & adapter->hw;
if ((adapter->flags2 & 4U) == 0U) {
return; /* overtemp handling not enabled on this adapter */
} else {
}
switch ((unsigned int )adapter->hw.mac.type) {
case 2U: ;
if (((u32 )*(hw->mvals + 11UL) & eicr) != 0U || (eicr & 1048576U) != 0U) {
tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state));
if (tmp == 0) {
adapter->interrupt_event = eicr;
adapter->flags2 = adapter->flags2 | 8U; /* request overtemp subtask */
ixgbe_service_event_schedule(adapter);
return;
} else {
}
} else {
}
return;
case 3U: ;
if ((eicr & 8388608U) == 0U) {
return;
} else {
}
goto ldv_57865;
default: ;
return;
}
ldv_57865: ;
if ((int )adapter->msg_enable & 1) {
netdev_crit((struct net_device const *)adapter->netdev, "%s\n", (char const *)(& ixgbe_overheat_msg));
} else {
}
return;
}
}
/* ixgbe_is_sfp(): true when the port's media is SFP-based; decided by
 * phy.type (== 10U) for mac.type 1U, and by mac.ops.get_media_type()
 * returning 1U or 2U for mac.type 2U/5U (enum values folded -- TODO confirm
 * which media types these are). */
__inline static bool ixgbe_is_sfp(struct ixgbe_hw *hw ) {
enum ixgbe_media_type tmp ;
{
switch ((unsigned int )hw->mac.type) {
case 1U: ;
if ((unsigned int )hw->phy.type == 10U) {
return (1);
} else {
}
return (0);
case 2U: ; /* fallthrough */
case 5U:
tmp = (*(hw->mac.ops.get_media_type))(hw);
switch ((unsigned int )tmp) {
case 1U: ;
case 2U: ;
return (1);
default: ;
return (0);
}
default: ;
return (0);
}
}
}
/* ixgbe_check_sfp_event(): begins here and continues on the next source
 * line -- acks SFP-related interrupt causes and schedules the service task
 * unless the adapter is down. */
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter , u32 eicr ) {
struct ixgbe_hw *hw ;
int tmp ;
int tmp___0 ;
{
hw = & adapter->hw;
if (((u32
)*(hw->mvals + 13UL) & eicr) != 0U) { ixgbe_write_reg(hw, 2048U, *(hw->mvals + 13UL)); tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp == 0) { adapter->flags2 = adapter->flags2 | 32U; ixgbe_service_event_schedule(adapter); } else { } } else { } if (((u32 )*(hw->mvals + 12UL) & eicr) != 0U) { ixgbe_write_reg(hw, 2048U, *(hw->mvals + 12UL)); tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 == 0) { adapter->flags = adapter->flags | 131072U; ixgbe_service_event_schedule(adapter); } else { } } else { } return; } } static void ixgbe_check_lsc(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int tmp ; { hw = & adapter->hw; adapter->lsc_int = adapter->lsc_int + 1ULL; adapter->flags = adapter->flags | 65536U; adapter->link_check_timeout = jiffies; tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp == 0) { ixgbe_write_reg(hw, 2184U, 1048576U); ixgbe_read_reg(hw, 8U); ixgbe_service_event_schedule(adapter); } else { } return; } } __inline static void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter , u64 qmask ) { u32 mask ; struct ixgbe_hw *hw ; { hw = & adapter->hw; switch ((unsigned int )hw->mac.type) { case 1U: mask = (u32 )qmask & 65535U; ixgbe_write_reg(hw, 2176U, mask); goto ldv_57893; case 2U: ; case 3U: ; case 4U: ; case 5U: mask = (u32 )qmask; if (mask != 0U) { ixgbe_write_reg(hw, 2720U, mask); } else { } mask = (u32 )(qmask >> 32); if (mask != 0U) { ixgbe_write_reg(hw, 2724U, mask); } else { } goto ldv_57893; default: ; goto ldv_57893; } ldv_57893: ; return; } } __inline static void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter , u64 qmask ) { u32 mask ; struct ixgbe_hw *hw ; { hw = & adapter->hw; switch ((unsigned int )hw->mac.type) { case 1U: mask = (u32 )qmask & 65535U; ixgbe_write_reg(hw, 2184U, mask); goto ldv_57906; case 2U: ; case 3U: ; case 4U: ; case 5U: mask = (u32 )qmask; if (mask != 0U) { ixgbe_write_reg(hw, 
2736U, mask); } else { } mask = (u32 )(qmask >> 32); if (mask != 0U) { ixgbe_write_reg(hw, 2740U, mask); } else { } goto ldv_57906; default: ; goto ldv_57906; } ldv_57906: ; return; } } __inline static void ixgbe_irq_enable(struct ixgbe_adapter *adapter , bool queues , bool flush ) { struct ixgbe_hw *hw ; u32 mask ; { hw = & adapter->hw; mask = 3222274048U; if ((adapter->flags & 65536U) != 0U) { mask = mask & 4293918719U; } else { } if ((adapter->flags2 & 4U) != 0U) { switch ((unsigned int )adapter->hw.mac.type) { case 2U: mask = (u32 )*(hw->mvals + 11UL) | mask; goto ldv_57920; case 3U: ; case 4U: ; case 5U: mask = mask | 8388608U; goto ldv_57920; default: ; goto ldv_57920; } ldv_57920: ; } else { } if ((adapter->flags & 32768U) != 0U) { mask = (u32 )*(hw->mvals + 12UL) | mask; } else { } switch ((unsigned int )adapter->hw.mac.type) { case 2U: mask = (u32 )*(hw->mvals + 12UL) | mask; mask = (u32 )*(hw->mvals + 13UL) | mask; case 3U: ; case 4U: ; case 5U: ; if ((unsigned int )adapter->hw.phy.type == 6U) { mask = mask | 33554432U; } else { } mask = mask | 268435456U; mask = mask | 524288U; goto ldv_57929; default: ; goto ldv_57929; } ldv_57929: ; if ((adapter->flags & 262144U) != 0U && (adapter->flags2 & 128U) == 0U) { mask = mask | 65536U; } else { } ixgbe_write_reg(& adapter->hw, 2176U, mask); if ((int )queues) { ixgbe_irq_enable_queues(adapter, 0xffffffffffffffffULL); } else { } if ((int )flush) { ixgbe_read_reg(& adapter->hw, 8U); } else { } return; } } static irqreturn_t ixgbe_msix_other(int irq , void *data ) { struct ixgbe_adapter *adapter ; struct ixgbe_hw *hw ; u32 eicr ; int reinit_count ; int i ; struct ixgbe_ring *ring ; int tmp ; long tmp___0 ; int tmp___1 ; { adapter = (struct ixgbe_adapter *)data; hw = & adapter->hw; eicr = ixgbe_read_reg(hw, 2056U); eicr = eicr & 4294901760U; ixgbe_write_reg(hw, 2048U, eicr); if ((eicr & 1048576U) != 0U) { ixgbe_check_lsc(adapter); } else { } if ((eicr & 524288U) != 0U) { ixgbe_msg_task(adapter); } else { } switch 
/* (cont.) Remainder of ixgbe_msix_other: per-MAC-type handling of overtemp
 * (phy.type 6U + bit 0x2000000), ECC error (0x10000000 -> log + schedule a
 * reset via flags2 bit 64U), and flow-director table-init complete (0x10000):
 * the ldv_57945/57946 goto pair is a CIL-lowered loop counting tx rings whose
 * state bit 0 was set, after which FDIR reinit work is scheduled. Finishes
 * with SFP/overtemp/fan checks, an optional PPS check (ldv__builtin_expect is
 * the harness's likely/unlikely stub), and re-enables other causes if the
 * adapter is not "down". Always returns 1 (IRQ_HANDLED). */
((unsigned int )hw->mac.type) { case 2U: ; case 3U: ; case 4U: ; case 5U: ; if ((unsigned int )hw->phy.type == 6U && (eicr & 33554432U) != 0U) { adapter->flags2 = adapter->flags2 | 2048U; ixgbe_service_event_schedule(adapter); ixgbe_write_reg(hw, 2048U, 33554432U); } else { } if ((eicr & 268435456U) != 0U) { if (((int )adapter->msg_enable & 4) != 0) { netdev_info((struct net_device const *)adapter->netdev, "Received ECC Err, initiating reset\n"); } else { } adapter->flags2 = adapter->flags2 | 64U; ixgbe_service_event_schedule(adapter); ixgbe_write_reg(hw, 2048U, 268435456U); } else { } if ((eicr & 65536U) != 0U) { reinit_count = 0; i = 0; goto ldv_57946; ldv_57945: ring = adapter->tx_ring[i]; tmp = test_and_clear_bit(0L, (unsigned long volatile *)(& ring->state)); if (tmp != 0) { reinit_count = reinit_count + 1; } else { } i = i + 1; ldv_57946: ; if (adapter->num_tx_queues > i) { goto ldv_57945; } else { } if (reinit_count != 0) { ixgbe_write_reg(hw, 2184U, 65536U); adapter->flags2 = adapter->flags2 | 128U; ixgbe_service_event_schedule(adapter); } else { } } else { } ixgbe_check_sfp_event(adapter, eicr); ixgbe_check_overtemp_event(adapter, eicr); goto ldv_57948; default: ; goto ldv_57948; } ldv_57948: ixgbe_check_fan_failure(adapter, eicr); tmp___0 = ldv__builtin_expect((eicr & 16777216U) != 0U, 0L); if (tmp___0 != 0L) { ixgbe_ptp_check_pps_event(adapter, eicr); } else { } tmp___1 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___1 == 0) { ixgbe_irq_enable(adapter, 0, 0); } else { } return (1); } }
/* ixgbe_msix_clean_rings: per-vector MSI-X handler - if the vector owns any rx
 * or tx ring, schedule its NAPI poll. Returns 1 (IRQ_HANDLED). */
static irqreturn_t ixgbe_msix_clean_rings(int irq , void *data ) { struct ixgbe_q_vector *q_vector ; { q_vector = (struct ixgbe_q_vector *)data; if ((unsigned long )q_vector->rx.ring != (unsigned long )((struct ixgbe_ring *)0) || (unsigned long )q_vector->tx.ring != (unsigned long )((struct ixgbe_ring *)0)) { napi_schedule(& q_vector->napi); } else { } return (1); } }
/* ixgbe_poll: NAPI poll callback (body continues on the next packed line).
 * The __mptr arithmetic is CIL's expansion of container_of(napi,
 * struct ixgbe_q_vector, napi) - the 0x...c0UL constant is the negative
 * member offset. */
int ixgbe_poll(struct napi_struct *napi , int budget ) { struct 
/* (cont.) Body of ixgbe_poll: recover the q_vector from the napi pointer,
 * optionally update DCA, clean every tx ring on the vector (ldv_57966/57967
 * loop; clean_complete stays true only if all rings finished), try to take the
 * busy-poll NAPI lock (bail with full budget if contended), divide the budget
 * across the rx rings (at least 1 each), clean each rx ring (ldv_57972/57973
 * loop; a ring that consumed its whole per-ring budget clears clean_complete),
 * then unlock. The `if (!` at the end continues on the next packed line. */
ixgbe_q_vector *q_vector ; struct napi_struct const *__mptr ; struct ixgbe_adapter *adapter ; struct ixgbe_ring *ring ; int per_ring_budget ; bool clean_complete ; bool tmp ; bool tmp___0 ; int tmp___1 ; int _max1 ; int _max2 ; int tmp___2 ; int tmp___3 ; { __mptr = (struct napi_struct const *)napi; q_vector = (struct ixgbe_q_vector *)__mptr + 0xffffffffffffffc0UL; adapter = q_vector->adapter; clean_complete = 1; if ((adapter->flags & 256U) != 0U) { ixgbe_update_dca(q_vector); } else { } ring = q_vector->tx.ring; goto ldv_57967; ldv_57966: tmp = ixgbe_clean_tx_irq(q_vector, ring); clean_complete = ((int )clean_complete & (int )tmp) != 0; ring = ring->next; ldv_57967: ; if ((unsigned long )ring != (unsigned long )((struct ixgbe_ring *)0)) { goto ldv_57966; } else { } tmp___0 = ixgbe_qv_lock_napi(q_vector); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (budget); } else { } if ((unsigned int )q_vector->rx.count > 1U) { _max1 = budget / (int )q_vector->rx.count; _max2 = 1; per_ring_budget = _max1 > _max2 ? _max1 : _max2; } else { per_ring_budget = budget; } ring = q_vector->rx.ring; goto ldv_57973; ldv_57972: tmp___2 = ixgbe_clean_rx_irq(q_vector, ring, per_ring_budget); clean_complete = ((int )clean_complete & (tmp___2 < per_ring_budget)) != 0; ring = ring->next; ldv_57973: ; if ((unsigned long )ring != (unsigned long )((struct ixgbe_ring *)0)) { goto ldv_57972; } else { } ixgbe_qv_unlock_napi(q_vector); if (! 
/* (cont.) Tail of ixgbe_poll: if any ring still has work, return the full
 * budget so NAPI keeps polling; otherwise complete NAPI, optionally rearm
 * adaptive ITR, and re-enable this vector's queue interrupt (1 << v_idx)
 * unless the adapter is "down" (state bit 2). */
clean_complete) { return (budget); } else { } napi_complete(napi); if ((int )adapter->rx_itr_setting & 1) { ixgbe_set_itr(q_vector); } else { } tmp___3 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___3 == 0) { ixgbe_irq_enable_queues(adapter, 1ULL << (int )q_vector->v_idx); } else { } return (0); } }
/* ixgbe_request_msix_irqs: request one IRQ per queue vector (ldv_57987/57988
 * loop). Each vector is named "<netdev>-TxRx-N" / "-rx-N" / "-tx-N" depending
 * on which rings it owns (vectors with neither ring are skipped via
 * ldv_57985); ldv_request_irq_43 is the harness wrapper for request_irq with
 * ixgbe_msix_clean_rings as the handler. On success an affinity hint is set
 * when flags bit 0x40000 is on. Error unwinding continues on the next packed
 * line via the free_queue_irqs label. */
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter ) { struct net_device *netdev ; int vector ; int err ; int ri ; int ti ; struct ixgbe_q_vector *q_vector ; struct msix_entry *entry ; int tmp ; int tmp___0 ; int tmp___1 ; { netdev = adapter->netdev; ri = 0; ti = 0; vector = 0; goto ldv_57988; ldv_57987: q_vector = adapter->q_vector[vector]; entry = adapter->msix_entries + (unsigned long )vector; if ((unsigned long )q_vector->tx.ring != (unsigned long )((struct ixgbe_ring *)0) && (unsigned long )q_vector->rx.ring != (unsigned long )((struct ixgbe_ring *)0)) { tmp = ri; ri = ri + 1; snprintf((char *)(& q_vector->name), 24UL, "%s-%s-%d", (char *)(& netdev->name), (char *)"TxRx", tmp); ti = ti + 1; } else if ((unsigned long )q_vector->rx.ring != (unsigned long )((struct ixgbe_ring *)0)) { tmp___0 = ri; ri = ri + 1; snprintf((char *)(& q_vector->name), 24UL, "%s-%s-%d", (char *)(& netdev->name), (char *)"rx", tmp___0); } else if ((unsigned long )q_vector->tx.ring != (unsigned long )((struct ixgbe_ring *)0)) { tmp___1 = ti; ti = ti + 1; snprintf((char *)(& q_vector->name), 24UL, "%s-%s-%d", (char *)(& netdev->name), (char *)"tx", tmp___1); } else { goto ldv_57985; } err = ldv_request_irq_43(entry->vector, & ixgbe_msix_clean_rings, 0UL, (char const *)(& q_vector->name), (void *)q_vector); if (err != 0) { if (((int )adapter->msg_enable & 2) != 0) { netdev_err((struct net_device const *)adapter->netdev, "request_irq failed for MSIX interrupt Error: %d\n", err); } else { } goto free_queue_irqs; } else { } if ((adapter->flags & 262144U) != 0U) { irq_set_affinity_hint(entry->vector, (struct cpumask const *)(& 
/* (cont.) Tail of ixgbe_request_msix_irqs: after the per-vector loop, request
 * the final "other causes" vector (ldv_request_irq_44 -> ixgbe_msix_other).
 * free_queue_irqs unwinds in reverse (ldv_57990/57991 loop: clear affinity
 * hint, free each vector's IRQ), clears the MSI-X flag (bit 8), disables
 * MSI-X, and frees/NULLs msix_entries before returning err. */
q_vector->affinity_mask)); } else { } ldv_57985: vector = vector + 1; ldv_57988: ; if (adapter->num_q_vectors > vector) { goto ldv_57987; } else { } err = ldv_request_irq_44((adapter->msix_entries + (unsigned long )vector)->vector, & ixgbe_msix_other, 0UL, (char const *)(& netdev->name), (void *)adapter); if (err != 0) { if (((int )adapter->msg_enable & 2) != 0) { netdev_err((struct net_device const *)adapter->netdev, "request_irq for msix_other failed: %d\n", err); } else { } goto free_queue_irqs; } else { } return (0); free_queue_irqs: ; goto ldv_57991; ldv_57990: vector = vector - 1; irq_set_affinity_hint((adapter->msix_entries + (unsigned long )vector)->vector, (struct cpumask const *)0); ldv_free_irq_45((adapter->msix_entries + (unsigned long )vector)->vector, (void *)adapter->q_vector[vector]); ldv_57991: ; if (vector != 0) { goto ldv_57990; } else { } adapter->flags = adapter->flags & 4294967287U; pci_disable_msix(adapter->pdev); kfree((void const *)adapter->msix_entries); adapter->msix_entries = (struct msix_entry *)0; return (err); } }
/* ixgbe_intr: legacy/MSI interrupt handler. Masks everything (0xffffffff ->
 * reg 2184U), reads the cause register (2048U); eicr == 0 means the interrupt
 * was not ours - re-enable and return 0 (IRQ_NONE). Otherwise handle LSC, then
 * per-MAC causes (note case 2U runs the SFP check and deliberately falls
 * through into case 3U); the ECC branch continues on the next packed line. */
static irqreturn_t ixgbe_intr(int irq , void *data ) { struct ixgbe_adapter *adapter ; struct ixgbe_hw *hw ; struct ixgbe_q_vector *q_vector ; u32 eicr ; int tmp ; long tmp___0 ; int tmp___1 ; { adapter = (struct ixgbe_adapter *)data; hw = & adapter->hw; q_vector = adapter->q_vector[0]; ixgbe_write_reg(hw, 2184U, 4294967295U); eicr = ixgbe_read_reg(hw, 2048U); if (eicr == 0U) { tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp == 0) { ixgbe_irq_enable(adapter, 1, 1); } else { } return (0); } else { } if ((eicr & 1048576U) != 0U) { ixgbe_check_lsc(adapter); } else { } switch ((unsigned int )hw->mac.type) { case 2U: ixgbe_check_sfp_event(adapter, eicr); case 3U: ; case 4U: ; case 5U: ; if ((eicr & 268435456U) != 0U) { if (((int )adapter->msg_enable & 4) != 0) { netdev_info((struct net_device const *)adapter->netdev, "Received ECC Err, initiating reset\n"); } else { } adapter->flags2 = 
/* (cont.) Tail of ixgbe_intr: finish the ECC branch (schedule reset work and
 * ack the bit), run overtemp/fan/PPS checks, schedule NAPI on vector 0, and
 * re-enable other causes if not "down". Returns 1 (IRQ_HANDLED). */
adapter->flags2 | 64U; ixgbe_service_event_schedule(adapter); ixgbe_write_reg(hw, 2048U, 268435456U); } else { } ixgbe_check_overtemp_event(adapter, eicr); goto ldv_58005; default: ; goto ldv_58005; } ldv_58005: ixgbe_check_fan_failure(adapter, eicr); tmp___0 = ldv__builtin_expect((eicr & 16777216U) != 0U, 0L); if (tmp___0 != 0L) { ixgbe_ptp_check_pps_event(adapter, eicr); } else { } napi_schedule(& q_vector->napi); tmp___1 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___1 == 0) { ixgbe_irq_enable(adapter, 0, 0); } else { } return (1); } }
/* ixgbe_request_irq: dispatch on interrupt mode - MSI-X (flags bit 8) goes
 * through ixgbe_request_msix_irqs; MSI (flags bit 2) requests a single IRQ
 * with flags 0; legacy requests with flags 128UL (presumably IRQF_SHARED -
 * TODO confirm). Errors are logged when msg_enable bit 2 is set. */
static int ixgbe_request_irq(struct ixgbe_adapter *adapter ) { struct net_device *netdev ; int err ; { netdev = adapter->netdev; if ((adapter->flags & 8U) != 0U) { err = ixgbe_request_msix_irqs(adapter); } else if ((adapter->flags & 2U) != 0U) { err = ldv_request_irq_46((adapter->pdev)->irq, & ixgbe_intr, 0UL, (char const *)(& netdev->name), (void *)adapter); } else { err = ldv_request_irq_47((adapter->pdev)->irq, & ixgbe_intr, 128UL, (char const *)(& netdev->name), (void *)adapter); } if (err != 0) { if (((int )adapter->msg_enable & 2) != 0) { netdev_err((struct net_device const *)adapter->netdev, "request_irq failed, Error %d\n", err); } else { } } else { } return (err); } }
/* ixgbe_free_irq: inverse of ixgbe_request_irq. Non-MSI-X mode frees the
 * single pdev IRQ and returns. MSI-X mode walks the queue vectors
 * (ldv_58019/58020 loop), skipping vectors with no rings, clearing the
 * affinity hint and freeing each IRQ; continues on the next packed line. */
static void ixgbe_free_irq(struct ixgbe_adapter *adapter ) { int vector ; struct ixgbe_q_vector *q_vector ; struct msix_entry *entry ; int tmp ; { if ((adapter->flags & 8U) == 0U) { ldv_free_irq_48((adapter->pdev)->irq, (void *)adapter); return; } else { } vector = 0; goto ldv_58020; ldv_58019: q_vector = adapter->q_vector[vector]; entry = adapter->msix_entries + (unsigned long )vector; if ((unsigned long )q_vector->rx.ring == (unsigned long )((struct ixgbe_ring *)0) && (unsigned long )q_vector->tx.ring == (unsigned long )((struct ixgbe_ring *)0)) { goto ldv_58018; } else { } irq_set_affinity_hint(entry->vector, (struct cpumask const *)0); ldv_free_irq_49(entry->vector, (void *)q_vector); ldv_58018: vector = vector + 
/* (cont.) Tail of ixgbe_free_irq: after the vector loop, free the last
 * ("other causes") MSI-X entry, which belongs to the adapter itself. */
1; ldv_58020: ; if (adapter->num_q_vectors > vector) { goto ldv_58019; } else { } tmp = vector; vector = vector + 1; ldv_free_irq_50((adapter->msix_entries + (unsigned long )tmp)->vector, (void *)adapter); return; } }
/* ixgbe_irq_disable: mask all interrupts - mac.type 1U writes 0xffffffff to
 * reg 2184U; later MACs mask the "other" causes plus both extended queue-mask
 * registers (2736U/2740U). Flush with a read of reg 8U, then synchronize_irq
 * on every vector (MSI-X, ldv_58033/58034 loop, plus the trailing other-cause
 * vector) or on the single pdev IRQ. */
__inline static void ixgbe_irq_disable(struct ixgbe_adapter *adapter ) { int vector ; int tmp ; { switch ((unsigned int )adapter->hw.mac.type) { case 1U: ixgbe_write_reg(& adapter->hw, 2184U, 4294967295U); goto ldv_58026; case 2U: ; case 3U: ; case 4U: ; case 5U: ixgbe_write_reg(& adapter->hw, 2184U, 4294901760U); ixgbe_write_reg(& adapter->hw, 2736U, 4294967295U); ixgbe_write_reg(& adapter->hw, 2740U, 4294967295U); goto ldv_58026; default: ; goto ldv_58026; } ldv_58026: ixgbe_read_reg(& adapter->hw, 8U); if ((adapter->flags & 8U) != 0U) { vector = 0; goto ldv_58034; ldv_58033: synchronize_irq((adapter->msix_entries + (unsigned long )vector)->vector); vector = vector + 1; ldv_58034: ; if (adapter->num_q_vectors > vector) { goto ldv_58033; } else { } tmp = vector; vector = vector + 1; synchronize_irq((adapter->msix_entries + (unsigned long )tmp)->vector); } else { synchronize_irq((adapter->pdev)->irq); } return; } }
/* ixgbe_configure_msi_and_legacy: single-vector setup - program EITR for
 * vector 0 and map both rx (0) and tx (1) queue 0 to vector 0 via
 * ixgbe_set_ivar; log when msg_enable bit 0x2000 is set. */
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter ) { struct ixgbe_q_vector *q_vector ; { q_vector = adapter->q_vector[0]; ixgbe_write_eitr(q_vector); ixgbe_set_ivar(adapter, 0, 0, 0); ixgbe_set_ivar(adapter, 1, 0, 0); if (((int )adapter->msg_enable & 8192) != 0) { netdev_info((struct net_device const *)adapter->netdev, "Legacy interrupt IVAR setup done\n"); } else { } return; } }
/* ixgbe_configure_tx_ring: program one tx ring's hardware registers. The
 * reg_idx*64 + 24616 expressions are per-queue register strides (24616 =
 * 0x6028, presumably TXDCTL - TODO confirm); first disable the queue and
 * flush, then write the descriptor base (continues on the next packed line). */
void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter , struct ixgbe_ring *ring ) { struct ixgbe_hw *hw ; u64 tdba ; int wait_loop ; u32 txdctl ; u8 reg_idx ; struct ixgbe_q_vector *q_vector ; int tmp ; u32 tmp___0 ; { hw = & adapter->hw; tdba = ring->dma; wait_loop = 10; txdctl = 33554432U; reg_idx = ring->reg_idx; ixgbe_write_reg(hw, (u32 )((int )reg_idx * 64 + 24616), 0U); ixgbe_read_reg(hw, 8U); ixgbe_write_reg(hw, (u32 )(((int 
/* (cont.) Body of ixgbe_configure_tx_ring: program descriptor base low/high,
 * ring length (count * 16-byte descriptors), zero head/tail, and record the
 * tail MMIO address. Pick WTHRESH (0x10000) vs a larger threshold (0x80000)
 * based on the vector's ITR, OR in 0x120 (PTHRESH/HTHRESH bits - TODO
 * confirm). If flow director is on (flags & 0x40000), seed ATR sampling and
 * set ring state bit 0. On first configure (test_and_set_bit on state bit 1)
 * register the XPS queue mapping. Clear state bit 3, enable the queue by
 * writing txdctl, and - except on mac.type 1U when the link core is down (reg
 * 17060U bit 0x40000000 clear) - poll up to 10 times (ldv_58050 loop) for the
 * ENABLE bit (0x2000000) to stick, logging on timeout. */
)reg_idx + 384) * 64), (u32 )tdba); ixgbe_write_reg(hw, (u32 )((int )reg_idx * 64 + 24580), (u32 )(tdba >> 32)); ixgbe_write_reg(hw, (u32 )((int )reg_idx * 64 + 24584), (u32 )ring->count * 16U); ixgbe_write_reg(hw, (u32 )((int )reg_idx * 64 + 24592), 0U); ixgbe_write_reg(hw, (u32 )((int )reg_idx * 64 + 24600), 0U); ring->tail = adapter->io_addr + (unsigned long )((int )reg_idx * 64 + 24600); if ((unsigned long )ring->q_vector == (unsigned long )((struct ixgbe_q_vector *)0) || (unsigned int )(ring->q_vector)->itr <= 39U) { txdctl = txdctl | 65536U; } else { txdctl = txdctl | 524288U; } txdctl = txdctl | 288U; if ((adapter->flags & 262144U) != 0U) { ring->__annonCompField120.__annonCompField119.atr_sample_rate = (u8 )adapter->atr_sample_rate; ring->__annonCompField120.__annonCompField119.atr_count = 0U; set_bit(0L, (unsigned long volatile *)(& ring->state)); } else { ring->__annonCompField120.__annonCompField119.atr_sample_rate = 0U; } tmp = test_and_set_bit(1L, (unsigned long volatile *)(& ring->state)); if (tmp == 0) { q_vector = ring->q_vector; if ((unsigned long )q_vector != (unsigned long )((struct ixgbe_q_vector *)0)) { netif_set_xps_queue(ring->netdev, (struct cpumask const *)(& q_vector->affinity_mask), (int )ring->queue_index); } else { } } else { } clear_bit(3L, (unsigned long volatile *)(& ring->state)); ixgbe_write_reg(hw, (u32 )((int )reg_idx * 64 + 24616), txdctl); if ((unsigned int )hw->mac.type == 1U) { tmp___0 = ixgbe_read_reg(hw, 17060U); if ((tmp___0 & 1073741824U) == 0U) { return; } else { } } else { } ldv_58050: usleep_range(1000UL, 2000UL); txdctl = ixgbe_read_reg(hw, (u32 )((int )reg_idx * 64 + 24616)); wait_loop = wait_loop - 1; if (wait_loop != 0 && (txdctl & 33554432U) == 0U) { goto ldv_58050; } else { } if (wait_loop == 0) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "Could not enable Tx Queue %d\n", (int )reg_idx); } else { } } else { } return; } }
/* ixgbe_setup_mtqc: signature continues on the next packed line. */
static void ixgbe_setup_mtqc(struct 
/* (cont.) Body of ixgbe_setup_mtqc: program the multi-tx-queue control.
 * No-op on mac.type 1U. Disables the tx arbiter (reg 18688U bit 64U) around
 * the update, selects an MTQC value (reg 33056U) from the SR-IOV flag
 * (0x800000), the number of traffic classes, and the VMDq indices; with TCs
 * enabled also raises the SECTX buffer threshold (reg 34832U | 0x1F00), then
 * re-enables the arbiter. */
ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 rttdcs ; u32 mtqc ; u8 tcs ; int tmp ; u32 sectx ; u32 tmp___0 ; { hw = & adapter->hw; tmp = netdev_get_num_tc(adapter->netdev); tcs = (u8 )tmp; if ((unsigned int )hw->mac.type == 1U) { return; } else { } rttdcs = ixgbe_read_reg(hw, 18688U); rttdcs = rttdcs | 64U; ixgbe_write_reg(hw, 18688U, rttdcs); if ((adapter->flags & 8388608U) != 0U) { mtqc = 2U; if ((unsigned int )tcs > 4U) { mtqc = mtqc | 13U; } else if ((unsigned int )tcs > 1U) { mtqc = mtqc | 9U; } else if ((unsigned int )adapter->ring_feature[2].indices == 4U) { mtqc = mtqc | 8U; } else { mtqc = mtqc | 4U; } } else if ((unsigned int )tcs > 4U) { mtqc = 13U; } else if ((unsigned int )tcs > 1U) { mtqc = 9U; } else { mtqc = 0U; } ixgbe_write_reg(hw, 33056U, mtqc); if ((unsigned int )tcs != 0U) { tmp___0 = ixgbe_read_reg(hw, 34832U); sectx = tmp___0; sectx = sectx | 7936U; ixgbe_write_reg(hw, 34832U, sectx); } else { } rttdcs = rttdcs & 4294967231U; ixgbe_write_reg(hw, 18688U, rttdcs); return; } }
/* ixgbe_configure_tx: MTQC setup, enable the DMA tx engine (reg 19072U bit 1,
 * non-mac.type-1U only), then configure every tx ring (ldv_58066/58067 loop). */
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 dmatxctl ; u32 i ; { hw = & adapter->hw; ixgbe_setup_mtqc(adapter); if ((unsigned int )hw->mac.type != 1U) { dmatxctl = ixgbe_read_reg(hw, 19072U); dmatxctl = dmatxctl | 1U; ixgbe_write_reg(hw, 19072U, dmatxctl); } else { } i = 0U; goto ldv_58067; ldv_58066: ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); i = i + 1U; ldv_58067: ; if ((u32 )adapter->num_tx_queues > i) { goto ldv_58066; } else { } return; } }
/* ixgbe_enable_rx_drop: set the DROP_EN bit (0x10000000) in a ring's SRRCTL.
 * The three-way conditional picks the SRRCTL address range by queue index
 * (<=15, <=63, else); the read-modify-write continues on the next packed
 * line. */
static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter , struct ixgbe_ring *ring ) { struct ixgbe_hw *hw ; u8 reg_idx ; u32 srrctl ; u32 tmp ; { hw = & adapter->hw; reg_idx = ring->reg_idx; tmp = ixgbe_read_reg(hw, (u32 )((unsigned int )reg_idx <= 15U ? ((int )reg_idx + 2112) * 4 : ((unsigned int )reg_idx <= 63U ? 
/* (cont.) Tail of ixgbe_enable_rx_drop: OR in DROP_EN (0x10000000) and write
 * SRRCTL back at the same index-dependent address. */
(int )reg_idx * 64 + 4116 : ((int )reg_idx + -64) * 64 + 53268))); srrctl = tmp; srrctl = srrctl | 268435456U; ixgbe_write_reg(hw, (u32 )((unsigned int )reg_idx <= 15U ? ((int )reg_idx + 2112) * 4 : ((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4116 : ((int )reg_idx + -64) * 64 + 53268)), srrctl); return; } }
/* ixgbe_disable_rx_drop: exact mirror of the enable helper - clear DROP_EN
 * (mask 0xEFFFFFFF) in the ring's SRRCTL at the same index-dependent address. */
static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter , struct ixgbe_ring *ring ) { struct ixgbe_hw *hw ; u8 reg_idx ; u32 srrctl ; u32 tmp ; { hw = & adapter->hw; reg_idx = ring->reg_idx; tmp = ixgbe_read_reg(hw, (u32 )((unsigned int )reg_idx <= 15U ? ((int )reg_idx + 2112) * 4 : ((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4116 : ((int )reg_idx + -64) * 64 + 53268))); srrctl = tmp; srrctl = srrctl & 4026531839U; ixgbe_write_reg(hw, (u32 )((unsigned int )reg_idx <= 15U ? ((int )reg_idx + 2112) * 4 : ((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4116 : ((int )reg_idx + -64) * 64 + 53268)), srrctl); return; } }
/* ixgbe_set_rx_drop_en: decide whether rx drop should be on for all rings -
 * enabled when VFs exist, or when multiple rx queues run without rx flow
 * control (fc.current_mode bit 2 clear) and without PFC (including the
 * IEEE-PFC override); the branch continues on the next packed line. */
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter ) { int i ; bool pfc_en ; { pfc_en = adapter->dcb_cfg.pfc_mode_enable; if ((unsigned long )adapter->ixgbe_ieee_pfc != (unsigned long )((struct ieee_pfc *)0)) { pfc_en = ((int )pfc_en | ((unsigned int )(adapter->ixgbe_ieee_pfc)->pfc_en != 0U)) != 0; } else { } if (adapter->num_vfs != 0U || ((adapter->num_rx_queues > 1 && ((unsigned int )adapter->hw.fc.current_mode & 2U) == 0U) && ! 
/* (cont.) Tail of ixgbe_set_rx_drop_en: apply the decision - enable drop on
 * every rx ring (ldv_58088/58089 loop) or disable it on every rx ring
 * (ldv_58091/58092 loop). */
pfc_en)) { i = 0; goto ldv_58089; ldv_58088: ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); i = i + 1; ldv_58089: ; if (adapter->num_rx_queues > i) { goto ldv_58088; } else { } } else { i = 0; goto ldv_58092; ldv_58091: ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); i = i + 1; ldv_58092: ; if (adapter->num_rx_queues > i) { goto ldv_58091; } else { } } return; } }
/* ixgbe_configure_srrctl: program a ring's split-receive control. On mac.type
 * 1U the queue index is first masked with the RSS feature mask. Builds the
 * value from 1024U (header size field - TODO confirm), the ring's buffer size
 * shifted into the BSIZEPKT field (>> 10), and descriptor type 0x2000000,
 * then writes it at the index-dependent SRRCTL address. */
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter , struct ixgbe_ring *rx_ring ) { struct ixgbe_hw *hw ; u32 srrctl ; u8 reg_idx ; u16 mask ; unsigned int tmp ; { hw = & adapter->hw; reg_idx = rx_ring->reg_idx; if ((unsigned int )hw->mac.type == 1U) { mask = adapter->ring_feature[2].mask; reg_idx = (int )((u8 )mask) & (int )reg_idx; } else { } srrctl = 1024U; tmp = ixgbe_rx_bufsz(rx_ring); srrctl = (tmp >> 10) | srrctl; srrctl = srrctl | 33554432U; ixgbe_write_reg(hw, (u32 )((unsigned int )reg_idx <= 15U ? ((int )reg_idx + 2112) * 4 : ((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4116 : ((int )reg_idx + -64) * 64 + 53268)), srrctl); return; } }
/* ixgbe_rss_indir_tbl_entries: RSS redirection table size - 128 entries for
 * mac.type <= 3U, 64 in SR-IOV mode (flags & 0x800000), else 512. */
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter ) { { if ((unsigned int )adapter->hw.mac.type <= 3U) { return (128U); } else if ((adapter->flags & 8388608U) != 0U) { return (64U); } else { return (512U); } } }
/* ixgbe_store_reta: write the cached rss_indir_tbl into hardware. mac.type 1U
 * scales each entry by 17 (same value replicated in multiple nibbles - TODO
 * confirm). Entries are packed 4-per-register, byte i*8; every 4th entry
 * flushes the accumulated word to the low (<=127) or high RETA register
 * bank. Loop continues on the next packed line. */
static void ixgbe_store_reta(struct ixgbe_adapter *adapter ) { u32 i ; u32 reta_entries ; u32 tmp ; struct ixgbe_hw *hw ; u32 reta ; u32 indices_multi ; u8 *indir_tbl ; { tmp = ixgbe_rss_indir_tbl_entries(adapter); reta_entries = tmp; hw = & adapter->hw; reta = 0U; indir_tbl = (u8 *)(& adapter->rss_indir_tbl); if ((unsigned int )adapter->hw.mac.type == 1U) { indices_multi = 17U; } else { indices_multi = 1U; } i = 0U; goto ldv_58115; ldv_58114: reta = ((u32 )*(indir_tbl + (unsigned long )i) * indices_multi << (int )((i & 3U) * 8U)) | reta; if ((i & 3U) == 3U) { if (i <= 127U) { ixgbe_write_reg(hw, ((i >> 2) + 5888U) * 4U, reta); } else { ixgbe_write_reg(hw, ((i >> 2) + 15232U) * 4U, reta); } reta = 0U; } else { }
/* (cont.) Tail of ixgbe_store_reta's ldv_58114/58115 loop. */
i = i + 1U; ldv_58115: ; if (i < reta_entries) { goto ldv_58114; } else { } return; } }
/* ixgbe_store_vfreta: like ixgbe_store_reta but targets the per-pool VF RETA
 * registers - entries packed 4-per-word, written at an address derived from
 * the PF pool number (pf_pool = num_vfs) in the 25600U*4 register block. */
static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter ) { u32 i ; u32 reta_entries ; u32 tmp ; struct ixgbe_hw *hw ; u32 vfreta ; unsigned int pf_pool ; { tmp = ixgbe_rss_indir_tbl_entries(adapter); reta_entries = tmp; hw = & adapter->hw; vfreta = 0U; pf_pool = adapter->num_vfs; i = 0U; goto ldv_58126; ldv_58125: vfreta = ((unsigned int )adapter->rss_indir_tbl[i] << (int )((i & 3U) * 8U)) | vfreta; if ((i & 3U) == 3U) { ixgbe_write_reg(hw, (((i >> 2) + pf_pool * 16U) + 25600U) * 4U, vfreta); vfreta = 0U; } else { } i = i + 1U; ldv_58126: ; if (i < reta_entries) { goto ldv_58125; } else { } return; } }
/* ixgbe_setup_reta: write the 10-word RSS key (ldv_58136/58137 loop, regs
 * (i+5920)*4), zero the 512-byte indirection cache, fill it round-robin over
 * rss_i queues (forced to >= 2 in SR-IOV mode; ldv_58139/58140 loop), then
 * push it to hardware via ixgbe_store_reta. */
static void ixgbe_setup_reta(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 i ; u32 j ; u32 reta_entries ; u32 tmp ; u16 rss_i ; { hw = & adapter->hw; tmp = ixgbe_rss_indir_tbl_entries(adapter); reta_entries = tmp; rss_i = adapter->ring_feature[2].indices; if ((adapter->flags & 8388608U) != 0U && (unsigned int )rss_i <= 1U) { rss_i = 2U; } else { } i = 0U; goto ldv_58137; ldv_58136: ixgbe_write_reg(hw, (i + 5920U) * 4U, adapter->rss_key[i]); i = i + 1U; ldv_58137: ; if (i <= 9U) { goto ldv_58136; } else { } memset((void *)(& adapter->rss_indir_tbl), 0, 512UL); i = 0U; j = 0U; goto ldv_58140; ldv_58139: ; if ((u32 )rss_i == j) { j = 0U; } else { } adapter->rss_indir_tbl[i] = (u8 )j; i = i + 1U; j = j + 1U; ldv_58140: ; if (i < reta_entries) { goto ldv_58139; } else { } ixgbe_store_reta(adapter); return; } }
/* ixgbe_setup_vfreta: VF-pool variant - write the RSS key into the per-pool
 * key registers (ldv_58150/58151 loop, pool-offset 24576U*4 block), then fill
 * a fixed 64-entry indirection table round-robin (loop continues on the next
 * packed line) and store it via ixgbe_store_vfreta. */
static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u16 rss_i ; unsigned int pf_pool ; int i ; int j ; { hw = & adapter->hw; rss_i = adapter->ring_feature[2].indices; pf_pool = adapter->num_vfs; i = 0; goto ldv_58151; ldv_58150: ixgbe_write_reg(hw, ((pf_pool * 16U + (unsigned int )i) + 24576U) * 4U, adapter->rss_key[i]); i = i + 1; ldv_58151: ; if (i <= 9) { goto ldv_58150; } else { } i = 0; j = 0; goto ldv_58154; ldv_58153: 
/* (cont.) Tail of ixgbe_setup_vfreta: 64-entry round-robin fill over rss_i
 * queues, then push to hardware. */
; if ((int )rss_i == j) { j = 0; } else { } adapter->rss_indir_tbl[i] = (u8 )j; i = i + 1; j = j + 1; ldv_58154: ; if (i <= 63) { goto ldv_58153; } else { } ixgbe_store_vfreta(adapter); return; } }
/* ixgbe_setup_mrqc: program multiple-receive-queue control. Forces IP payload
 * checksum (reg 20480U bit 0x2000). Picks the MRQC queueing mode from MAC
 * type, SR-IOV flag, traffic-class count and RSS indices; builds the RSS field
 * enable bits (base 0x330000 plus optional UDP-IPv4 0x400000 / UDP-IPv6
 * 0x800000 hashing from flags2), fills a fresh RSS key, then either programs
 * the per-pool VF path (mrqc | 0x2000 into reg 22552U, ixgbe_setup_vfreta,
 * and VFMRQC at (pf_pool+3328)*4) or the plain path (ixgbe_setup_reta and
 * MRQC into reg 22552U). */
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 mrqc ; u32 rss_field ; u32 vfmrqc ; u32 rxcsum ; u8 tcs ; int tmp ; unsigned int pf_pool ; { hw = & adapter->hw; mrqc = 0U; rss_field = 0U; vfmrqc = 0U; rxcsum = ixgbe_read_reg(hw, 20480U); rxcsum = rxcsum | 8192U; ixgbe_write_reg(hw, 20480U, rxcsum); if ((unsigned int )adapter->hw.mac.type == 1U) { if ((unsigned int )adapter->ring_feature[2].mask != 0U) { mrqc = 1U; } else { } } else { tmp = netdev_get_num_tc(adapter->netdev); tcs = (u8 )tmp; if ((adapter->flags & 8388608U) != 0U) { if ((unsigned int )tcs > 4U) { mrqc = 12U; } else if ((unsigned int )tcs > 1U) { mrqc = 13U; } else if ((unsigned int )adapter->ring_feature[2].indices == 4U) { mrqc = 10U; } else { mrqc = 11U; } } else if ((unsigned int )tcs > 4U) { mrqc = 4U; } else if ((unsigned int )tcs > 1U) { mrqc = 5U; } else { mrqc = 1U; } } rss_field = rss_field | 3342336U; if ((adapter->flags2 & 256U) != 0U) { rss_field = rss_field | 4194304U; } else { } if ((adapter->flags2 & 512U) != 0U) { rss_field = rss_field | 8388608U; } else { } netdev_rss_key_fill((void *)(& adapter->rss_key), 40UL); if ((unsigned int )hw->mac.type > 3U && (adapter->flags & 8388608U) != 0U) { pf_pool = adapter->num_vfs; mrqc = mrqc | 8192U; ixgbe_write_reg(hw, 22552U, mrqc); ixgbe_setup_vfreta(adapter); vfmrqc = 1U; vfmrqc = vfmrqc | rss_field; ixgbe_write_reg(hw, (pf_pool + 3328U) * 4U, vfmrqc); } else { ixgbe_setup_reta(adapter); mrqc = mrqc | rss_field; ixgbe_write_reg(hw, 22552U, mrqc); } return; } }
/* ixgbe_configure_rscctl: enable receive-side coalescing on a ring, but only
 * if ring state bit 4 (RSC-enabled) is set - the test continues on the next
 * packed line. */
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter , struct ixgbe_ring *ring ) { struct ixgbe_hw *hw ; u32 rscctrl ; u8 reg_idx ; int tmp ; { hw = & adapter->hw; reg_idx = ring->reg_idx; tmp = constant_test_bit(4L, (unsigned long const 
/* (cont.) Tail of ixgbe_configure_rscctl: bail if state bit 4 is clear;
 * otherwise set RSCEN (bit 1) and max-descriptor field (0xC) in the ring's
 * RSCCTL at the index-dependent address (4140/53292 strides). */
volatile *)(& ring->state)); if (tmp == 0) { return; } else { } rscctrl = ixgbe_read_reg(hw, (u32 )((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4140 : ((int )reg_idx + -64) * 64 + 53292)); rscctrl = rscctrl | 1U; rscctrl = rscctrl | 12U; ixgbe_write_reg(hw, (u32 )((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4140 : ((int )reg_idx + -64) * 64 + 53292), rscctrl); return; } }
/* ixgbe_rx_desc_queue_enable: poll (ldv_58181 loop, up to 10 x 1-2 ms sleeps)
 * for the ring's RXDCTL ENABLE bit (0x2000000) to come up; skips entirely if
 * the device looks removed or (mac.type 1U) the link core is down. Logs a
 * timeout when msg_enable bit 0 is set. */
static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter , struct ixgbe_ring *ring ) { struct ixgbe_hw *hw ; int wait_loop ; u32 rxdctl ; u8 reg_idx ; bool tmp ; u32 tmp___0 ; { hw = & adapter->hw; wait_loop = 10; reg_idx = ring->reg_idx; tmp = ixgbe_removed((void *)hw->hw_addr); if ((int )tmp) { return; } else { } if ((unsigned int )hw->mac.type == 1U) { tmp___0 = ixgbe_read_reg(hw, 17060U); if ((tmp___0 & 1073741824U) == 0U) { return; } else { } } else { } ldv_58181: usleep_range(1000UL, 2000UL); rxdctl = ixgbe_read_reg(hw, (u32 )((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4136 : ((int )reg_idx + -64) * 64 + 53288)); wait_loop = wait_loop - 1; if (wait_loop != 0 && (rxdctl & 33554432U) == 0U) { goto ldv_58181; } else { } if (wait_loop == 0) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n", (int )reg_idx); } else { } } else { } return; } }
/* ixgbe_disable_rx_queue: clear the ENABLE bit in a ring's RXDCTL (mask
 * 0xFDFFFFFF) and then busy-wait for it to drop - the write and the delay
 * loop continue on the next packed line. */
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter , struct ixgbe_ring *ring ) { struct ixgbe_hw *hw ; int wait_loop ; u32 rxdctl ; u8 reg_idx ; bool tmp ; u32 tmp___0 ; { hw = & adapter->hw; wait_loop = 10; reg_idx = ring->reg_idx; tmp = ixgbe_removed((void *)hw->hw_addr); if ((int )tmp) { return; } else { } rxdctl = ixgbe_read_reg(hw, (u32 )((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4136 : ((int )reg_idx + -64) * 64 + 53288)); rxdctl = rxdctl & 4261412863U; ixgbe_write_reg(hw, (u32 )((unsigned int )reg_idx <= 63U ? 
/* (cont.) Tail of ixgbe_disable_rx_queue: finish the RXDCTL write, then
 * (unless mac.type 1U with the link core down) poll with udelay (ldv_58191
 * loop, up to 10 iterations) until the ENABLE bit (0x2000000) clears, logging
 * on timeout. */
(int )reg_idx * 64 + 4136 : ((int )reg_idx + -64) * 64 + 53288), rxdctl); if ((unsigned int )hw->mac.type == 1U) { tmp___0 = ixgbe_read_reg(hw, 17060U); if ((tmp___0 & 1073741824U) == 0U) { return; } else { } } else { } ldv_58191: __const_udelay(42950UL); rxdctl = ixgbe_read_reg(hw, (u32 )((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4136 : ((int )reg_idx + -64) * 64 + 53288)); wait_loop = wait_loop - 1; if (wait_loop != 0 && (rxdctl & 33554432U) != 0U) { goto ldv_58191; } else { } if (wait_loop == 0) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "RXDCTL.ENABLE on Rx queue %d not cleared within the polling period\n", (int )reg_idx); } else { } } else { } return; } }
/* ixgbe_configure_rx_ring: program one rx ring. Saves the current RXDCTL,
 * disables the queue, then writes descriptor base low/high, length (count *
 * 16), and zeroes head/tail - each at an index-dependent address (two banks:
 * queues <= 63 and above). Records the tail MMIO address (expression
 * continues on the next packed line). */
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter , struct ixgbe_ring *ring ) { struct ixgbe_hw *hw ; u64 rdba ; u32 rxdctl ; u8 reg_idx ; u16 tmp ; { hw = & adapter->hw; rdba = ring->dma; reg_idx = ring->reg_idx; rxdctl = ixgbe_read_reg(hw, (u32 )((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4136 : ((int )reg_idx + -64) * 64 + 53288)); ixgbe_disable_rx_queue(adapter, ring); ixgbe_write_reg(hw, (u32 )((unsigned int )reg_idx <= 63U ? ((int )reg_idx + 64) * 64 : ((int )reg_idx + 768) * 64), (u32 )rdba); ixgbe_write_reg(hw, (u32 )((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4100 : ((int )reg_idx + -64) * 64 + 53252), (u32 )(rdba >> 32)); ixgbe_write_reg(hw, (u32 )((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4104 : ((int )reg_idx + -64) * 64 + 53256), (u32 )ring->count * 16U); ixgbe_write_reg(hw, (u32 )((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4112 : ((int )reg_idx + -64) * 64 + 53264), 0U); ixgbe_write_reg(hw, (u32 )((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4120 : ((int )reg_idx + -64) * 64 + 53272), 0U); ring->tail = adapter->io_addr + (unsigned long )((unsigned int )reg_idx <= 63U ? 
/* (cont.) Tail of ixgbe_configure_rx_ring: configure SRRCTL and RSC, apply
 * mac.type-1U-specific threshold bits, set ENABLE (0x2000000) in RXDCTL,
 * wait for the queue to come up, then pre-fill the ring with rx buffers for
 * every unused descriptor. */
(int )reg_idx * 64 + 4120 : ((int )reg_idx + -64) * 64 + 53272); ixgbe_configure_srrctl(adapter, ring); ixgbe_configure_rscctl(adapter, ring); if ((unsigned int )hw->mac.type == 1U) { rxdctl = rxdctl & 4290772992U; rxdctl = rxdctl | 525344U; } else { } rxdctl = rxdctl | 33554432U; ixgbe_write_reg(hw, (u32 )((unsigned int )reg_idx <= 63U ? (int )reg_idx * 64 + 4136 : ((int )reg_idx + -64) * 64 + 53288), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); tmp = ixgbe_desc_unused(ring); ixgbe_alloc_rx_buffers(ring, (int )tmp); return; } }
/* ixgbe_setup_psrtype: program packet-split receive type for every active
 * pool. No-op on mac.type 1U. Base value 4912U gains extra split bits when
 * RSS uses more than 1 or more than 3 queues. Iterates the fwd_bitmask pool
 * bitmap via find_first_bit/find_next_bit (ldv_58208/58209 loop); the
 * per-pool register address computation continues on the next packed line. */
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int rss_i ; u16 pool ; u32 psrtype ; unsigned long tmp ; unsigned long tmp___0 ; { hw = & adapter->hw; rss_i = (int )adapter->ring_feature[2].indices; psrtype = 4912U; if ((unsigned int )hw->mac.type == 1U) { return; } else { } if (rss_i > 3) { psrtype = psrtype | 1073741824U; } else if (rss_i > 1) { psrtype = psrtype | 536870912U; } else { } tmp = find_first_bit((unsigned long const *)(& adapter->fwd_bitmask), 32UL); pool = (u16 )tmp; goto ldv_58209; ldv_58208: ixgbe_write_reg(hw, (u32 )((int )pool + (int )adapter->ring_feature[1].offset <= 15 ? 
/* (cont.) Tail of ixgbe_setup_psrtype: the PSRTYPE register sits in one of
 * two banks depending on pool+offset (<=15 vs above); loop over pools 0-31. */
(((int )pool + (int )adapter->ring_feature[1].offset) + 5408) * 4 : (((int )pool + (int )adapter->ring_feature[1].offset) + 14976) * 4), psrtype); tmp___0 = find_next_bit((unsigned long const *)(& adapter->fwd_bitmask), 32UL, (unsigned long )((int )pool + 1)); pool = (u16 )tmp___0; ldv_58209: ; if ((unsigned int )pool <= 31U) { goto ldv_58208; } else { } return; } }
/* ixgbe_configure_virtualization: SR-IOV setup; no-op unless flags bit
 * 0x800000 is set. Enables VMDq (reg 20912U: set bit 0, clear the pool field,
 * insert the VMDq offset << 7, set REPLEN 0x40000000). Computes which 32-bit
 * half (reg_offset) and bit (vf_shift) cover the PF pools, then opens the
 * PF's tx/rx pool-enable registers ((5240+off)*4 / (8260+off)*4) and their
 * counterparts. Enables PF-loopback when bridge_mode is 0, maps RAR 0 to the
 * PF pool via set_vmdq, sizes GCR_EXT (reg 69712U) from the VMDq mask, turns
 * on MAC anti-spoof, and - when the MAC supports ethertype anti-spoof -
 * programs an ethertype filter (reg 20796U). The per-VF loop (spoofchk /
 * ethertype-spoof / RSS-query-enable) continues on the next packed line. */
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 reg_offset ; u32 vf_shift ; u32 gcr_ext ; u32 vmdctl ; int i ; { hw = & adapter->hw; if ((adapter->flags & 8388608U) == 0U) { return; } else { } vmdctl = ixgbe_read_reg(hw, 20912U); vmdctl = vmdctl | 1U; vmdctl = vmdctl & 4294959231U; vmdctl = (u32 )((int )adapter->ring_feature[1].offset << 7) | vmdctl; vmdctl = vmdctl | 1073741824U; ixgbe_write_reg(hw, 20912U, vmdctl); vf_shift = (u32 )adapter->ring_feature[1].offset & 31U; reg_offset = (int )adapter->ring_feature[1].offset > 31; ixgbe_write_reg(hw, (reg_offset + 5240U) * 4U, (u32 )(-1 << (int )vf_shift)); ixgbe_write_reg(hw, ((reg_offset ^ 1U) + 5240U) * 4U, reg_offset - 1U); ixgbe_write_reg(hw, (reg_offset + 8260U) * 4U, (u32 )(-1 << (int )vf_shift)); ixgbe_write_reg(hw, ((reg_offset ^ 1U) + 8260U) * 4U, reg_offset - 1U); if ((unsigned int )adapter->bridge_mode == 0U) { ixgbe_write_reg(hw, 33312U, 1U); } else { } (*(hw->mac.ops.set_vmdq))(hw, 0U, (u32 )adapter->ring_feature[1].offset); switch ((int )adapter->ring_feature[1].mask) { case 120: gcr_ext = 1U; goto ldv_58221; case 124: gcr_ext = 2U; goto ldv_58221; default: gcr_ext = 3U; goto ldv_58221; } ldv_58221: ixgbe_write_reg(hw, 69712U, gcr_ext); (*(hw->mac.ops.set_mac_anti_spoofing))(hw, adapter->num_vfs != 0U, (int )adapter->num_vfs); if ((unsigned long )hw->mac.ops.set_ethertype_anti_spoofing != (unsigned long )((void (*)(struct ixgbe_hw * , bool , int ))0)) { ixgbe_write_reg(hw, 20796U, 2684389580U); } else { } i = 0; goto ldv_58225; ldv_58224: ; if ((unsigned int )(adapter->vfinfo + (unsigned 
/* (cont.) Tail of ixgbe_configure_virtualization's per-VF loop
 * (ldv_58224/58225): propagate each VF's spoofchk-off setting, enable
 * ethertype anti-spoofing where supported, and restore RSS-query-enable. */
long )i)->spoofchk_enabled == 0U) { ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, 0); } else { } if ((unsigned long )hw->mac.ops.set_ethertype_anti_spoofing != (unsigned long )((void (*)(struct ixgbe_hw * , bool , int ))0)) { (*(hw->mac.ops.set_ethertype_anti_spoofing))(hw, 1, i); } else { } ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, (int )(adapter->vfinfo + (unsigned long )i)->rss_query_enabled); i = i + 1; ldv_58225: ; if ((unsigned int )i < adapter->num_vfs) { goto ldv_58224; } else { } return; } }
/* ixgbe_set_rx_buffer_len: compute max frame size (MTU + 18-byte overhead;
 * at least 3072 when flags bit 0x200000 - likely FCoE - is set, and never
 * below the 1518 Ethernet minimum), update the MAXFRS field (high 16 bits of
 * reg 17000U) only when it changed, force jumbo-enable (reg 16960U bit 4),
 * and mirror flags2 bit 2 (RSC capable) into state bit 4 of every rx ring
 * (ldv_58237/58238 loop). */
static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; struct net_device *netdev ; int max_frame ; struct ixgbe_ring *rx_ring ; int i ; u32 mhadd ; u32 hlreg0 ; { hw = & adapter->hw; netdev = adapter->netdev; max_frame = (int )(netdev->mtu + 18U); if ((adapter->flags & 2097152U) != 0U && max_frame <= 3071) { max_frame = 3072; } else { } if (max_frame <= 1517) { max_frame = 1518; } else { } mhadd = ixgbe_read_reg(hw, 17000U); if ((u32 )max_frame != mhadd >> 16) { mhadd = mhadd & 65535U; mhadd = (u32 )(max_frame << 16) | mhadd; ixgbe_write_reg(hw, 17000U, mhadd); } else { } hlreg0 = ixgbe_read_reg(hw, 16960U); hlreg0 = hlreg0 | 4U; ixgbe_write_reg(hw, 16960U, hlreg0); i = 0; goto ldv_58238; ldv_58237: rx_ring = adapter->rx_ring[i]; if ((adapter->flags2 & 2U) != 0U) { set_bit(4L, (unsigned long volatile *)(& rx_ring->state)); } else { clear_bit(4L, (unsigned long volatile *)(& rx_ring->state)); } i = i + 1; ldv_58238: ; if (adapter->num_rx_queues > i) { goto ldv_58237; } else { } return; } }
/* ixgbe_setup_rdrxctl: tune the receive-DMA control register (12032U) by MAC
 * type - newer MACs and type 1U set CRC strip (bit 32U); types 2U/3U also
 * poke reg 12328U (bit 128U) and clear/set further RDRXCTL fields (continues
 * on the next packed line). */
static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 rdrxctl ; u32 tmp ; u32 tmp___0 ; { hw = & adapter->hw; tmp = ixgbe_read_reg(hw, 12032U); rdrxctl = tmp; switch ((unsigned int )hw->mac.type) { case 4U: ; case 5U: ; case 1U: rdrxctl = rdrxctl | 32U; goto ldv_58248; case 2U: ; case 3U: tmp___0 = ixgbe_read_reg(hw, 12328U); ixgbe_write_reg(hw, 12328U, tmp___0 | 128U); rdrxctl = rdrxctl & 4290904063U; 
/* (cont.) Tail of ixgbe_setup_rdrxctl: finish the 82599/X540-style bits
 * (0x6000000 | 2U) and write the result back; unknown MAC types return
 * without writing. */
rdrxctl = rdrxctl | 100663296U; rdrxctl = rdrxctl | 2U; goto ldv_58248; default: ; return; } ldv_58248: ixgbe_write_reg(hw, 12032U, rdrxctl); return; } }
/* ixgbe_configure_rx: top-level rx bring-up - stop the rx unit, set PSRTYPE
 * and RDRXCTL, program RFCTL (reg 20488U: clear bit 32U, then set it to
 * DISABLE RSC when flags2 bit 2 is off), set up MRQC/RSS and buffer lengths,
 * configure every rx ring (ldv_58259/58260 loop), and finally re-enable rx
 * DMA via the MAC ops (adding bit 2 on mac.type 1U). */
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int i ; u32 rxctrl ; u32 rfctl ; { hw = & adapter->hw; (*(hw->mac.ops.disable_rx))(hw); ixgbe_setup_psrtype(adapter); ixgbe_setup_rdrxctl(adapter); rfctl = ixgbe_read_reg(hw, 20488U); rfctl = rfctl & 4294967263U; if ((adapter->flags2 & 2U) == 0U) { rfctl = rfctl | 32U; } else { } ixgbe_write_reg(hw, 20488U, rfctl); ixgbe_setup_mrqc(adapter); ixgbe_set_rx_buffer_len(adapter); i = 0; goto ldv_58260; ldv_58259: ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); i = i + 1; ldv_58260: ; if (adapter->num_rx_queues > i) { goto ldv_58259; } else { } rxctrl = ixgbe_read_reg(hw, 12288U); if ((unsigned int )hw->mac.type == 1U) { rxctrl = rxctrl | 2U; } else { } rxctrl = rxctrl | 1U; (*(hw->mac.ops.enable_rx_dma))(hw, rxctrl); return; } }
/* ixgbe_vlan_rx_add_vid: ndo callback - add the VLAN id to the hardware VFTA
 * for this adapter's VMDq pool and remember it in the active_vlans bitmap. */
static int ixgbe_vlan_rx_add_vid(struct net_device *netdev , __be16 proto , u16 vid ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; (*(hw->mac.ops.set_vfta))(& adapter->hw, (u32 )vid, (u32 )adapter->ring_feature[1].offset, 1); set_bit((long )vid, (unsigned long volatile *)(& adapter->active_vlans)); return (0); } }
/* ixgbe_vlan_rx_kill_vid: mirror of the add callback - drop the VLAN id from
 * the VFTA and clear its bit in active_vlans. */
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev , __be16 proto , u16 vid ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; (*(hw->mac.ops.set_vfta))(& adapter->hw, (u32 )vid, (u32 )adapter->ring_feature[1].offset, 0); clear_bit((long )vid, (unsigned long volatile *)(& adapter->active_vlans)); return (0); } }
/* ixgbe_vlan_strip_disable: body continues on the next packed line. */
static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 vlnctrl ; 
int i ; int j ; struct ixgbe_ring *ring ; { hw = & adapter->hw; switch ((unsigned int )hw->mac.type) { case 1U: vlnctrl = ixgbe_read_reg(hw, 20616U); vlnctrl = vlnctrl & 2147483647U; ixgbe_write_reg(hw, 20616U, vlnctrl); goto ldv_58284; case 2U: ; case 3U: ; case 4U: ; case 5U: i = 0; goto ldv_58292; ldv_58291: ring = adapter->rx_ring[i]; if ((unsigned long )ring->l2_accel_priv != (unsigned long )((struct ixgbe_fwd_adapter *)0)) { goto ldv_58290; } else { } j = (int )ring->reg_idx; vlnctrl = ixgbe_read_reg(hw, (u32 )(j <= 63 ? j * 64 + 4136 : (j + -64) * 64 + 53288)); vlnctrl = vlnctrl & 3221225471U; ixgbe_write_reg(hw, (u32 )(j <= 63 ? j * 64 + 4136 : (j + -64) * 64 + 53288), vlnctrl); ldv_58290: i = i + 1; ldv_58292: ; if (adapter->num_rx_queues > i) { goto ldv_58291; } else { } goto ldv_58284; default: ; goto ldv_58284; } ldv_58284: ; return; } } static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 vlnctrl ; int i ; int j ; struct ixgbe_ring *ring ; { hw = & adapter->hw; switch ((unsigned int )hw->mac.type) { case 1U: vlnctrl = ixgbe_read_reg(hw, 20616U); vlnctrl = vlnctrl | 2147483648U; ixgbe_write_reg(hw, 20616U, vlnctrl); goto ldv_58303; case 2U: ; case 3U: ; case 4U: ; case 5U: i = 0; goto ldv_58311; ldv_58310: ring = adapter->rx_ring[i]; if ((unsigned long )ring->l2_accel_priv != (unsigned long )((struct ixgbe_fwd_adapter *)0)) { goto ldv_58309; } else { } j = (int )ring->reg_idx; vlnctrl = ixgbe_read_reg(hw, (u32 )(j <= 63 ? j * 64 + 4136 : (j + -64) * 64 + 53288)); vlnctrl = vlnctrl | 1073741824U; ixgbe_write_reg(hw, (u32 )(j <= 63 ? 
j * 64 + 4136 : (j + -64) * 64 + 53288), vlnctrl); ldv_58309: i = i + 1; ldv_58311: ; if (adapter->num_rx_queues > i) { goto ldv_58310; } else { } goto ldv_58303; default: ; goto ldv_58303; } ldv_58303: ; return; } } static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter ) { u16 vid ; unsigned long tmp ; unsigned long tmp___0 ; { ixgbe_vlan_rx_add_vid(adapter->netdev, 129, 0); tmp = find_first_bit((unsigned long const *)(& adapter->active_vlans), 4096UL); vid = (u16 )tmp; goto ldv_58319; ldv_58318: ixgbe_vlan_rx_add_vid(adapter->netdev, 129, (int )vid); tmp___0 = find_next_bit((unsigned long const *)(& adapter->active_vlans), 4096UL, (unsigned long )((int )vid + 1)); vid = (u16 )tmp___0; ldv_58319: ; if ((unsigned int )vid <= 4095U) { goto ldv_58318; } else { } return; } } static int ixgbe_write_mc_addr_list(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; bool tmp___0 ; int tmp___1 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; tmp___0 = netif_running((struct net_device const *)netdev); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } if ((unsigned long )hw->mac.ops.update_mc_addr_list != (unsigned long )((s32 (*)(struct ixgbe_hw * , struct net_device * ))0)) { (*(hw->mac.ops.update_mc_addr_list))(hw, netdev); } else { return (-12); } ixgbe_restore_vf_multicasts(adapter); return (netdev->mc.count); } } void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int i ; { hw = & adapter->hw; i = 0; goto ldv_58332; ldv_58331: ; if (((int )(adapter->mac_table + (unsigned long )i)->state & 4) != 0) { (*(hw->mac.ops.set_rar))(hw, (u32 )i, (u8 *)(& (adapter->mac_table + (unsigned long )i)->addr), (u32 )(adapter->mac_table + (unsigned long )i)->queue, 2147483648U); } else { (*(hw->mac.ops.clear_rar))(hw, (u32 )i); } (adapter->mac_table + (unsigned long )i)->state = (unsigned int 
/* End of ixgbe_full_sync_mac_table (clears the modified bit, loops over all num_rar_entries). ixgbe_sync_mac_table: same RAR write/clear, but only for entries whose modified bit (0x2) is set — the incremental variant of the full sync above. ixgbe_flush_sw_mac_table: marks every software mac_table entry modified (|2U) and not-in-use (&65531 = ~0x4), zeroes its address and queue, then pushes the result to HW via ixgbe_sync_mac_table. ixgbe_available_rars: counts entries with state == 0 (completely free). State bits as used throughout: 0x1 presumably "default", 0x2 modified, 0x4 in-use — inferred from usage here, confirm against ixgbe.h. */
)(adapter->mac_table + (unsigned long )i)->state & 65533U; i = i + 1; ldv_58332: ; if ((u32 )i < hw->mac.num_rar_entries) { goto ldv_58331; } else { } return; } } static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int i ; { hw = & adapter->hw; i = 0; goto ldv_58340; ldv_58339: ; if (((int )(adapter->mac_table + (unsigned long )i)->state & 2) != 0) { if (((int )(adapter->mac_table + (unsigned long )i)->state & 4) != 0) { (*(hw->mac.ops.set_rar))(hw, (u32 )i, (u8 *)(& (adapter->mac_table + (unsigned long )i)->addr), (u32 )(adapter->mac_table + (unsigned long )i)->queue, 2147483648U); } else { (*(hw->mac.ops.clear_rar))(hw, (u32 )i); } (adapter->mac_table + (unsigned long )i)->state = (unsigned int )(adapter->mac_table + (unsigned long )i)->state & 65533U; } else { } i = i + 1; ldv_58340: ; if ((u32 )i < hw->mac.num_rar_entries) { goto ldv_58339; } else { } return; } } static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter ) { int i ; struct ixgbe_hw *hw ; { hw = & adapter->hw; i = 0; goto ldv_58348; ldv_58347: (adapter->mac_table + (unsigned long )i)->state = (u16 )((unsigned int )(adapter->mac_table + (unsigned long )i)->state | 2U); (adapter->mac_table + (unsigned long )i)->state = (unsigned int )(adapter->mac_table + (unsigned long )i)->state & 65531U; eth_zero_addr((u8 *)(& (adapter->mac_table + (unsigned long )i)->addr)); (adapter->mac_table + (unsigned long )i)->queue = 0U; i = i + 1; ldv_58348: ; if ((u32 )i < hw->mac.num_rar_entries) { goto ldv_58347; } else { } ixgbe_sync_mac_table(adapter); return; } } static int ixgbe_available_rars(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int i ; int count ; { hw = & adapter->hw; count = 0; i = 0; goto ldv_58357; ldv_58356: ; if ((unsigned int )(adapter->mac_table + (unsigned long )i)->state == 0U) { count = count + 1; } else { } i = i + 1; ldv_58357: ; if ((u32 )i < hw->mac.num_rar_entries) { goto ldv_58356; } else { } return (count); } } static void
/* ixgbe_mac_set_default_filter: copies the 6-byte `addr` into mac_table[0], assigns it the default pool (ring_feature[1].offset), sets state 5U (in-use | default), and programs RAR 0 directly. ixgbe_add_mac_filter: rejects all-zero addresses (-22 = -EINVAL), takes the first RAR slot without the in-use bit, records addr/queue, syncs, and returns the slot index; -12 (-ENOMEM) when the table is full. ixgbe_del_mac_filter: finds the entry matching both addr and queue, marks it modified + free, zeroes it (continues onto the next line)... */
ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter , u8 *addr ) { struct ixgbe_hw *hw ; { hw = & adapter->hw; memcpy((void *)(& (adapter->mac_table)->addr), (void const *)addr, 6UL); (adapter->mac_table)->queue = adapter->ring_feature[1].offset; (adapter->mac_table)->state = 5U; (*(hw->mac.ops.set_rar))(hw, 0U, (u8 *)(& (adapter->mac_table)->addr), (u32 )(adapter->mac_table)->queue, 2147483648U); return; } } int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter , u8 *addr , u16 queue ) { struct ixgbe_hw *hw ; int i ; bool tmp ; { hw = & adapter->hw; tmp = is_zero_ether_addr((u8 const *)addr); if ((int )tmp) { return (-22); } else { } i = 0; goto ldv_58373; ldv_58372: ; if (((int )(adapter->mac_table + (unsigned long )i)->state & 4) != 0) { goto ldv_58371; } else { } (adapter->mac_table + (unsigned long )i)->state = (u16 )((unsigned int )(adapter->mac_table + (unsigned long )i)->state | 6U); ether_addr_copy((u8 *)(& (adapter->mac_table + (unsigned long )i)->addr), (u8 const *)addr); (adapter->mac_table + (unsigned long )i)->queue = queue; ixgbe_sync_mac_table(adapter); return (i); ldv_58371: i = i + 1; ldv_58373: ; if ((u32 )i < hw->mac.num_rar_entries) { goto ldv_58372; } else { } return (-12); } } int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter , u8 *addr , u16 queue ) { int i ; struct ixgbe_hw *hw ; bool tmp ; bool tmp___0 ; { hw = & adapter->hw; tmp = is_zero_ether_addr((u8 const *)addr); if ((int )tmp) { return (-22); } else { } i = 0; goto ldv_58383; ldv_58382: tmp___0 = ether_addr_equal((u8 const *)addr, (u8 const *)(& (adapter->mac_table + (unsigned long )i)->addr)); if ((int )tmp___0 && (int )(adapter->mac_table + (unsigned long )i)->queue == (int )queue) { (adapter->mac_table + (unsigned long )i)->state = (u16 )((unsigned int )(adapter->mac_table + (unsigned long )i)->state | 2U); (adapter->mac_table + (unsigned long )i)->state = (unsigned int )(adapter->mac_table + (unsigned long )i)->state & 65531U; eth_zero_addr((u8 *)(&
/* ...ixgbe_del_mac_filter end: syncs the table and returns 0 on success, -12 when no match. ixgbe_write_uc_addr_list: fails with -12 when the netdev has more unicast addresses than free RARs; otherwise walks the netdev uc list (the __mptr assignments are CIL's expansion of list_for_each_entry) and re-installs each address for pool `vfn` via del+add; returns the count written. ixgbe_set_rx_mode begins: reads FCTRL (20608 = 0x5080) and VLNCTRL (20616 = 0x5088 — presumed identities, confirm), then rebuilds them from netdev->flags; 256U / 512U match the kernel's IFF_PROMISC / IFF_ALLMULTI values. */
(adapter->mac_table + (unsigned long )i)->addr)); (adapter->mac_table + (unsigned long )i)->queue = 0U; ixgbe_sync_mac_table(adapter); return (0); } else { } i = i + 1; ldv_58383: ; if ((u32 )i < hw->mac.num_rar_entries) { goto ldv_58382; } else { } return (-12); } } static int ixgbe_write_uc_addr_list(struct net_device *netdev , int vfn ) { struct ixgbe_adapter *adapter ; void *tmp ; int count ; int tmp___0 ; struct netdev_hw_addr *ha ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; count = 0; tmp___0 = ixgbe_available_rars(adapter); if (netdev->uc.count > tmp___0) { return (-12); } else { } if (netdev->uc.count != 0) { __mptr = (struct list_head const *)netdev->uc.list.next; ha = (struct netdev_hw_addr *)__mptr; goto ldv_58397; ldv_58396: ixgbe_del_mac_filter(adapter, (u8 *)(& ha->addr), (int )((u16 )vfn)); ixgbe_add_mac_filter(adapter, (u8 *)(& ha->addr), (int )((u16 )vfn)); count = count + 1; __mptr___0 = (struct list_head const *)ha->list.next; ha = (struct netdev_hw_addr *)__mptr___0; ldv_58397: ; if ((unsigned long )(& ha->list) != (unsigned long )(& netdev->uc.list)) { goto ldv_58396; } else { } } else { } return (count); } } void ixgbe_set_rx_mode(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; u32 fctrl ; u32 vmolr ; u32 vlnctrl ; int count ; u32 tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; vmolr = 150994944U; fctrl = ixgbe_read_reg(hw, 20608U); vlnctrl = ixgbe_read_reg(hw, 20616U); fctrl = fctrl & 4294967293U; fctrl = fctrl | 1024U; fctrl = fctrl | 8192U; fctrl = fctrl | 4096U; fctrl = fctrl & 4294966527U; vlnctrl = vlnctrl & 2684354559U; if ((netdev->flags & 256U) != 0U) { hw->addr_ctrl.user_set_promisc = 1; fctrl = fctrl | 768U; vmolr = vmolr | 268435456U; if ((adapter->flags & 8404992U) != 0U) { vlnctrl =
/* Remainder of ixgbe_set_rx_mode: non-promisc path keeps VLAN filtering on (bit 30 of vlnctrl); when unicast-list programming fails (count < 0) it falls back to unicast-promisc bits, when multicast programming fails it falls back to multicast-promisc; for non-82598 MACs the per-pool VMOLR at (ring_feature[1].offset + 15360) * 4 is merged rather than overwritten; an NTUPLE-like feature bit (274877906944ULL in netdev features) forces fctrl bits on; finally writes VLNCTRL then FCTRL and toggles VLAN stripping to match netdev->features bit 256ULL. ixgbe_napi_enable_all / ixgbe_napi_disable_all follow: walk q_vector[0..num_q_vectors); disable busy-waits (printk + usleep_range) until ixgbe_qv_disable succeeds for each vector. */
vlnctrl | 1610612736U; } else { } } else { if ((netdev->flags & 512U) != 0U) { fctrl = fctrl | 256U; vmolr = vmolr | 268435456U; } else { } vlnctrl = vlnctrl | 1073741824U; hw->addr_ctrl.user_set_promisc = 0; } count = ixgbe_write_uc_addr_list(netdev, (int )adapter->ring_feature[1].offset); if (count < 0) { fctrl = fctrl | 512U; vmolr = vmolr | 67108864U; } else { } count = ixgbe_write_mc_addr_list(netdev); if (count < 0) { fctrl = fctrl | 256U; vmolr = vmolr | 268435456U; } else if (count != 0) { vmolr = vmolr | 33554432U; } else { } if ((unsigned int )hw->mac.type != 1U) { tmp___0 = ixgbe_read_reg(hw, (u32 )(((int )adapter->ring_feature[1].offset + 15360) * 4)); vmolr = (tmp___0 & 3925868543U) | vmolr; ixgbe_write_reg(hw, (u32 )(((int )adapter->ring_feature[1].offset + 15360) * 4), vmolr); } else { } if (((adapter->netdev)->features & 274877906944ULL) != 0ULL) { fctrl = fctrl | 5122U; fctrl = fctrl & 4294959103U; } else { } ixgbe_write_reg(hw, 20616U, vlnctrl); ixgbe_write_reg(hw, 20608U, fctrl); if ((netdev->features & 256ULL) != 0ULL) { ixgbe_vlan_strip_enable(adapter); } else { ixgbe_vlan_strip_disable(adapter); } return; } } static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter ) { int q_idx ; { q_idx = 0; goto ldv_58413; ldv_58412: ixgbe_qv_init_lock(adapter->q_vector[q_idx]); napi_enable(& (adapter->q_vector[q_idx])->napi); q_idx = q_idx + 1; ldv_58413: ; if (adapter->num_q_vectors > q_idx) { goto ldv_58412; } else { } return; } } static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter ) { int q_idx ; bool tmp ; int tmp___0 ; { q_idx = 0; goto ldv_58423; ldv_58422: napi_disable(& (adapter->q_vector[q_idx])->napi); goto ldv_58420; ldv_58419: printk("\016ixgbe: QV %d locked\n", q_idx); usleep_range(1000UL, 20000UL); ldv_58420: tmp = ixgbe_qv_disable(adapter->q_vector[q_idx]); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_58419; } else { } q_idx = q_idx + 1; ldv_58423: ; if (adapter->num_q_vectors > q_idx) { goto
/* ixgbe_configure_dcb: returns early when the DCB-enabled flag (4096U) is clear, after capping GSO size at 65536 on 82598; with DCB on, 82598 gets GSO capped at 32768, FCoE raises max_frame to at least 3072; CEE mode (dcbx_cap & 4) programs TC credits for both directions plus the HW DCB config, otherwise IEEE ETS/PFC structures are applied when both are present; for non-82598 it derives a log2-style `msb` from the RSS indices and writes msb * 0x11111111 to offset 60528 (replicating the value into each 4-bit TC field — presumably an RQTC/UP2TC mapping register, confirm). ixgbe_hpbthresh begins: computes the high flow-control watermark from the MTU-derived frame size. */
ldv_58422; } else { } return; } } static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int max_frame ; int _max1 ; int _max2 ; u32 msb ; u16 rss_i ; { hw = & adapter->hw; max_frame = (int )((adapter->netdev)->mtu + 18U); if ((adapter->flags & 4096U) == 0U) { if ((unsigned int )hw->mac.type == 1U) { netif_set_gso_max_size(adapter->netdev, 65536U); } else { } return; } else { } if ((unsigned int )hw->mac.type == 1U) { netif_set_gso_max_size(adapter->netdev, 32768U); } else { } if (((adapter->netdev)->features & 2147483648ULL) != 0ULL) { _max1 = max_frame; _max2 = 3072; max_frame = _max1 > _max2 ? _max1 : _max2; } else { } if (((int )adapter->dcbx_cap & 4) != 0) { ixgbe_dcb_calculate_tc_credits(hw, & adapter->dcb_cfg, max_frame, 0); ixgbe_dcb_calculate_tc_credits(hw, & adapter->dcb_cfg, max_frame, 1); ixgbe_dcb_hw_config(hw, & adapter->dcb_cfg); } else if ((unsigned long )adapter->ixgbe_ieee_ets != (unsigned long )((struct ieee_ets *)0) && (unsigned long )adapter->ixgbe_ieee_pfc != (unsigned long )((struct ieee_pfc *)0)) { ixgbe_dcb_hw_ets(& adapter->hw, adapter->ixgbe_ieee_ets, max_frame); ixgbe_dcb_hw_pfc_config(& adapter->hw, (int )(adapter->ixgbe_ieee_pfc)->pfc_en, (u8 *)(& (adapter->ixgbe_ieee_ets)->prio_tc)); } else { } if ((unsigned int )hw->mac.type != 1U) { msb = 0U; rss_i = (unsigned int )adapter->ring_feature[2].indices + 65535U; goto ldv_58436; ldv_58435: msb = msb + 1U; rss_i = (u16 )((int )rss_i >> 1); ldv_58436: ; if ((unsigned int )rss_i != 0U) { goto ldv_58435; } else { } ixgbe_write_reg(hw, 60528U, msb * 286331153U); } else { } return; } } static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter , int pb ) { struct ixgbe_hw *hw ; struct net_device *dev ; int link ; int tc ; int kb ; int marker ; u32 dv_id ; u32 rx_pba ; u8 tmp ; u32 tmp___0 ; { hw = & adapter->hw; dev = adapter->netdev; link = (int )(dev->mtu + 38U); tc = link; if ((dev->features & 2147483648ULL) != 0ULL && tc <= 3071) { tmp =
/* ixgbe_hpbthresh continued: when FCoE is active and `pb` is the FCoE TC, the frame size is pinned to 3072; a MAC-family-specific delay value dv_id is computed (X540-class MACs use the larger 3373344 constant), inflated when SR-IOV (flag 8388608U) is on, converted to KB (ceil / 8192), and compared against the packet-buffer size read from (pb + 3840) * 4 (value stored in the upper bits, hence >> 10); if the headroom is insufficient a warning is logged (the run-together message text is from the original driver) and the watermark falls back to tc + 1. ixgbe_lpbthresh: analogous low watermark — no register read, pure computation from MTU/TC. ixgbe_pbthresh_setup begins: per-TC loop filling hw->fc.high_water/low_water from the two helpers... */
ixgbe_fcoe_get_tc(adapter); if ((int )tmp == pb) { tc = 3072; } else { } } else { } switch ((unsigned int )hw->mac.type) { case 3U: ; case 4U: ; case 5U: dv_id = (u32 )(((link * 288 + 3373344) / 25 + 1) + tc * 16); goto ldv_58453; default: dv_id = (u32 )(((link * 288 + 2009376) / 25 + 1) + tc * 16); goto ldv_58453; } ldv_58453: ; if ((adapter->flags & 8388608U) != 0U) { dv_id = (u32 )(tc * 8) + dv_id; } else { } kb = (int )((dv_id + 8191U) / 8192U); tmp___0 = ixgbe_read_reg(hw, (u32 )((pb + 3840) * 4)); rx_pba = tmp___0 >> 10; marker = (int )(rx_pba - (u32 )kb); if (marker < 0) { if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "Packet Buffer(%i) can not provide enoughheadroom to support flow control.Decrease MTU or number of traffic classes\n", pb); } else { } marker = tc + 1; } else { } return (marker); } } static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter , int pb ) { struct ixgbe_hw *hw ; struct net_device *dev ; int tc ; u32 dv_id ; int tmp ; { hw = & adapter->hw; dev = adapter->netdev; tc = (int )(dev->mtu + 18U); if ((dev->features & 2147483648ULL) != 0ULL && tc <= 3071) { tmp = netdev_get_prio_tc_map((struct net_device const *)dev, (u32 )adapter->fcoe.up); if (tmp == pb) { tc = 3072; } else { } } else { } switch ((unsigned int )hw->mac.type) { case 3U: ; case 4U: ; case 5U: dv_id = (u32 )((tc + 900) * 16 + 1); goto ldv_58466; default: dv_id = (u32 )((tc + 900) * 32 + 2); goto ldv_58466; } ldv_58466: ; return ((int )((dv_id + 8191U) / 8192U)); } } static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int num_tc ; int tmp ; int i ; int tmp___0 ; int tmp___1 ; { hw = & adapter->hw; tmp = netdev_get_num_tc(adapter->netdev); num_tc = tmp; if (num_tc == 0) { num_tc = 1; } else { } i = 0; goto ldv_58475; ldv_58474: tmp___0 = ixgbe_hpbthresh(adapter, i); hw->fc.high_water[i] = (u32 )tmp___0; tmp___1 = ixgbe_lpbthresh(adapter, i); hw->fc.low_water[i] = (u32 )tmp___1; if
/* ixgbe_pbthresh_setup end: low_water is zeroed when it would exceed high_water; high_water is cleared for the remaining TCs up to index 7. ixgbe_configure_pb: reserves flow-director headroom (32 << fdir_pballoc KB) when either FDIR flag (262144U / 524288U) is set, calls mac.ops.set_rxpba, then the threshold setup above. ixgbe_fdir_filter_restore begins: under fdir_perfect_lock, re-applies the saved FDIR input mask when the filter list is non-empty, then walks the fdir_filter_list hlist — the ____ptr/__mptr assignments are CIL's expansion of hlist_entry/hlist_for_each_entry_safe (node2 caches the next node in the loop-test block before each body runs)... */
(hw->fc.low_water[i] > hw->fc.high_water[i]) { hw->fc.low_water[i] = 0U; } else { } i = i + 1; ldv_58475: ; if (i < num_tc) { goto ldv_58474; } else { } goto ldv_58478; ldv_58477: hw->fc.high_water[i] = 0U; i = i + 1; ldv_58478: ; if (i <= 7) { goto ldv_58477; } else { } return; } } static void ixgbe_configure_pb(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int hdrm ; u8 tc ; int tmp ; { hw = & adapter->hw; tmp = netdev_get_num_tc(adapter->netdev); tc = (u8 )tmp; if ((adapter->flags & 262144U) != 0U || (adapter->flags & 524288U) != 0U) { hdrm = 32 << (int )adapter->fdir_pballoc; } else { hdrm = 0; } (*(hw->mac.ops.set_rxpba))(hw, (int )tc, (u32 )hdrm, 0); ixgbe_pbthresh_setup(adapter); return; } } static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; struct hlist_node *node2 ; struct ixgbe_fdir_filter *filter ; int tmp ; struct hlist_node *____ptr ; struct hlist_node const *__mptr ; struct ixgbe_fdir_filter *tmp___0 ; struct hlist_node *____ptr___0 ; struct hlist_node const *__mptr___0 ; struct ixgbe_fdir_filter *tmp___1 ; { hw = & adapter->hw; spin_lock(& adapter->fdir_perfect_lock); tmp = hlist_empty((struct hlist_head const *)(& adapter->fdir_filter_list)); if (tmp == 0) { ixgbe_fdir_set_input_mask_82599(hw, & adapter->fdir_mask); } else { } ____ptr = adapter->fdir_filter_list.first; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)____ptr; tmp___0 = (struct ixgbe_fdir_filter *)__mptr; } else { tmp___0 = (struct ixgbe_fdir_filter *)0; } filter = tmp___0; goto ldv_58502; ldv_58501: ixgbe_fdir_write_perfect_filter_82599(hw, & filter->filter, (int )filter->sw_idx, (unsigned int )filter->action != 127U ?
/* ...each saved perfect filter is rewritten to HW; action 127U is the drop/no-ring sentinel, otherwise the target ring's reg_idx is used; the lock is released after the walk. ixgbe_macvlan_set_rx_mode: read-modify-write of the per-pool VMOLR at (pool + 15360) * 4 (presumably PFVML2FLT/VMOLR — confirm); IFF_ALLMULTI (512U) pools get multicast-promisc, others get the MC-hash bit plus a refresh of the HW multicast list, and the PF unicast list is re-pushed for this pool. ixgbe_fwd_psrtype: computes the PSRTYPE value for a macvlan offload pool — no-op on 82598, extra split-header bits when the pool has >1 or >3 RSS queues; the register offset selection continues on the next line. */
(int )(adapter->rx_ring[(int )filter->action])->reg_idx : 127); ____ptr___0 = node2; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { __mptr___0 = (struct hlist_node const *)____ptr___0; tmp___1 = (struct ixgbe_fdir_filter *)__mptr___0; } else { tmp___1 = (struct ixgbe_fdir_filter *)0; } filter = tmp___1; ldv_58502: ; if ((unsigned long )filter != (unsigned long )((struct ixgbe_fdir_filter *)0)) { node2 = filter->fdir_node.next; goto ldv_58501; } else { } spin_unlock(& adapter->fdir_perfect_lock); return; } } static void ixgbe_macvlan_set_rx_mode(struct net_device *dev , unsigned int pool , struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 vmolr ; { hw = & adapter->hw; vmolr = ixgbe_read_reg(hw, (pool + 15360U) * 4U); vmolr = vmolr | 184549376U; vmolr = vmolr & 4026531839U; if ((dev->flags & 512U) != 0U) { vmolr = vmolr | 268435456U; } else { vmolr = vmolr | 33554432U; (*(hw->mac.ops.update_mc_addr_list))(hw, dev); } ixgbe_write_uc_addr_list(adapter->netdev, (int )pool); ixgbe_write_reg(hw, (pool + 15360U) * 4U, vmolr); return; } } static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter ) { struct ixgbe_adapter *adapter ; int rss_i ; struct ixgbe_hw *hw ; u16 pool ; u32 psrtype ; { adapter = vadapter->real_adapter; rss_i = adapter->num_rx_queues_per_pool; hw = & adapter->hw; pool = (u16 )vadapter->pool; psrtype = 4912U; if ((unsigned int )hw->mac.type == 1U) { return; } else { } if (rss_i > 3) { psrtype = psrtype | 1073741824U; } else if (rss_i > 1) { psrtype = psrtype | 536870912U; } else { } ixgbe_write_reg(hw, (u32 )((int )pool + (int )adapter->ring_feature[1].offset <= 15 ?
/* ixgbe_fwd_psrtype end: PSRTYPE lives in one of two register banks depending on pool index <= 15. ixgbe_clean_rx_ring: for each descriptor slot, frees any held skb (DMA-unmapping its page first when the stack marked it released via ixgbe_cb) and any paged buffer (unmap + __free_pages at the ring's page order), then memsets the rx_buffer_info array (32 bytes per entry) and the descriptor memory, and resets next_to_alloc/next_to_clean/next_to_use. ixgbe_disable_fwd_ring begins: computes the absolute queue index for IRQ masking. */
(((int )pool + (int )adapter->ring_feature[1].offset) + 5408) * 4 : (((int )pool + (int )adapter->ring_feature[1].offset) + 14976) * 4), psrtype); return; } } static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring ) { struct device *dev ; unsigned long size ; u16 i ; struct ixgbe_rx_buffer *rx_buffer ; struct sk_buff *skb ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; { dev = rx_ring->dev; if ((unsigned long )rx_ring->__annonCompField118.rx_buffer_info == (unsigned long )((struct ixgbe_rx_buffer *)0)) { return; } else { } i = 0U; goto ldv_58529; ldv_58528: rx_buffer = rx_ring->__annonCompField118.rx_buffer_info + (unsigned long )i; if ((unsigned long )rx_buffer->skb != (unsigned long )((struct sk_buff *)0)) { skb = rx_buffer->skb; if ((int )((struct ixgbe_cb *)(& skb->cb))->page_released) { tmp = ixgbe_rx_bufsz(rx_ring); dma_unmap_page(dev, ((struct ixgbe_cb *)(& skb->cb))->dma, (size_t )tmp, 2); } else { } consume_skb(skb); rx_buffer->skb = (struct sk_buff *)0; } else { } if ((unsigned long )rx_buffer->page == (unsigned long )((struct page *)0)) { goto ldv_58527; } else { } tmp___0 = ixgbe_rx_pg_order(rx_ring); dma_unmap_page(dev, rx_buffer->dma, 4096UL << (int )tmp___0, 2); tmp___1 = ixgbe_rx_pg_order(rx_ring); __free_pages(rx_buffer->page, tmp___1); rx_buffer->page = (struct page *)0; ldv_58527: i = (u16 )((int )i + 1); ldv_58529: ; if ((int )rx_ring->count > (int )i) { goto ldv_58528; } else { } size = (unsigned long )rx_ring->count * 32UL; memset((void *)rx_ring->__annonCompField118.rx_buffer_info, 0, size); memset(rx_ring->desc, 0, (size_t )rx_ring->size); rx_ring->__annonCompField120.next_to_alloc = 0U; rx_ring->next_to_clean = 0U; rx_ring->next_to_use = 0U; return; } } static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter , struct ixgbe_ring *rx_ring ) { struct ixgbe_adapter *adapter ; int index ; { adapter = vadapter->real_adapter; index = (int )((unsigned int )rx_ring->queue_index + vadapter->rx_base_queue);
/* ixgbe_disable_fwd_ring end: stops the RX queue, waits 10-20 ms, masks its interrupt, cleans the ring and detaches the accelerator pointer. ixgbe_fwd_ring_down: stops TX on the macvlan netdev, disables each of the pool's RX rings and points them (and the pool's TX rings) back at the PF netdev. ixgbe_fwd_ring_up begins: returns 0 immediately unless the pool's bit is set in adapter->fwd_bitmask; the `_ddebug descriptor` block is the compiler-expanded netdev_dbg() machinery; the literal LDV build path in descriptor.filename is part of the generated artifact — leave it untouched. */
ixgbe_disable_rx_queue(adapter, rx_ring); usleep_range(10000UL, 20000UL); ixgbe_irq_disable_queues(adapter, 1ULL << index); ixgbe_clean_rx_ring(rx_ring); rx_ring->l2_accel_priv = (struct ixgbe_fwd_adapter *)0; return; } } static int ixgbe_fwd_ring_down(struct net_device *vdev , struct ixgbe_fwd_adapter *accel ) { struct ixgbe_adapter *adapter ; unsigned int rxbase ; unsigned int txbase ; int i ; { adapter = accel->real_adapter; rxbase = accel->rx_base_queue; txbase = accel->tx_base_queue; netif_tx_stop_all_queues(vdev); i = 0; goto ldv_58546; ldv_58545: ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + (unsigned int )i]); (adapter->rx_ring[rxbase + (unsigned int )i])->netdev = adapter->netdev; i = i + 1; ldv_58546: ; if (adapter->num_rx_queues_per_pool > i) { goto ldv_58545; } else { } i = 0; goto ldv_58549; ldv_58548: (adapter->tx_ring[txbase + (unsigned int )i])->l2_accel_priv = (struct ixgbe_fwd_adapter *)0; (adapter->tx_ring[txbase + (unsigned int )i])->netdev = adapter->netdev; i = i + 1; ldv_58549: ; if (adapter->num_rx_queues_per_pool > i) { goto ldv_58548; } else { } return (0); } } static int ixgbe_fwd_ring_up(struct net_device *vdev , struct ixgbe_fwd_adapter *accel ) { struct ixgbe_adapter *adapter ; unsigned int rxbase ; unsigned int txbase ; unsigned int queues ; int i ; int baseq ; int err ; int tmp ; struct _ddebug descriptor ; long tmp___0 ; unsigned int __min1 ; unsigned int __min2 ; bool tmp___1 ; { adapter = accel->real_adapter; err = 0; tmp = variable_test_bit((long )accel->pool, (unsigned long const volatile *)(& adapter->fwd_bitmask)); if (tmp == 0) { return (0); } else { } baseq = accel->pool * adapter->num_rx_queues_per_pool; descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_fwd_ring_up"; descriptor.filename =
/* ixgbe_fwd_ring_up continued: after the optional dynamic-debug print, records the pool's base queue as both rx_base_queue and tx_base_queue, disables then reattaches each RX ring to the macvlan netdev (setting l2_accel_priv and reconfiguring the ring), points the TX rings at the macvlan netdev as well, and computes queues = min(num_rx_queues_per_pool, vdev->num_tx_queues) — the __min1/__min2 pair is CIL's expansion of the min() macro. */
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c"; descriptor.format = "pool %i:%i queues %i:%i VSI bitmask %lx\n"; descriptor.lineno = 4624U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", accel->pool, adapter->num_rx_pools, baseq, adapter->num_rx_queues_per_pool + baseq, adapter->fwd_bitmask); } else { } accel->netdev = vdev; rxbase = (unsigned int )baseq; accel->rx_base_queue = rxbase; txbase = (unsigned int )baseq; accel->tx_base_queue = txbase; i = 0; goto ldv_58565; ldv_58564: ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + (unsigned int )i]); i = i + 1; ldv_58565: ; if (adapter->num_rx_queues_per_pool > i) { goto ldv_58564; } else { } i = 0; goto ldv_58568; ldv_58567: (adapter->rx_ring[rxbase + (unsigned int )i])->netdev = vdev; (adapter->rx_ring[rxbase + (unsigned int )i])->l2_accel_priv = accel; ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + (unsigned int )i]); i = i + 1; ldv_58568: ; if (adapter->num_rx_queues_per_pool > i) { goto ldv_58567; } else { } i = 0; goto ldv_58571; ldv_58570: (adapter->tx_ring[txbase + (unsigned int )i])->netdev = vdev; (adapter->tx_ring[txbase + (unsigned int )i])->l2_accel_priv = accel; i = i + 1; ldv_58571: ; if (adapter->num_rx_queues_per_pool > i) { goto ldv_58570; } else { } __min1 = (unsigned int )adapter->num_rx_queues_per_pool; __min2 = vdev->num_tx_queues; queues = __min1 < __min2 ?
__min1 : __min2; err = netif_set_real_num_tx_queues(vdev, queues); if (err != 0) { goto fwd_queue_err; } else { } err = netif_set_real_num_rx_queues(vdev, queues); if (err != 0) { goto fwd_queue_err; } else { } tmp___1 = is_valid_ether_addr((u8 const *)vdev->dev_addr); if ((int )tmp___1) { ixgbe_add_mac_filter(adapter, vdev->dev_addr, (int )((u16 )accel->pool)); } else { } ixgbe_fwd_psrtype(accel); ixgbe_macvlan_set_rx_mode(vdev, (unsigned int )accel->pool, adapter); return (err); fwd_queue_err: ixgbe_fwd_ring_down(vdev, accel); return (err); } } static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter ) { struct net_device *upper ; struct list_head *iter ; int err ; struct macvlan_dev *dfwd ; void *tmp ; struct ixgbe_fwd_adapter *vadapter ; bool tmp___0 ; { iter = & (adapter->netdev)->all_adj_list.upper; upper = netdev_all_upper_get_next_dev_rcu(adapter->netdev, & iter); goto ldv_58587; ldv_58586: tmp___0 = netif_is_macvlan(upper); if ((int )tmp___0) { tmp = netdev_priv((struct net_device const *)upper); dfwd = (struct macvlan_dev *)tmp; vadapter = (struct ixgbe_fwd_adapter *)dfwd->fwd_priv; if ((unsigned long )dfwd->fwd_priv != (unsigned long )((void *)0)) { err = ixgbe_fwd_ring_up(upper, vadapter); if (err != 0) { goto ldv_58585; } else { } } else { } } else { } ldv_58585: upper = netdev_all_upper_get_next_dev_rcu(adapter->netdev, & iter); ldv_58587: ; if ((unsigned long )upper != (unsigned long )((struct net_device *)0)) { goto ldv_58586; } else { } return; } } static void ixgbe_configure(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; { hw = & adapter->hw; ixgbe_configure_pb(adapter); ixgbe_configure_dcb(adapter); ixgbe_configure_virtualization(adapter); ixgbe_set_rx_mode(adapter->netdev); ixgbe_restore_vlan(adapter); switch ((unsigned int )hw->mac.type) { case 2U: ; case 3U: (*(hw->mac.ops.disable_rx_buff))(hw); goto ldv_58595; default: ; goto ldv_58595; } ldv_58595: ; if ((adapter->flags & 262144U) != 0U) { ixgbe_init_fdir_signature_82599(& 
adapter->hw, adapter->fdir_pballoc); } else if ((adapter->flags & 524288U) != 0U) { ixgbe_init_fdir_perfect_82599(& adapter->hw, adapter->fdir_pballoc); ixgbe_fdir_filter_restore(adapter); } else { } switch ((unsigned int )hw->mac.type) { case 2U: ; case 3U: (*(hw->mac.ops.enable_rx_buff))(hw); goto ldv_58599; default: ; goto ldv_58599; } ldv_58599: ixgbe_configure_fcoe(adapter); ixgbe_configure_tx(adapter); ixgbe_configure_rx(adapter); ixgbe_configure_dfwd(adapter); return; } } static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter ) { { if ((unsigned int )adapter->hw.mac.type == 1U) { adapter->flags2 = adapter->flags2 | 16U; } else { } adapter->flags2 = adapter->flags2 | 32U; return; } } static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw ) { u32 speed ; bool autoneg ; bool link_up ; int ret ; { link_up = 0; ret = -8; if ((unsigned long )hw->mac.ops.check_link != (unsigned long )((s32 (*)(struct ixgbe_hw * , ixgbe_link_speed * , bool * , bool ))0)) { ret = (*(hw->mac.ops.check_link))(hw, & speed, & link_up, 0); } else { } if (ret != 0) { return (ret); } else { } speed = hw->phy.autoneg_advertised; if (speed == 0U && (unsigned long )hw->mac.ops.get_link_capabilities != (unsigned long )((s32 (*)(struct ixgbe_hw * , ixgbe_link_speed * , bool * ))0)) { ret = (*(hw->mac.ops.get_link_capabilities))(hw, & speed, & autoneg); } else { } if (ret != 0) { return (ret); } else { } if ((unsigned long )hw->mac.ops.setup_link != (unsigned long )((s32 (*)(struct ixgbe_hw * , ixgbe_link_speed , bool ))0)) { ret = (*(hw->mac.ops.setup_link))(hw, speed, (int )link_up); } else { } return (ret); } } static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 gpie ; { hw = & adapter->hw; gpie = 0U; if ((adapter->flags & 8U) != 0U) { gpie = 2147483696U; gpie = gpie | 1073741824U; switch ((unsigned int )hw->mac.type) { case 1U: ixgbe_write_reg(hw, 2192U, 65535U); goto ldv_58617; case 2U: ; case 3U: ; case 4U: ; case 5U: ; default: 
ixgbe_write_reg(hw, 2768U, 4294967295U); ixgbe_write_reg(hw, 2772U, 4294967295U); goto ldv_58617; } ldv_58617: ; } else { ixgbe_write_reg(hw, 2192U, 65535U); } if ((adapter->flags & 8388608U) != 0U) { gpie = gpie & 4294918143U; switch ((int )adapter->ring_feature[1].mask) { case 120: gpie = gpie | 16384U; goto ldv_58624; case 124: gpie = gpie | 32768U; goto ldv_58624; default: gpie = gpie | 49152U; goto ldv_58624; } ldv_58624: ; } else { } if ((adapter->flags2 & 4U) != 0U) { switch ((unsigned int )adapter->hw.mac.type) { case 2U: gpie = gpie | 1U; goto ldv_58628; case 3U: gpie = gpie | 8388608U; goto ldv_58628; default: ; goto ldv_58628; } ldv_58628: ; } else { } if ((adapter->flags & 32768U) != 0U) { gpie = (u32 )*(hw->mvals + 9UL) | gpie; } else { } if ((unsigned int )hw->mac.type == 2U) { gpie = gpie | 2U; gpie = gpie | 4U; } else { } ixgbe_write_reg(hw, 2200U, gpie); return; } } static void ixgbe_up_complete(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int err ; u32 ctrl_ext ; bool tmp ; u32 esdp ; u32 tmp___0 ; { hw = & adapter->hw; ixgbe_get_hw_control(adapter); ixgbe_setup_gpie(adapter); if ((adapter->flags & 8U) != 0U) { ixgbe_configure_msix(adapter); } else { ixgbe_configure_msi_and_legacy(adapter); } if ((unsigned long )hw->mac.ops.enable_tx_laser != (unsigned long )((void (*)(struct ixgbe_hw * ))0)) { (*(hw->mac.ops.enable_tx_laser))(hw); } else { } if ((unsigned long )hw->phy.ops.set_phy_power != (unsigned long )((s32 (*)(struct ixgbe_hw * , bool ))0)) { (*(hw->phy.ops.set_phy_power))(hw, 1); } else { } __asm__ volatile ("": : : "memory"); clear_bit(2L, (unsigned long volatile *)(& adapter->state)); ixgbe_napi_enable_all(adapter); tmp = ixgbe_is_sfp(hw); if ((int )tmp) { ixgbe_sfp_link_config(adapter); } else { err = ixgbe_non_sfp_link_config(hw); if (err != 0) { if (((int )adapter->msg_enable & 2) != 0) { netdev_err((struct net_device const *)adapter->netdev, "link_config FAILED %d\n", err); } else { } } else { } } ixgbe_read_reg(hw, 2048U); 
ixgbe_irq_enable(adapter, 1, 1); if ((adapter->flags & 32768U) != 0U) { tmp___0 = ixgbe_read_reg(hw, 32U); esdp = tmp___0; if ((esdp & 2U) != 0U) { if ((int )adapter->msg_enable & 1) { netdev_crit((struct net_device const *)adapter->netdev, "Fan has stopped, replace the adapter\n"); } else { } } else { } } else { } adapter->flags = adapter->flags | 65536U; adapter->link_check_timeout = jiffies; ldv_mod_timer_51(& adapter->service_timer, jiffies); ctrl_ext = ixgbe_read_reg(hw, 24U); ctrl_ext = ctrl_ext | 16384U; ixgbe_write_reg(hw, 24U, ctrl_ext); return; } } void ixgbe_reinit_locked(struct ixgbe_adapter *adapter ) { int __ret_warn_on ; int tmp ; long tmp___0 ; int tmp___1 ; { tmp = preempt_count(); __ret_warn_on = ((unsigned long )tmp & 2096896UL) != 0UL; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c", 4926); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); (adapter->netdev)->trans_start = jiffies; goto ldv_58644; ldv_58643: usleep_range(1000UL, 2000UL); ldv_58644: tmp___1 = test_and_set_bit(1L, (unsigned long volatile *)(& adapter->state)); if (tmp___1 != 0) { goto ldv_58643; } else { } ixgbe_down(adapter); if ((adapter->flags & 8388608U) != 0U) { msleep(2000U); } else { } ixgbe_up(adapter); clear_bit(1L, (unsigned long volatile *)(& adapter->state)); return; } } void ixgbe_up(struct ixgbe_adapter *adapter ) { { ixgbe_configure(adapter); ixgbe_up_complete(adapter); return; } } void ixgbe_reset(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; struct net_device *netdev ; int err ; u8 old_addr[6U] ; bool tmp ; int tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; { hw = & adapter->hw; netdev = adapter->netdev; tmp = ixgbe_removed((void *)hw->hw_addr); if ((int )tmp) { return; } 
else { }
/* Body of ixgbe_reset(): serialize on state bit 7 (sleep-retry loop),
 * re-init the MAC, restore the MAC filter table and PTP state, and set the
 * PHY power according to running/WoL state. */
goto ldv_58657; ldv_58656: usleep_range(1000UL, 2000UL); ldv_58657: tmp___0 = test_and_set_bit(7L, (unsigned long volatile *)(& adapter->state)); if (tmp___0 != 0) { goto ldv_58656; } else { }
/* Clear transient flags2/flags bits before re-initializing the hardware --
 * exact bit meanings not visible here; TODO confirm against ixgbe.h. */
adapter->flags2 = adapter->flags2 & 4294967247U; adapter->flags = adapter->flags & 4294836223U;
err = (*(hw->mac.ops.init_hw))(hw);
/* 0 / -20 / -19 are treated as success-like (SFP-related soft errors,
 * presumably "SFP not present" / "not supported"); -12 and -24 get their
 * own diagnostics; anything else is a hard hardware error. */
switch (err) { case 0: ; case -20: ; case -19: ; goto ldv_58662; case -12: dev_err((struct device const *)(& (adapter->pdev)->dev), "master disable timed out\n"); goto ldv_58662; case -24: dev_warn((struct device const *)(& (adapter->pdev)->dev), "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); goto ldv_58662; default: dev_err((struct device const *)(& (adapter->pdev)->dev), "Hardware Error: %d\n", err); }
ldv_58662: clear_bit(7L, (unsigned long volatile *)(& adapter->state));
/* init_hw wiped the RAR table: save entry 0, flush the software table and
 * re-install the default filter, then restore the SAN MAC VMDq binding. */
memcpy((void *)(& old_addr), (void const *)(& (adapter->mac_table)->addr), (size_t )netdev->addr_len); ixgbe_flush_sw_mac_table(adapter); ixgbe_mac_set_default_filter(adapter, (u8 *)(& old_addr)); if ((unsigned int )hw->mac.san_mac_rar_index != 0U) { (*(hw->mac.ops.set_vmdq_san_mac))(hw, (u32 )adapter->ring_feature[1].offset); } else { }
/* State bit 8 presumably means "PTP running"; re-program the clock. */
tmp___1 = constant_test_bit(8L, (unsigned long const volatile *)(& adapter->state)); if (tmp___1 != 0) { ixgbe_ptp_reset(adapter); } else { }
/* Power the PHY down only when the interface is not running and WoL is
 * disabled; otherwise keep/turn it on. */
if ((unsigned long )hw->phy.ops.set_phy_power != (unsigned long )((s32 (*)(struct ixgbe_hw * , bool ))0)) { tmp___2 = netif_running((struct net_device const *)adapter->netdev); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3 && adapter->wol == 0U) { (*(hw->phy.ops.set_phy_power))(hw, 0); } else { (*(hw->phy.ops.set_phy_power))(hw, 1); } } else { } return; } }
/* ixgbe_clean_tx_ring() (declarations only here; body continues on the
 * next chunk line): free all software Tx buffers of one ring. */
static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring ) { struct ixgbe_tx_buffer *tx_buffer_info ; unsigned long size ; u16 i
; struct netdev_queue *tmp ;
/* Body of ixgbe_clean_tx_ring(): unmap/free every tx_buffer_info entry,
 * reset the BQL queue, zero the buffer-info array (48 bytes per entry --
 * presumably sizeof(struct ixgbe_tx_buffer)) and the descriptor memory,
 * and rewind the ring indices. No-op if the ring was never allocated. */
{ if ((unsigned long )tx_ring->__annonCompField118.tx_buffer_info == (unsigned long )((struct ixgbe_tx_buffer *)0)) { return; } else { }
i = 0U; goto ldv_58673; ldv_58672: tx_buffer_info = tx_ring->__annonCompField118.tx_buffer_info + (unsigned long )i; ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); i = (u16 )((int )i + 1); ldv_58673: ; if ((int )tx_ring->count > (int )i) { goto ldv_58672; } else { }
tmp = txring_txq((struct ixgbe_ring const *)tx_ring); netdev_tx_reset_queue(tmp);
size = (unsigned long )tx_ring->count * 48UL; memset((void *)tx_ring->__annonCompField118.tx_buffer_info, 0, size); memset(tx_ring->desc, 0, (size_t )tx_ring->size); tx_ring->next_to_use = 0U; tx_ring->next_to_clean = 0U; return; } }
/* ixgbe_clean_all_rx_rings(): clean every Rx ring of the adapter. */
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter ) { int i ; { i = 0; goto ldv_58680; ldv_58679: ixgbe_clean_rx_ring(adapter->rx_ring[i]); i = i + 1; ldv_58680: ; if (adapter->num_rx_queues > i) { goto ldv_58679; } else { } return; } }
/* ixgbe_clean_all_tx_rings(): clean every Tx ring of the adapter. */
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter ) { int i ; { i = 0; goto ldv_58687; ldv_58686: ixgbe_clean_tx_ring(adapter->tx_ring[i]); i = i + 1; ldv_58687: ; if (adapter->num_tx_queues > i) { goto ldv_58686; } else { } return; } }
/* ixgbe_fdir_filter_exit() (loop closes on the next chunk line): walk
 * fdir_filter_list under fdir_perfect_lock, unlinking and freeing every
 * Flow Director perfect-filter node. The pointer gymnastics below are
 * CIL's expansion of the hlist_for_each_entry_safe() macro. */
static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter ) { struct hlist_node *node2 ; struct ixgbe_fdir_filter *filter ; struct hlist_node *____ptr ; struct hlist_node const *__mptr ; struct ixgbe_fdir_filter *tmp ; struct hlist_node *____ptr___0 ; struct hlist_node const *__mptr___0 ; struct ixgbe_fdir_filter *tmp___0 ;
{ spin_lock(& adapter->fdir_perfect_lock);
____ptr = adapter->fdir_filter_list.first; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)____ptr; tmp = (struct ixgbe_fdir_filter *)__mptr; } else { tmp = (struct ixgbe_fdir_filter *)0; } filter = tmp; goto ldv_58704; ldv_58703: hlist_del(& filter->fdir_node); kfree((void const *)filter);
/* Continuation of ixgbe_fdir_filter_exit(): advance to the next node
 * (saved in node2 before the current one was freed), then reset the filter
 * count and drop the lock. */
____ptr___0 = node2; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { __mptr___0 = (struct hlist_node const *)____ptr___0; tmp___0 = (struct ixgbe_fdir_filter *)__mptr___0; } else { tmp___0 = (struct ixgbe_fdir_filter *)0; } filter = tmp___0; ldv_58704: ; if ((unsigned long )filter != (unsigned long )((struct ixgbe_fdir_filter *)0)) { node2 = filter->fdir_node.next; goto ldv_58703; } else { }
adapter->fdir_filter_count = 0; spin_unlock(& adapter->fdir_perfect_lock); return; } }
/* ixgbe_down() (continues on the next chunk line): quiesce the adapter.
 * Idempotent -- returns immediately if state bit 2 (DOWN) was already set. */
void ixgbe_down(struct ixgbe_adapter *adapter ) { struct net_device *netdev ; struct ixgbe_hw *hw ; struct net_device *upper ; struct list_head *iter ; int i ; int tmp ; struct macvlan_dev *vlan ; void *tmp___0 ; bool tmp___1 ; u8 reg_idx ; u32 tmp___2 ; int tmp___3 ;
{ netdev = adapter->netdev; hw = & adapter->hw;
tmp = test_and_set_bit(2L, (unsigned long volatile *)(& adapter->state)); if (tmp != 0) { return; } else { }
/* Stop Rx first: MAC-level disable, then each Rx queue individually, then
 * wait 10-20 ms for any in-flight DMA to complete. */
(*(hw->mac.ops.disable_rx))(hw); i = 0; goto ldv_58715; ldv_58714: ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); i = i + 1; ldv_58715: ; if (adapter->num_rx_queues > i) { goto ldv_58714; } else { } usleep_range(10000UL, 20000UL);
netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); netif_tx_disable(netdev);
/* Also stop every macvlan upper device that is offloaded onto this port
 * (fwd_priv non-NULL); CIL-expanded netdev_for_each_all_upper_dev_rcu(). */
iter = & (adapter->netdev)->all_adj_list.upper; upper = netdev_all_upper_get_next_dev_rcu(adapter->netdev, & iter); goto ldv_58719; ldv_58718: tmp___1 = netif_is_macvlan(upper); if ((int )tmp___1) { tmp___0 = netdev_priv((struct net_device const *)upper); vlan = (struct macvlan_dev *)tmp___0; if ((unsigned long )vlan->fwd_priv != (unsigned long )((void *)0)) { netif_tx_stop_all_queues(upper); netif_carrier_off(upper); netif_tx_disable(upper); } else { } } else { } upper = netdev_all_upper_get_next_dev_rcu(adapter->netdev, & iter); ldv_58719: ; if ((unsigned long )upper != (unsigned long )((struct net_device *)0)) { goto ldv_58718; } else { }
ixgbe_irq_disable(adapter); ixgbe_napi_disable_all(adapter);
adapter->flags2 =
/* Continuation of ixgbe_down(): clear service-task request bits, stop the
 * service timer, notify/quiesce VFs, flush Tx queues and disable DMA. */
adapter->flags2 & 4294967103U; adapter->flags = adapter->flags & 4294901759U; ldv_del_timer_sync_52(& adapter->service_timer);
/* With SR-IOV active: mask VF mailbox interrupts (reg 0x894), mark every VF
 * as not clear-to-send, ping them so they notice, and disable Tx/Rx. */
if (adapter->num_vfs != 0U) { ixgbe_write_reg(& adapter->hw, 2196U, 0U); i = 0; goto ldv_58722; ldv_58721: (adapter->vfinfo + (unsigned long )i)->clear_to_send = 0; i = i + 1; ldv_58722: ; if ((unsigned int )i < adapter->num_vfs) { goto ldv_58721; } else { } ixgbe_ping_all_vfs(adapter); ixgbe_disable_tx_rx(adapter); } else { }
/* Write TXDCTL for each Tx queue (stride 64 from base 0x6028) with what is
 * presumably the SWFLSH flush bit (0x04000000) -- TODO confirm. */
i = 0; goto ldv_58726; ldv_58725: reg_idx = (adapter->tx_ring[i])->reg_idx; ixgbe_write_reg(hw, (u32 )((int )reg_idx * 64 + 24616), 67108864U); i = i + 1; ldv_58726: ; if (adapter->num_tx_queues > i) { goto ldv_58725; } else { }
/* On 82599/X540/X550 (types 2-5), clear bit 0 of DMATXCTL (reg 0x4A80)
 * to disable Tx DMA engine-wide. */
switch ((unsigned int )hw->mac.type) { case 2U: ; case 3U: ; case 4U: ; case 5U: tmp___2 = ixgbe_read_reg(hw, 19072U); ixgbe_write_reg(hw, 19072U, tmp___2 & 4294967294U); goto ldv_58732; default: ; goto ldv_58732; }
ldv_58732: tmp___3 = pci_channel_offline(adapter->pdev); if (tmp___3 == 0) { ixgbe_reset(adapter); } else { }
if ((unsigned long )hw->mac.ops.disable_tx_laser != (unsigned long )((void (*)(struct ixgbe_hw * ))0)) { (*(hw->mac.ops.disable_tx_laser))(hw); } else { }
ixgbe_clean_all_tx_rings(adapter); ixgbe_clean_all_rx_rings(adapter); ixgbe_setup_dca(adapter); return; } }
/* ixgbe_tx_timeout(): .ndo_tx_timeout hook -- schedule an adapter reset. */
static void ixgbe_tx_timeout(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; ixgbe_tx_timeout_reset(adapter); return; } }
/* ixgbe_sw_init() (continues on the next chunk lines): one-time software
 * initialization of the adapter structure from PCI config data. */
static int ixgbe_sw_init(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; struct pci_dev *pdev ; unsigned int rss ; unsigned int fdir ; u32 fwsm ; int j ; struct tc_configuration *tc ; int __min1 ; u8 tmp ; int __min2 ; unsigned int tmp___0 ; int __min1___0 ; int __min2___0 ; unsigned int tmp___1 ; void *tmp___2 ; struct lock_class_key __key ; struct lock_class_key __key___0 ; s32 tmp___3 ;
{ hw = & adapter->hw; pdev = adapter->pdev; hw->vendor_id = pdev->vendor;
/* ixgbe_sw_init() body: capture PCI IDs, set feature defaults, then apply
 * per-MAC-type quirks (mac.type 1U=82598, 2U=82599, 3U=X540, 4U/5U=X550
 * variants -- presumably; confirm against the ixgbe_mac_type enum). */
hw->device_id = pdev->device; hw->revision_id = pdev->revision; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device;
/* RSS queue limit = min(max RSS indices for this MAC, online CPUs). */
tmp = ixgbe_max_rss_indices(adapter); __min1 = (int )tmp; tmp___0 = cpumask_weight(cpu_online_mask); __min2 = (int )tmp___0; rss = (unsigned int )(__min1 < __min2 ? __min1 : __min2); adapter->ring_feature[2].limit = (u16 )rss;
adapter->flags2 = adapter->flags2 | 1U; adapter->flags2 = adapter->flags2 | 2U; adapter->max_q_vectors = 64; adapter->atr_sample_rate = 20U;
/* Flow Director queue limit = min(63, online CPUs). */
__min1___0 = 63; tmp___1 = cpumask_weight(cpu_online_mask); __min2___0 = (int )tmp___1; fdir = (unsigned int )(__min1___0 < __min2___0 ? __min1___0 : __min2___0); adapter->ring_feature[3].limit = (u16 )fdir; adapter->fdir_pballoc = 1U;
adapter->flags = adapter->flags | 512U; adapter->flags = adapter->flags | 1048576U; adapter->flags = adapter->flags & 4292870143U; adapter->fcoe.up = 3U;
/* MAC filter table: num_rar_entries * 10 bytes (presumably
 * sizeof(struct ixgbe_mac_addr)), GFP flag 32U.
 * NOTE(review): the result is not checked for NULL here although
 * (adapter->mac_table)->addr is dereferenced later in ixgbe_reset() --
 * verify against the upstream driver whether a -ENOMEM check was lost. */
tmp___2 = kzalloc((unsigned long )hw->mac.num_rar_entries * 10UL, 32U); adapter->mac_table = (struct ixgbe_mac_addr *)tmp___2;
switch ((unsigned int )hw->mac.type) {
/* 82598: no RSC, fewer vectors, no Flow Director / FCoE; device 0x10C8
 * (4296U) is the fan-fail-capable part. */
case 1U: adapter->flags2 = adapter->flags2 & 4294967294U; adapter->flags2 = adapter->flags2 & 4294967293U; if ((unsigned int )hw->device_id == 4296U) { adapter->flags = adapter->flags | 32768U; } else { } adapter->max_q_vectors = 16; adapter->ring_feature[3].limit = 0U; adapter->atr_sample_rate = 0U; adapter->fdir_pballoc = 0U; adapter->flags = adapter->flags & 4293918719U; adapter->flags = adapter->flags & 4292870143U; adapter->fcoe.up = 0U; goto ldv_58755;
/* 82599: device 0x151C (5404U) gets flags2 bit 2 (presumably an external
 * thermal-sensor capability). */
case 2U: ; if ((unsigned int )hw->device_id == 5404U) { adapter->flags2 = adapter->flags2 | 4U; } else { } goto ldv_58755;
/* X540: same capability gated on FWSM bit 0 (mvals[7] holds the per-MAC
 * FWSM register offset). */
case 3U: fwsm = ixgbe_read_reg(hw, *(hw->mvals + 7UL)); if ((int )fwsm & 1) { adapter->flags2 = adapter->flags2 | 4U; } else { } goto ldv_58755;
case 5U: ; case 4U: adapter->flags = adapter->flags & 4294966783U; goto ldv_58755;
default: ; goto ldv_58755; }
ldv_58755: spinlock_check(& adapter->fcoe.lock);
/* ixgbe_sw_init() continued: lockdep-annotated spinlock init (CIL expansion
 * of spin_lock_init) for the FCoE and Flow Director locks. */
__raw_spin_lock_init(& adapter->fcoe.lock.__annonCompField18.rlock, "&(&adapter->fcoe.lock)->rlock", & __key); spinlock_check(& adapter->fdir_perfect_lock); __raw_spin_lock_init(& adapter->fdir_perfect_lock.__annonCompField18.rlock, "&(&adapter->fdir_perfect_lock)->rlock", & __key___0);
/* DCB defaults: X540/X550 support 4 traffic classes, others 8. */
switch ((unsigned int )hw->mac.type) { case 3U: ; case 4U: ; case 5U: adapter->dcb_cfg.num_tcs.pg_tcs = 4U; adapter->dcb_cfg.num_tcs.pfc_tcs = 4U; goto ldv_58766; default: adapter->dcb_cfg.num_tcs.pg_tcs = 8U; adapter->dcb_cfg.num_tcs.pfc_tcs = 8U; goto ldv_58766; }
/* Per-TC bandwidth-group defaults: 12% or 13% alternating (12 + (j & 1))
 * for both the Tx (path[0]) and Rx (path[1]) directions. */
ldv_58766: j = 0; goto ldv_58769; ldv_58768: tc = (struct tc_configuration *)(& adapter->dcb_cfg.tc_config) + (unsigned long )j; tc->path[0].bwg_id = 0U; tc->path[0].bwg_percent = ((unsigned int )((u8 )j) & 1U) + 12U; tc->path[1].bwg_id = 0U; tc->path[1].bwg_percent = ((unsigned int )((u8 )j) & 1U) + 12U; tc->dcb_pfc = 0; j = j + 1; ldv_58769: ; if (j <= 7) { goto ldv_58768; } else { }
/* TC 0 maps all user priorities; bandwidth group 0 gets 100%. */
tc = (struct tc_configuration *)(& adapter->dcb_cfg.tc_config); tc->path[0].up_to_tc_bitmap = 255U; tc->path[1].up_to_tc_bitmap = 255U; adapter->dcb_cfg.bw_percentage[0][0] = 100U; adapter->dcb_cfg.bw_percentage[1][0] = 100U; adapter->dcb_cfg.pfc_mode_enable = 0; adapter->dcb_set_bitmap = 0U; adapter->dcbx_cap = 5U; memcpy((void *)(& adapter->temp_dcb_cfg), (void const *)(& adapter->dcb_cfg), 296UL);
/* Flow control defaults: mode 3 is presumably ixgbe_fc_full. */
hw->fc.requested_mode = 3; hw->fc.current_mode = 3; ixgbe_pbthresh_setup(adapter); hw->fc.pause_time = 65535U; hw->fc.send_xon = 1; hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
if (max_vfs != 0U) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n"); } else { }
/* SR-IOV via module parameter: 82598 has no VFs; cap at 63. */
if ((unsigned int )hw->mac.type != 1U) { if (max_vfs > 63U) { adapter->num_vfs = 0U; dev_warn((struct device const *)(& (adapter->pdev)->dev), "max_vfs parameter out of range. Not assigning any SR-IOV VFs\n"); } else { adapter->num_vfs = max_vfs; } } else { }
/* ITR defaults (1 = dynamic), 512-descriptor rings, Tx clean budget 256. */
adapter->rx_itr_setting = 1U; adapter->tx_itr_setting = 1U; adapter->tx_ring_count = 512U; adapter->rx_ring_count = 512U; adapter->tx_work_limit = 256U;
tmp___3 = ixgbe_init_eeprom_params_generic(hw); if (tmp___3 != 0) { dev_err((struct device const *)(& (adapter->pdev)->dev), "EEPROM initialization failed\n"); return (-5); } else { }
/* Reserve macvlan-offload pool 0 for the PF and start in the DOWN state. */
set_bit(0L, (unsigned long volatile *)(& adapter->fwd_bitmask)); set_bit(2L, (unsigned long volatile *)(& adapter->state)); return (0); } }
/* ixgbe_setup_tx_resources(): allocate the software buffer array and the
 * DMA descriptor memory for one Tx ring, preferring the ring's NUMA node
 * with a fall-back to any node. Returns 0 or -ENOMEM (-12).
 * (ldv_vzalloc_node_53 / ldv_vzalloc_54 are LDV wrappers for
 * vzalloc_node / vzalloc; the dma_alloc_attrs call completes on the next
 * chunk line.) */
int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring ) { struct device *dev ; int orig_node ; int tmp ; int ring_node ; int size ; void *tmp___0 ; void *tmp___1 ;
{ dev = tx_ring->dev; tmp = dev_to_node(dev); orig_node = tmp; ring_node = -1;
/* 48 bytes per entry -- presumably sizeof(struct ixgbe_tx_buffer). */
size = (int )((unsigned int )tx_ring->count * 48U);
if ((unsigned long )tx_ring->q_vector != (unsigned long )((struct ixgbe_q_vector *)0)) { ring_node = (tx_ring->q_vector)->numa_node; } else { }
tmp___0 = ldv_vzalloc_node_53((unsigned long )size, ring_node); tx_ring->__annonCompField118.tx_buffer_info = (struct ixgbe_tx_buffer *)tmp___0; if ((unsigned long )tx_ring->__annonCompField118.tx_buffer_info == (unsigned long )((struct ixgbe_tx_buffer *)0)) { tmp___1 = ldv_vzalloc_54((unsigned long )size); tx_ring->__annonCompField118.tx_buffer_info = (struct ixgbe_tx_buffer *)tmp___1; } else { } if ((unsigned long )tx_ring->__annonCompField118.tx_buffer_info == (unsigned long )((struct ixgbe_tx_buffer *)0)) { goto err; } else { }
u64_stats_init(& tx_ring->syncp);
/* Descriptor area: 16 bytes per descriptor, rounded up to a 4 KiB page. */
tx_ring->size = (unsigned int )tx_ring->count * 16U; tx_ring->size = (tx_ring->size + 4095U) & 4294963200U;
set_dev_node(dev, ring_node); tx_ring->desc = dma_alloc_attrs(dev, (size_t )tx_ring->size, & tx_ring->dma, 208U, (struct dma_attrs *)0); set_dev_node(dev, orig_node); if ((unsigned long )tx_ring->desc == (unsigned long )((void *)0)) { tx_ring->desc = dma_alloc_attrs(dev, (size_t )tx_ring->size, & tx_ring->dma, 208U,
/* Tail of ixgbe_setup_tx_resources(): second (node-agnostic) DMA attempt,
 * then either success (reset ring indices) or the shared error exit that
 * releases the buffer array and reports -ENOMEM (-12). */
(struct dma_attrs *)0); } else { } if ((unsigned long )tx_ring->desc == (unsigned long )((void *)0)) { goto err; } else { } tx_ring->next_to_use = 0U; tx_ring->next_to_clean = 0U; return (0);
err: vfree((void const *)tx_ring->__annonCompField118.tx_buffer_info); tx_ring->__annonCompField118.tx_buffer_info = (struct ixgbe_tx_buffer *)0; dev_err((struct device const *)dev, "Unable to allocate memory for the Tx descriptor ring\n"); return (-12); } }
/* ixgbe_setup_all_tx_resources(): allocate every Tx ring; on failure roll
 * back the rings allocated so far (the tmp/i dance is CIL's expansion of
 * "while (i--)") and return the error. */
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter ) { int i ; int err ; int tmp ;
{ err = 0; i = 0; goto ldv_58787; ldv_58786: err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); if (err == 0) { goto ldv_58784; } else { } if (((int )adapter->msg_enable & 2) != 0) { netdev_err((struct net_device const *)adapter->netdev, "Allocation for Tx Queue %u failed\n", i); } else { } goto err_setup_tx; ldv_58784: i = i + 1; ldv_58787: ; if (adapter->num_tx_queues > i) { goto ldv_58786; } else { } return (0);
err_setup_tx: ; goto ldv_58790; ldv_58789: ixgbe_free_tx_resources(adapter->tx_ring[i]); ldv_58790: tmp = i; i = i - 1; if (tmp != 0) { goto ldv_58789; } else { } return (err); } }
/* ixgbe_setup_rx_resources() (continues on the next chunk line): Rx
 * counterpart of ixgbe_setup_tx_resources(); 32 bytes per software buffer
 * entry -- presumably sizeof(struct ixgbe_rx_buffer). */
int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring ) { struct device *dev ; int orig_node ; int tmp ; int ring_node ; int size ; void *tmp___0 ; void *tmp___1 ;
{ dev = rx_ring->dev; tmp = dev_to_node(dev); orig_node = tmp; ring_node = -1; size = (int )((unsigned int )rx_ring->count * 32U);
if ((unsigned long )rx_ring->q_vector != (unsigned long )((struct ixgbe_q_vector *)0)) { ring_node = (rx_ring->q_vector)->numa_node; } else { }
tmp___0 = ldv_vzalloc_node_55((unsigned long )size, ring_node); rx_ring->__annonCompField118.rx_buffer_info = (struct ixgbe_rx_buffer *)tmp___0; if ((unsigned long )rx_ring->__annonCompField118.rx_buffer_info == (unsigned long )((struct ixgbe_rx_buffer *)0)) { tmp___1 = ldv_vzalloc_56((unsigned long )size); rx_ring->__annonCompField118.rx_buffer_info = (struct ixgbe_rx_buffer *)tmp___1; } else { } if
/* Tail of ixgbe_setup_rx_resources(): descriptor DMA allocation (16 bytes
 * per descriptor, page-aligned) with NUMA-preferred then node-agnostic
 * attempts; error exit frees the buffer array and returns -ENOMEM (-12). */
((unsigned long )rx_ring->__annonCompField118.rx_buffer_info == (unsigned long )((struct ixgbe_rx_buffer *)0)) { goto err; } else { }
u64_stats_init(& rx_ring->syncp); rx_ring->size = (unsigned int )rx_ring->count * 16U; rx_ring->size = (rx_ring->size + 4095U) & 4294963200U;
set_dev_node(dev, ring_node); rx_ring->desc = dma_alloc_attrs(dev, (size_t )rx_ring->size, & rx_ring->dma, 208U, (struct dma_attrs *)0); set_dev_node(dev, orig_node); if ((unsigned long )rx_ring->desc == (unsigned long )((void *)0)) { rx_ring->desc = dma_alloc_attrs(dev, (size_t )rx_ring->size, & rx_ring->dma, 208U, (struct dma_attrs *)0); } else { } if ((unsigned long )rx_ring->desc == (unsigned long )((void *)0)) { goto err; } else { } rx_ring->next_to_clean = 0U; rx_ring->next_to_use = 0U; return (0);
err: vfree((void const *)rx_ring->__annonCompField118.rx_buffer_info); rx_ring->__annonCompField118.rx_buffer_info = (struct ixgbe_rx_buffer *)0; dev_err((struct device const *)dev, "Unable to allocate memory for the Rx descriptor ring\n"); return (-12); } }
/* ixgbe_setup_all_rx_resources(): allocate every Rx ring plus the FCoE DDP
 * pools; on any failure roll back rings allocated so far ("while (i--)"
 * in CIL form) and return the error. */
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter ) { int i ; int err ; int tmp ;
{ err = 0; i = 0; goto ldv_58808; ldv_58807: err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); if (err == 0) { goto ldv_58805; } else { } if (((int )adapter->msg_enable & 2) != 0) { netdev_err((struct net_device const *)adapter->netdev, "Allocation for Rx Queue %u failed\n", i); } else { } goto err_setup_rx; ldv_58805: i = i + 1; ldv_58808: ; if (adapter->num_rx_queues > i) { goto ldv_58807; } else { }
err = ixgbe_setup_fcoe_ddp_resources(adapter); if (err == 0) { return (0); } else { }
err_setup_rx: ; goto ldv_58811; ldv_58810: ixgbe_free_rx_resources(adapter->rx_ring[i]); ldv_58811: tmp = i; i = i - 1; if (tmp != 0) { goto ldv_58810; } else { } return (err); } }
/* ixgbe_free_tx_resources() (completes on the next chunk line): clean the
 * ring, then release the software buffer array and descriptor memory. */
void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring ) { { ixgbe_clean_tx_ring(tx_ring); vfree((void const *)tx_ring->__annonCompField118.tx_buffer_info);
/* Tail of ixgbe_free_tx_resources(): NULL the buffer pointer, then free the
 * descriptor DMA area if it was ever allocated. */
tx_ring->__annonCompField118.tx_buffer_info = (struct ixgbe_tx_buffer *)0; if ((unsigned long )tx_ring->desc == (unsigned long )((void *)0)) { return; } else { } dma_free_attrs(tx_ring->dev, (size_t )tx_ring->size, tx_ring->desc, tx_ring->dma, (struct dma_attrs *)0); tx_ring->desc = (void *)0; return; } }
/* ixgbe_free_all_tx_resources(): free every Tx ring that has descriptors. */
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter ) { int i ; { i = 0; goto ldv_58821; ldv_58820: ; if ((unsigned long )(adapter->tx_ring[i])->desc != (unsigned long )((void *)0)) { ixgbe_free_tx_resources(adapter->tx_ring[i]); } else { } i = i + 1; ldv_58821: ; if (adapter->num_tx_queues > i) { goto ldv_58820; } else { } return; } }
/* ixgbe_free_rx_resources(): Rx counterpart of ixgbe_free_tx_resources(). */
void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring ) { { ixgbe_clean_rx_ring(rx_ring); vfree((void const *)rx_ring->__annonCompField118.rx_buffer_info); rx_ring->__annonCompField118.rx_buffer_info = (struct ixgbe_rx_buffer *)0; if ((unsigned long )rx_ring->desc == (unsigned long )((void *)0)) { return; } else { } dma_free_attrs(rx_ring->dev, (size_t )rx_ring->size, rx_ring->desc, rx_ring->dma, (struct dma_attrs *)0); rx_ring->desc = (void *)0; return; } }
/* ixgbe_free_all_rx_resources(): release FCoE DDP pools first, then free
 * every Rx ring that has descriptors. */
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter ) { int i ; { ixgbe_free_fcoe_ddp_resources(adapter); i = 0; goto ldv_58831; ldv_58830: ; if ((unsigned long )(adapter->rx_ring[i])->desc != (unsigned long )((void *)0)) { ixgbe_free_rx_resources(adapter->rx_ring[i]); } else { } i = i + 1; ldv_58831: ; if (adapter->num_rx_queues > i) { goto ldv_58830; } else { } return; } }
/* ixgbe_change_mtu() (continues on the next chunk line): .ndo_change_mtu.
 * max_frame = MTU + 18 (Ethernet header + FCS); valid MTU is 68..9710
 * (frame <= 9728). Warns when legacy 82599 VFs would be disabled by
 * jumbo frames under SR-IOV (flag 0x800000, mac.type 2U). */
static int ixgbe_change_mtu(struct net_device *netdev , int new_mtu ) { struct ixgbe_adapter *adapter ; void *tmp ; int max_frame ; bool tmp___0 ;
{ tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; max_frame = new_mtu + 18; if (new_mtu <= 67 || max_frame > 9728) { return (-22); } else { }
if (((adapter->flags & 8388608U) != 0U && (unsigned int )adapter->hw.mac.type == 2U) && max_frame > 1518) { if (((int )adapter->msg_enable &
/* Tail of ixgbe_change_mtu(): log, store the new MTU, and re-init the
 * adapter if the interface is running so ring/buffer sizes are re-derived. */
2) != 0) { netdev_warn((struct net_device const *)adapter->netdev, "Setting MTU > 1500 will disable legacy VFs\n"); } else { } } else { }
if (((int )adapter->msg_enable & 2) != 0) { netdev_info((struct net_device const *)adapter->netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); } else { }
netdev->mtu = (unsigned int )new_mtu; tmp___0 = netif_running((struct net_device const *)netdev); if ((int )tmp___0) { ixgbe_reinit_locked(adapter); } else { } return (0); } }
/* ixgbe_open(): .ndo_open -- allocate Tx/Rx resources, configure the
 * hardware, request IRQs, size the real queue counts (limited per pool
 * when macvlan offload pools are active), start PTP and finish bring-up.
 * Unwinds via the labelled error exits in reverse order of setup; returns
 * -EBUSY (-16) while state bit 0 (presumably TESTING) is set.
 * (Error-path tail continues on the next chunk line.) */
static int ixgbe_open(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; int err ; int queues ; int tmp___0 ;
{ tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw;
tmp___0 = constant_test_bit(0L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 != 0) { return (-16); } else { }
netif_carrier_off(netdev);
err = ixgbe_setup_all_tx_resources(adapter); if (err != 0) { goto err_setup_tx; } else { }
err = ixgbe_setup_all_rx_resources(adapter); if (err != 0) { goto err_setup_rx; } else { }
ixgbe_configure(adapter);
err = ixgbe_request_irq(adapter); if (err != 0) { goto err_req_irq; } else { }
/* Tx queue count: per-pool count when multiple Rx pools exist. */
if (adapter->num_rx_pools > 1) { queues = adapter->num_rx_queues_per_pool; } else { queues = adapter->num_tx_queues; }
err = netif_set_real_num_tx_queues(netdev, (unsigned int )queues); if (err != 0) { goto err_set_queues; } else { }
/* Rx queue count: capped at 4 when multiple pools are active. */
if (adapter->num_rx_pools > 1 && adapter->num_rx_queues > 4) { queues = 4; } else { queues = adapter->num_rx_queues; }
err = netif_set_real_num_rx_queues(netdev, (unsigned int )queues); if (err != 0) { goto err_set_queues; } else { }
ixgbe_ptp_init(adapter); ixgbe_up_complete(adapter); vxlan_get_rx_port(netdev); return (0);
err_set_queues: ixgbe_free_irq(adapter);
err_req_irq: ixgbe_free_all_rx_resources(adapter); if ((unsigned long )hw->phy.ops.set_phy_power != (unsigned long )((s32 (*)(struct ixgbe_hw * , bool ))0) && adapter->wol == 0U) {
/* Tail of ixgbe_open()'s error path: power the PHY down (no WoL), free Tx
 * resources, and reset the hardware before returning the error. */
(*(hw->phy.ops.set_phy_power))(& adapter->hw, 0); } else { }
err_setup_rx: ixgbe_free_all_tx_resources(adapter);
err_setup_tx: ixgbe_reset(adapter); return (err); } }
/* ixgbe_close_suspend(): common teardown shared by close and suspend --
 * pause PTP, bring the adapter down, release IRQs and ring resources. */
static void ixgbe_close_suspend(struct ixgbe_adapter *adapter ) { { ixgbe_ptp_suspend(adapter); ixgbe_down(adapter); ixgbe_free_irq(adapter); ixgbe_free_all_tx_resources(adapter); ixgbe_free_all_rx_resources(adapter); return; } }
/* ixgbe_close(): .ndo_stop -- full teardown plus PTP stop, Flow Director
 * filter cleanup and release of firmware/driver control handshake. */
static int ixgbe_close(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; ixgbe_ptp_stop(adapter); ixgbe_close_suspend(adapter); ixgbe_fdir_filter_exit(adapter); ixgbe_release_hw_control(adapter); return (0); } }
/* ixgbe_resume(): PCI resume -- restore PCI state, re-enable the device,
 * clear state bit 3 (presumably DISABLED), reset, clear wake-up status
 * (reg 0x5810 = WUS, write-1-to-clear), rebuild the interrupt scheme under
 * RTNL and reopen the interface if it was running. */
static int ixgbe_resume(struct pci_dev *pdev ) { struct ixgbe_adapter *adapter ; void *tmp ; struct net_device *netdev ; u32 err ; int tmp___0 ; int tmp___1 ; int tmp___2 ; bool tmp___3 ;
{ tmp = pci_get_drvdata(pdev); adapter = (struct ixgbe_adapter *)tmp; netdev = adapter->netdev; adapter->hw.hw_addr = adapter->io_addr;
pci_set_power_state(pdev, 0); pci_restore_state(pdev); pci_save_state(pdev);
tmp___0 = pci_enable_device_mem(pdev); err = (u32 )tmp___0; if (err != 0U) { dev_err((struct device const *)(& (adapter->pdev)->dev), "Cannot enable PCI device from suspend\n"); return ((int )err); } else { }
__asm__ volatile ("": : : "memory"); clear_bit(3L, (unsigned long volatile *)(& adapter->state));
pci_set_master(pdev); pci_wake_from_d3(pdev, 0); ixgbe_reset(adapter); ixgbe_write_reg(& adapter->hw, 22544U, 4294967295U);
rtnl_lock(); tmp___1 = ixgbe_init_interrupt_scheme(adapter); err = (u32 )tmp___1; if (err == 0U) { tmp___3 = netif_running((struct net_device const *)netdev); if ((int )tmp___3) { tmp___2 = ixgbe_open(netdev); err = (u32 )tmp___2; } else { } } else { } rtnl_unlock(); if (err != 0U) { return ((int )err); } else { } netif_device_attach(netdev); return (0); } }
/* __ixgbe_shutdown() (body on the next chunk lines): common suspend /
 * shutdown path; reports via *enable_wake whether WoL is armed. */
static int __ixgbe_shutdown(struct pci_dev *pdev , bool *enable_wake ) {
/* __ixgbe_shutdown() body: detach netdev, close under RTNL if running,
 * tear down interrupts, save PCI state, then arm or disarm Wake-on-LAN. */
struct ixgbe_adapter *adapter ; void *tmp ; struct net_device *netdev ; struct ixgbe_hw *hw ; u32 ctrl ; u32 fctrl ; u32 wufc ; int retval ; bool tmp___0 ; int tmp___1 ;
{ tmp = pci_get_drvdata(pdev); adapter = (struct ixgbe_adapter *)tmp; netdev = adapter->netdev; hw = & adapter->hw; wufc = adapter->wol; retval = 0;
netif_device_detach(netdev);
rtnl_lock(); tmp___0 = netif_running((struct net_device const *)netdev); if ((int )tmp___0) { ixgbe_close_suspend(adapter); } else { } rtnl_unlock();
ixgbe_clear_interrupt_scheme(adapter);
retval = pci_save_state(pdev); if (retval != 0) { return (retval); } else { }
if ((unsigned long )hw->mac.ops.stop_link_on_d3 != (unsigned long )((void (*)(struct ixgbe_hw * ))0)) { (*(hw->mac.ops.stop_link_on_d3))(hw); } else { }
/* WoL armed (wufc != 0): keep Rx filtering and the laser on so wake
 * packets can be received; wufc bit 3 (8U) presumably enables multicast
 * wake, which needs the MPE bit (0x100) in FCTRL (reg 0x5080); set bit 2
 * in CTRL (reg 0x0) -- presumably ADVD3WUC -- and program WUFC (0x5808).
 * Otherwise clear both WUC (0x5800) and WUFC. TODO confirm bit names. */
if (wufc != 0U) { ixgbe_set_rx_mode(netdev); if ((unsigned long )hw->mac.ops.enable_tx_laser != (unsigned long )((void (*)(struct ixgbe_hw * ))0)) { (*(hw->mac.ops.enable_tx_laser))(hw); } else { } if ((wufc & 8U) != 0U) { fctrl = ixgbe_read_reg(hw, 20608U); fctrl = fctrl | 256U; ixgbe_write_reg(hw, 20608U, fctrl); } else { } ctrl = ixgbe_read_reg(hw, 0U); ctrl = ctrl | 4U; ixgbe_write_reg(hw, 0U, ctrl); ixgbe_write_reg(hw, 22536U, wufc); } else { ixgbe_write_reg(hw, 22528U, 0U); ixgbe_write_reg(hw, 22536U, 0U); }
/* 82598 (type 1U) cannot wake from D3; all later MACs can if WoL is set. */
switch ((unsigned int )hw->mac.type) { case 1U: pci_wake_from_d3(pdev, 0); goto ldv_58875; case 2U: ; case 3U: ; case 4U: ; case 5U: pci_wake_from_d3(pdev, wufc != 0U); goto ldv_58875; default: ; goto ldv_58875; }
ldv_58875: *enable_wake = wufc != 0U;
if ((unsigned long )hw->phy.ops.set_phy_power != (unsigned long )((s32 (*)(struct ixgbe_hw * , bool ))0) && !
/* Tail of __ixgbe_shutdown(): PHY off when not waking, release the
 * firmware handshake, and disable the PCI device exactly once (guarded by
 * state bit 3 -- presumably the DISABLED bit). */
*enable_wake) { (*(hw->phy.ops.set_phy_power))(hw, 0); } else { }
ixgbe_release_hw_control(adapter);
tmp___1 = test_and_set_bit(3L, (unsigned long volatile *)(& adapter->state)); if (tmp___1 == 0) { pci_disable_device(pdev); } else { } return (0); } }
/* ixgbe_suspend(): PCI .suspend -- shut down, then either prepare for wake
 * or go straight to D3hot (power state 3). */
static int ixgbe_suspend(struct pci_dev *pdev , pm_message_t state ) { int retval ; bool wake ; { retval = __ixgbe_shutdown(pdev, & wake); if (retval != 0) { return (retval); } else { } if ((int )wake) { pci_prepare_to_sleep(pdev); } else { pci_wake_from_d3(pdev, 0); pci_set_power_state(pdev, 3); } return (0); } }
/* ixgbe_shutdown(): PCI .shutdown -- same teardown; only touch the power
 * state when the system is actually powering off (system_state 3U is
 * presumably SYSTEM_POWER_OFF). */
static void ixgbe_shutdown(struct pci_dev *pdev ) { bool wake ; { __ixgbe_shutdown(pdev, & wake); if ((unsigned int )system_state == 3U) { pci_wake_from_d3(pdev, (int )wake); pci_set_power_state(pdev, 3); } else { } return; } }
/* ixgbe_update_stats() (declarations only; the body continues past the end
 * of this chunk): aggregate per-ring software counters and hardware
 * statistics registers into adapter->stats and netdev->stats. */
void ixgbe_update_stats(struct ixgbe_adapter *adapter ) { struct net_device *netdev ; struct ixgbe_hw *hw ; struct ixgbe_hw_stats *hwstats ; u64 total_mpc ; u32 i ; u32 missed_rx ; u32 mpc ; u32 bprc ; u32 lxon ; u32 lxoff ; u32 xon_off_tot ; u64 non_eop_descs ; u64 restart_queue ; u64 tx_busy ; u64 alloc_rx_page_failed ; u64 alloc_rx_buff_failed ; u64 bytes ; u64 packets ; u64 hw_csum_rx_error ; int tmp ; int tmp___0 ; u64 rsc_count ; u64 rsc_flush ; struct ixgbe_ring *rx_ring ; struct ixgbe_ring *tx_ring ; u32 tmp___1 ; u32 tmp___2 ; u32 tmp___3 ; u32 tmp___4 ; u32 tmp___5 ; u32 tmp___6 ; u32 tmp___7 ; u32 tmp___8 ; u32 tmp___9 ; u32 tmp___10 ; u32 tmp___11 ; u32 tmp___12 ; u32 tmp___13 ; u32 tmp___14 ; u32 tmp___15 ; u32 tmp___16 ; u32 tmp___17 ; u32 tmp___18 ; u32 tmp___19 ; u32 tmp___20 ; u32 tmp___21 ; u32 tmp___22 ; u32 tmp___23 ; u32 tmp___24 ; u32 tmp___25 ; u32 tmp___26 ; u32 tmp___27 ; u32 tmp___28 ; u32 tmp___29 ; u32 tmp___30 ; u32 tmp___31 ; u32 tmp___32 ; u32 tmp___33 ; u32 tmp___34 ; struct ixgbe_fcoe *fcoe ; struct ixgbe_fcoe_ddp_pool *ddp_pool ; unsigned int cpu ; u64 noddp ; u64 noddp_ext_buff ; void const *__vpp_verify ; unsigned long __ptr ; u32 tmp___35 ; u32
tmp___36 ; u32 tmp___37 ; u32 tmp___38 ; u32 tmp___39 ; u32 tmp___40 ; u32 tmp___41 ; u32 tmp___42 ; u32 tmp___43 ; u32 tmp___44 ; u32 tmp___45 ; u32 tmp___46 ; u32 tmp___47 ; u32 tmp___48 ; u32 tmp___49 ; u32 tmp___50 ; u32 tmp___51 ; u32 tmp___52 ; u32 tmp___53 ; u32 tmp___54 ; u32 tmp___55 ; u32 tmp___56 ; { netdev = adapter->netdev; hw = & adapter->hw; hwstats = & adapter->stats; total_mpc = 0ULL; missed_rx = 0U; non_eop_descs = 0ULL; restart_queue = 0ULL; tx_busy = 0ULL; alloc_rx_page_failed = 0ULL; alloc_rx_buff_failed = 0ULL; bytes = 0ULL; packets = 0ULL; hw_csum_rx_error = 0ULL; tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp != 0) { return; } else { tmp___0 = constant_test_bit(1L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 != 0) { return; } else { } } if ((adapter->flags2 & 2U) != 0U) { rsc_count = 0ULL; rsc_flush = 0ULL; i = 0U; goto ldv_58916; ldv_58915: rsc_count = (adapter->rx_ring[i])->__annonCompField121.rx_stats.rsc_count + rsc_count; rsc_flush = (adapter->rx_ring[i])->__annonCompField121.rx_stats.rsc_flush + rsc_flush; i = i + 1U; ldv_58916: ; if ((u32 )adapter->num_rx_queues > i) { goto ldv_58915; } else { } adapter->rsc_total_count = rsc_count; adapter->rsc_total_flush = rsc_flush; } else { } i = 0U; goto ldv_58920; ldv_58919: rx_ring = adapter->rx_ring[i]; non_eop_descs = rx_ring->__annonCompField121.rx_stats.non_eop_descs + non_eop_descs; alloc_rx_page_failed = rx_ring->__annonCompField121.rx_stats.alloc_rx_page_failed + alloc_rx_page_failed; alloc_rx_buff_failed = rx_ring->__annonCompField121.rx_stats.alloc_rx_buff_failed + alloc_rx_buff_failed; hw_csum_rx_error = rx_ring->__annonCompField121.rx_stats.csum_err + hw_csum_rx_error; bytes = rx_ring->stats.bytes + bytes; packets = rx_ring->stats.packets + packets; i = i + 1U; ldv_58920: ; if ((u32 )adapter->num_rx_queues > i) { goto ldv_58919; } else { } adapter->non_eop_descs = non_eop_descs; adapter->alloc_rx_page_failed = (u32 
)alloc_rx_page_failed; adapter->alloc_rx_buff_failed = (u32 )alloc_rx_buff_failed; adapter->hw_csum_rx_error = hw_csum_rx_error; netdev->stats.rx_bytes = (unsigned long )bytes; netdev->stats.rx_packets = (unsigned long )packets; bytes = 0ULL; packets = 0ULL; i = 0U; goto ldv_58924; ldv_58923: tx_ring = adapter->tx_ring[i]; restart_queue = tx_ring->__annonCompField121.tx_stats.restart_queue + restart_queue; tx_busy = tx_ring->__annonCompField121.tx_stats.tx_busy + tx_busy; bytes = tx_ring->stats.bytes + bytes; packets = tx_ring->stats.packets + packets; i = i + 1U; ldv_58924: ; if ((u32 )adapter->num_tx_queues > i) { goto ldv_58923; } else { } adapter->restart_queue = restart_queue; adapter->tx_busy = tx_busy; netdev->stats.tx_bytes = (unsigned long )bytes; netdev->stats.tx_packets = (unsigned long )packets; tmp___1 = ixgbe_read_reg(hw, 16384U); hwstats->crcerrs = hwstats->crcerrs + (u64 )tmp___1; i = 0U; goto ldv_58934; ldv_58933: mpc = ixgbe_read_reg(hw, (i + 4072U) * 4U); missed_rx = missed_rx + mpc; hwstats->mpc[i] = hwstats->mpc[i] + (u64 )mpc; total_mpc = hwstats->mpc[i] + total_mpc; tmp___2 = ixgbe_read_reg(hw, (i + 4032U) * 4U); hwstats->pxontxc[i] = hwstats->pxontxc[i] + (u64 )tmp___2; tmp___3 = ixgbe_read_reg(hw, (i + 4040U) * 4U); hwstats->pxofftxc[i] = hwstats->pxofftxc[i] + (u64 )tmp___3; switch ((unsigned int )hw->mac.type) { case 1U: tmp___4 = ixgbe_read_reg(hw, (i + 4080U) * 4U); hwstats->rnbc[i] = hwstats->rnbc[i] + (u64 )tmp___4; tmp___5 = ixgbe_read_reg(hw, i * 64U + 24628U); hwstats->qbtc[i] = hwstats->qbtc[i] + (u64 )tmp___5; tmp___6 = ixgbe_read_reg(hw, i * 64U + 4148U); hwstats->qbrc[i] = hwstats->qbrc[i] + (u64 )tmp___6; tmp___7 = ixgbe_read_reg(hw, (i + 13248U) * 4U); hwstats->pxonrxc[i] = hwstats->pxonrxc[i] + (u64 )tmp___7; goto ldv_58927; case 2U: ; case 3U: ; case 4U: ; case 5U: tmp___8 = ixgbe_read_reg(hw, (i + 4176U) * 4U); hwstats->pxonrxc[i] = hwstats->pxonrxc[i] + (u64 )tmp___8; goto ldv_58927; default: ; goto ldv_58927; } 
ldv_58927: i = i + 1U; ldv_58934: ; if (i <= 7U) { goto ldv_58933; } else { } i = 0U; goto ldv_58937; ldv_58936: tmp___9 = ixgbe_read_reg(hw, i * 64U + 24624U); hwstats->qptc[i] = hwstats->qptc[i] + (u64 )tmp___9; tmp___10 = ixgbe_read_reg(hw, i * 64U + 4144U); hwstats->qprc[i] = hwstats->qprc[i] + (u64 )tmp___10; if ((((unsigned int )hw->mac.type == 2U || (unsigned int )hw->mac.type == 3U) || (unsigned int )hw->mac.type == 4U) || (unsigned int )hw->mac.type == 5U) { tmp___11 = ixgbe_read_reg(hw, (i + 4320U) * 8U); hwstats->qbtc[i] = hwstats->qbtc[i] + (u64 )tmp___11; ixgbe_read_reg(hw, i * 8U + 34564U); tmp___12 = ixgbe_read_reg(hw, i * 64U + 4148U); hwstats->qbrc[i] = hwstats->qbrc[i] + (u64 )tmp___12; ixgbe_read_reg(hw, i * 64U + 4152U); } else { } i = i + 1U; ldv_58937: ; if (i <= 15U) { goto ldv_58936; } else { } tmp___13 = ixgbe_read_reg(hw, 16500U); hwstats->gprc = hwstats->gprc + (u64 )tmp___13; hwstats->gprc = hwstats->gprc - (u64 )missed_rx; ixgbe_update_xoff_received(adapter); switch ((unsigned int )hw->mac.type) { case 1U: tmp___14 = ixgbe_read_reg(hw, 53088U); hwstats->lxonrxc = hwstats->lxonrxc + (u64 )tmp___14; tmp___15 = ixgbe_read_reg(hw, 16524U); hwstats->gorc = hwstats->gorc + (u64 )tmp___15; tmp___16 = ixgbe_read_reg(hw, 16532U); hwstats->gotc = hwstats->gotc + (u64 )tmp___16; tmp___17 = ixgbe_read_reg(hw, 16580U); hwstats->tor = hwstats->tor + (u64 )tmp___17; goto ldv_58940; case 3U: ; case 4U: ; case 5U: tmp___18 = ixgbe_read_reg(hw, 16836U); hwstats->o2bgptc = hwstats->o2bgptc + (u64 )tmp___18; tmp___19 = ixgbe_read_reg(hw, 34736U); hwstats->o2bspc = hwstats->o2bspc + (u64 )tmp___19; tmp___20 = ixgbe_read_reg(hw, 16832U); hwstats->b2ospc = hwstats->b2ospc + (u64 )tmp___20; tmp___21 = ixgbe_read_reg(hw, 12176U); hwstats->b2ogprc = hwstats->b2ogprc + (u64 )tmp___21; case 2U: i = 0U; goto ldv_58946; ldv_58945: tmp___22 = ixgbe_read_reg(hw, i * 64U + 5168U); adapter->hw_rx_no_dma_resources = adapter->hw_rx_no_dma_resources + (u64 )tmp___22; i = i 
+ 1U; ldv_58946: ; if (i <= 15U) { goto ldv_58945; } else { } tmp___23 = ixgbe_read_reg(hw, 16520U); hwstats->gorc = hwstats->gorc + (u64 )tmp___23; ixgbe_read_reg(hw, 16524U); tmp___24 = ixgbe_read_reg(hw, 16528U); hwstats->gotc = hwstats->gotc + (u64 )tmp___24; ixgbe_read_reg(hw, 16532U); tmp___25 = ixgbe_read_reg(hw, 16576U); hwstats->tor = hwstats->tor + (u64 )tmp___25; ixgbe_read_reg(hw, 16580U); tmp___26 = ixgbe_read_reg(hw, 16804U); hwstats->lxonrxc = hwstats->lxonrxc + (u64 )tmp___26; tmp___27 = ixgbe_read_reg(hw, 61016U); hwstats->fdirmatch = hwstats->fdirmatch + (u64 )tmp___27; tmp___28 = ixgbe_read_reg(hw, 61020U); hwstats->fdirmiss = hwstats->fdirmiss + (u64 )tmp___28; tmp___29 = ixgbe_read_reg(hw, 20760U); hwstats->fccrc = hwstats->fccrc + (u64 )tmp___29; tmp___30 = ixgbe_read_reg(hw, 9244U); hwstats->fcoerpdc = hwstats->fcoerpdc + (u64 )tmp___30; tmp___31 = ixgbe_read_reg(hw, 9256U); hwstats->fcoeprc = hwstats->fcoeprc + (u64 )tmp___31; tmp___32 = ixgbe_read_reg(hw, 34692U); hwstats->fcoeptc = hwstats->fcoeptc + (u64 )tmp___32; tmp___33 = ixgbe_read_reg(hw, 9260U); hwstats->fcoedwrc = hwstats->fcoedwrc + (u64 )tmp___33; tmp___34 = ixgbe_read_reg(hw, 34696U); hwstats->fcoedwtc = hwstats->fcoedwtc + (u64 )tmp___34; if ((unsigned long )adapter->fcoe.ddp_pool != (unsigned long )((struct ixgbe_fcoe_ddp_pool *)0)) { fcoe = & adapter->fcoe; noddp = 0ULL; noddp_ext_buff = 0ULL; cpu = 4294967295U; goto ldv_58958; ldv_58957: __vpp_verify = (void const *)0; __asm__ ("": "=r" (__ptr): "0" (fcoe->ddp_pool)); ddp_pool = (struct ixgbe_fcoe_ddp_pool *)(__per_cpu_offset[cpu] + __ptr); noddp = ddp_pool->noddp + noddp; noddp_ext_buff = ddp_pool->noddp_ext_buff + noddp_ext_buff; ldv_58958: cpu = cpumask_next((int )cpu, cpu_possible_mask); if ((unsigned int )nr_cpu_ids > cpu) { goto ldv_58957; } else { } hwstats->fcoe_noddp = noddp; hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; } else { } goto ldv_58940; default: ; goto ldv_58940; } ldv_58940: bprc = ixgbe_read_reg(hw, 
16504U); hwstats->bprc = hwstats->bprc + (u64 )bprc; tmp___35 = ixgbe_read_reg(hw, 16508U); hwstats->mprc = hwstats->mprc + (u64 )tmp___35; if ((unsigned int )hw->mac.type == 1U) { hwstats->mprc = hwstats->mprc - (u64 )bprc; } else { } tmp___36 = ixgbe_read_reg(hw, 16556U); hwstats->roc = hwstats->roc + (u64 )tmp___36; tmp___37 = ixgbe_read_reg(hw, 16476U); hwstats->prc64 = hwstats->prc64 + (u64 )tmp___37; tmp___38 = ixgbe_read_reg(hw, 16480U); hwstats->prc127 = hwstats->prc127 + (u64 )tmp___38; tmp___39 = ixgbe_read_reg(hw, 16484U); hwstats->prc255 = hwstats->prc255 + (u64 )tmp___39; tmp___40 = ixgbe_read_reg(hw, 16488U); hwstats->prc511 = hwstats->prc511 + (u64 )tmp___40; tmp___41 = ixgbe_read_reg(hw, 16492U); hwstats->prc1023 = hwstats->prc1023 + (u64 )tmp___41; tmp___42 = ixgbe_read_reg(hw, 16496U); hwstats->prc1522 = hwstats->prc1522 + (u64 )tmp___42; tmp___43 = ixgbe_read_reg(hw, 16448U); hwstats->rlec = hwstats->rlec + (u64 )tmp___43; lxon = ixgbe_read_reg(hw, 16224U); hwstats->lxontxc = hwstats->lxontxc + (u64 )lxon; lxoff = ixgbe_read_reg(hw, 16232U); hwstats->lxofftxc = hwstats->lxofftxc + (u64 )lxoff; tmp___44 = ixgbe_read_reg(hw, 16512U); hwstats->gptc = hwstats->gptc + (u64 )tmp___44; tmp___45 = ixgbe_read_reg(hw, 16624U); hwstats->mptc = hwstats->mptc + (u64 )tmp___45; xon_off_tot = lxon + lxoff; hwstats->gptc = hwstats->gptc - (u64 )xon_off_tot; hwstats->mptc = hwstats->mptc - (u64 )xon_off_tot; hwstats->gotc = hwstats->gotc - (u64 )(xon_off_tot * 64U); tmp___46 = ixgbe_read_reg(hw, 16548U); hwstats->ruc = hwstats->ruc + (u64 )tmp___46; tmp___47 = ixgbe_read_reg(hw, 16552U); hwstats->rfc = hwstats->rfc + (u64 )tmp___47; tmp___48 = ixgbe_read_reg(hw, 16560U); hwstats->rjc = hwstats->rjc + (u64 )tmp___48; tmp___49 = ixgbe_read_reg(hw, 16592U); hwstats->tpr = hwstats->tpr + (u64 )tmp___49; tmp___50 = ixgbe_read_reg(hw, 16600U); hwstats->ptc64 = hwstats->ptc64 + (u64 )tmp___50; hwstats->ptc64 = hwstats->ptc64 - (u64 )xon_off_tot; tmp___51 = 
/* Tail of ixgbe_update_stats() (function begins before this chunk): accumulates
 * the Tx packet-size-bucket counters (ptc127..ptc1522, bptc) from MAC stats
 * registers, then mirrors selected hardware counters into netdev->stats.
 * NOTE(review): register offsets (16604U etc.) presumably map to the 82599
 * PTCxx/BPTC statistics registers -- confirm against the ixgbe register map. */
ixgbe_read_reg(hw, 16604U); hwstats->ptc127 = hwstats->ptc127 + (u64 )tmp___51; tmp___52 = ixgbe_read_reg(hw, 16608U); hwstats->ptc255 = hwstats->ptc255 + (u64 )tmp___52; tmp___53 = ixgbe_read_reg(hw, 16612U); hwstats->ptc511 = hwstats->ptc511 + (u64 )tmp___53; tmp___54 = ixgbe_read_reg(hw, 16616U); hwstats->ptc1023 = hwstats->ptc1023 + (u64 )tmp___54; tmp___55 = ixgbe_read_reg(hw, 16620U); hwstats->ptc1522 = hwstats->ptc1522 + (u64 )tmp___55; tmp___56 = ixgbe_read_reg(hw, 16628U); hwstats->bptc = hwstats->bptc + (u64 )tmp___56; netdev->stats.multicast = (unsigned long )hwstats->mprc; netdev->stats.rx_errors = (unsigned long )(hwstats->crcerrs + hwstats->rlec); netdev->stats.rx_dropped = 0UL; netdev->stats.rx_length_errors = (unsigned long )hwstats->rlec; netdev->stats.rx_crc_errors = (unsigned long )hwstats->crcerrs; netdev->stats.rx_missed_errors = (unsigned long )total_mpc; return; } }
/* ixgbe_fdir_reinit_subtask() - re-initialize the Flow Director filter tables
 * after an overflow. Runs only when flags2 bit 128U (FDIR-reinit requested) is
 * set; the bit is cleared immediately (mask 4294967167U == ~128U). Bails out if
 * the adapter is down (state bit 2) or Flow Director ATR is not enabled (flags
 * bit 262144U). On a successful ixgbe_reinit_fdir_tables_82599() it sets state
 * bit 0 on every Tx ring and writes 65536U to register 2176U (NOTE(review):
 * presumably EICS, to re-arm the Flow Director interrupt -- confirm); on
 * failure it logs an error when probe-level messaging (msg_enable & 2) is on.
 * The goto ldv_5896x labels encode a CIL-lowered for-loop over tx queues. */
static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int i ; int tmp ; s32 tmp___0 ; { hw = & adapter->hw; if ((adapter->flags2 & 128U) == 0U) { return; } else { } adapter->flags2 = adapter->flags2 & 4294967167U; tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp != 0) { return; } else { } if ((adapter->flags & 262144U) == 0U) { return; } else { } adapter->fdir_overflow = adapter->fdir_overflow + 1UL; tmp___0 = ixgbe_reinit_fdir_tables_82599(hw); if (tmp___0 == 0) { i = 0; goto ldv_58967; ldv_58966: set_bit(0L, (unsigned long volatile *)(& (adapter->tx_ring[i])->state)); i = i + 1; ldv_58967: ; if (adapter->num_tx_queues > i) { goto ldv_58966; } else { } ixgbe_write_reg(hw, 2176U, 65536U); } else if (((int )adapter->msg_enable & 2) != 0) { netdev_err((struct net_device const *)adapter->netdev, "failed to finish FDIR re-initialization, ignored adding FDIR ATR filters\n"); } else { } return; } }
/* ixgbe_check_hang_subtask() head: declarations only; body continues on the
 * next source line. */
static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u64 eics ; int i ;
int tmp ; int tmp___0 ; int tmp___1 ; bool tmp___2 ; struct ixgbe_q_vector *qv ; { hw = & adapter->hw; eics = 0ULL; tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp != 0) { return; } else { tmp___0 = constant_test_bit(4L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 != 0) { return; } else { tmp___1 = constant_test_bit(1L, (unsigned long const volatile *)(& adapter->state)); if (tmp___1 != 0) { return; } else { } } } tmp___2 = netif_carrier_ok((struct net_device const *)adapter->netdev); if ((int )tmp___2) { i = 0; goto ldv_58976; ldv_58975: set_bit(2L, (unsigned long volatile *)(& (adapter->tx_ring[i])->state)); i = i + 1; ldv_58976: ; if (adapter->num_tx_queues > i) { goto ldv_58975; } else { } } else { } if ((adapter->flags & 8U) == 0U) { ixgbe_write_reg(hw, 2056U, 3221225472U); } else { i = 0; goto ldv_58980; ldv_58979: qv = adapter->q_vector[i]; if ((unsigned long )qv->rx.ring != (unsigned long )((struct ixgbe_ring *)0) || (unsigned long )qv->tx.ring != (unsigned long )((struct ixgbe_ring *)0)) { eics = (1ULL << i) | eics; } else { } i = i + 1; ldv_58980: ; if (adapter->num_q_vectors > i) { goto ldv_58979; } else { } } ixgbe_irq_rearm_queues(adapter, eics); return; } } static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 link_speed ; bool link_up ; bool pfc_en ; { hw = & adapter->hw; link_speed = adapter->link_speed; link_up = adapter->link_up; pfc_en = adapter->dcb_cfg.pfc_mode_enable; if ((adapter->flags & 65536U) == 0U) { return; } else { } if ((unsigned long )hw->mac.ops.check_link != (unsigned long )((s32 (*)(struct ixgbe_hw * , ixgbe_link_speed * , bool * , bool ))0)) { (*(hw->mac.ops.check_link))(hw, & link_speed, & link_up, 0); } else { link_speed = 128U; link_up = 1; } if ((unsigned long )adapter->ixgbe_ieee_pfc != (unsigned long )((struct ieee_pfc *)0)) { pfc_en = ((int )pfc_en | ((unsigned int )(adapter->ixgbe_ieee_pfc)->pfc_en != 0U)) != 0; } 
else { } if ((int )link_up && ((adapter->flags & 4096U) == 0U || ! pfc_en)) { (*(hw->mac.ops.fc_enable))(hw); ixgbe_set_rx_drop_en(adapter); } else { } if ((int )link_up || (long )((adapter->link_check_timeout - (unsigned long )jiffies) + 1000UL) < 0L) { adapter->flags = adapter->flags & 4294901759U; ixgbe_write_reg(hw, 2176U, 1048576U); ixgbe_read_reg(hw, 8U); } else { } adapter->link_up = link_up; adapter->link_speed = link_speed; return; } } static void ixgbe_update_default_up(struct ixgbe_adapter *adapter ) { struct net_device *netdev ; struct dcb_app app ; u8 up___0 ; int tmp ; { netdev = adapter->netdev; app.selector = 1U; app.priority = (unsigned char)0; app.protocol = 0U; up___0 = 0U; if (((int )adapter->dcbx_cap & 8) != 0) { up___0 = dcb_ieee_getapp_mask(netdev, & app); } else { } if ((unsigned int )up___0 > 1U) { tmp = ffs((int )up___0); adapter->default_up = (unsigned int )((u8 )tmp) + 255U; } else { adapter->default_up = 0U; } return; } } static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter ) { struct net_device *netdev ; struct ixgbe_hw *hw ; struct net_device *upper ; struct list_head *iter ; u32 link_speed ; bool flow_rx ; bool flow_tx ; bool tmp ; u32 frctl ; u32 tmp___0 ; u32 rmcs ; u32 tmp___1 ; u32 mflcn ; u32 tmp___2 ; u32 fccfg ; u32 tmp___3 ; int tmp___4 ; struct macvlan_dev *vlan ; void *tmp___5 ; bool tmp___6 ; { netdev = adapter->netdev; hw = & adapter->hw; link_speed = adapter->link_speed; tmp = netif_carrier_ok((struct net_device const *)netdev); if ((int )tmp) { return; } else { } adapter->flags2 = adapter->flags2 & 4294967279U; switch ((unsigned int )hw->mac.type) { case 1U: tmp___0 = ixgbe_read_reg(hw, 20608U); frctl = tmp___0; tmp___1 = ixgbe_read_reg(hw, 15616U); rmcs = tmp___1; flow_rx = (frctl & 32768U) != 0U; flow_tx = (rmcs & 8U) != 0U; goto ldv_59014; case 3U: ; case 4U: ; case 5U: ; case 2U: tmp___2 = ixgbe_read_reg(hw, 17044U); mflcn = tmp___2; tmp___3 = ixgbe_read_reg(hw, 15616U); fccfg = tmp___3; flow_rx = 
(mflcn & 8U) != 0U; flow_tx = (fccfg & 8U) != 0U; goto ldv_59014; default: flow_tx = 0; flow_rx = 0; goto ldv_59014; } ldv_59014: adapter->last_rx_ptp_check = jiffies; tmp___4 = constant_test_bit(8L, (unsigned long const volatile *)(& adapter->state)); if (tmp___4 != 0) { ixgbe_ptp_start_cyclecounter(adapter); } else { } if ((int )adapter->msg_enable & 1) { netdev_info((struct net_device const *)adapter->netdev, "NIC Link is Up %s, Flow Control: %s\n", link_speed != 128U ? (link_speed != 32U ? (link_speed == 8U ? (char *)"100 Mbps" : (char *)"unknown speed") : (char *)"1 Gbps") : (char *)"10 Gbps", ! flow_rx || ! flow_tx ? ((int )flow_rx ? (char *)"RX" : ((int )flow_tx ? (char *)"TX" : (char *)"None")) : (char *)"RX/TX"); } else { } netif_carrier_on(netdev); ixgbe_check_vf_rate_limit(adapter); netif_tx_wake_all_queues(adapter->netdev); rtnl_lock(); iter = & (adapter->netdev)->all_adj_list.upper; upper = netdev_all_upper_get_next_dev_rcu(adapter->netdev, & iter); goto ldv_59024; ldv_59023: tmp___6 = netif_is_macvlan(upper); if ((int )tmp___6) { tmp___5 = netdev_priv((struct net_device const *)upper); vlan = (struct macvlan_dev *)tmp___5; if ((unsigned long )vlan->fwd_priv != (unsigned long )((void *)0)) { netif_tx_wake_all_queues(upper); } else { } } else { } upper = netdev_all_upper_get_next_dev_rcu(adapter->netdev, & iter); ldv_59024: ; if ((unsigned long )upper != (unsigned long )((struct net_device *)0)) { goto ldv_59023; } else { } rtnl_unlock(); ixgbe_update_default_up(adapter); ixgbe_ping_all_vfs(adapter); return; } } static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter ) { struct net_device *netdev ; struct ixgbe_hw *hw ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; { netdev = adapter->netdev; hw = & adapter->hw; adapter->link_up = 0; adapter->link_speed = 0U; tmp = netif_carrier_ok((struct net_device const *)netdev); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } tmp___1 = ixgbe_is_sfp(hw); if 
/* Tail of ixgbe_watchdog_link_is_down() (begins on the previous line): on an
 * 82598-class MAC (mac.type == 1U) with an SFP module, request an SFP rescan
 * via flags2 bit 16U; restart the PTP cyclecounter if PTP is running (state
 * bit 8); log link-down when msg_enable bit 0 is set; mark carrier off and
 * notify all VFs. */
((int )tmp___1 && (unsigned int )hw->mac.type == 1U) { adapter->flags2 = adapter->flags2 | 16U; } else { } tmp___2 = constant_test_bit(8L, (unsigned long const volatile *)(& adapter->state)); if (tmp___2 != 0) { ixgbe_ptp_start_cyclecounter(adapter); } else { } if ((int )adapter->msg_enable & 1) { netdev_info((struct net_device const *)adapter->netdev, "NIC Link is Down\n"); } else { } netif_carrier_off(netdev); ixgbe_ping_all_vfs(adapter); return; } }
/* ixgbe_ring_tx_pending() - return 1 if any PF Tx ring still has unprocessed
 * descriptors (next_to_use != next_to_clean), 0 otherwise. The goto labels
 * encode a simple loop over adapter->num_tx_queues. */
static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter ) { int i ; struct ixgbe_ring *tx_ring ; { i = 0; goto ldv_59037; ldv_59036: tx_ring = adapter->tx_ring[i]; if ((int )tx_ring->next_to_use != (int )tx_ring->next_to_clean) { return (1); } else { } i = i + 1; ldv_59037: ; if (adapter->num_tx_queues > i) { goto ldv_59036; } else { } return (0); } }
/* ixgbe_vf_tx_pending() - return 1 if any VF Tx queue still has work queued.
 * q_per_pool isolates the lowest set bit of the VMDq ring-feature mask
 * (x & -x). Returns 0 when SR-IOV is off (num_vfs == 0) or mac.type > 3U.
 * For each VF queue it compares head (offset 24592U) and tail (offset 24600U)
 * registers, spaced 64 bytes apart per queue; head != tail means pending Tx.
 * NOTE(review): offsets presumably PVFTDH/PVFTDT -- confirm against the
 * register map. Nested goto labels encode the two loops (VFs x queues). */
static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; struct ixgbe_ring_feature *vmdq ; u32 q_per_pool ; int i ; int j ; u32 h ; u32 t ; { hw = & adapter->hw; vmdq = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 1UL; q_per_pool = (u32 )(- ((int )vmdq->mask) & (int )vmdq->mask); if (adapter->num_vfs == 0U) { return (0); } else { } if ((unsigned int )hw->mac.type > 3U) { return (0); } else { } i = 0; goto ldv_59053; ldv_59052: j = 0; goto ldv_59050; ldv_59049: h = ixgbe_read_reg(hw, (q_per_pool * (u32 )i + (u32 )j) * 64U + 24592U); t = ixgbe_read_reg(hw, (q_per_pool * (u32 )i + (u32 )j) * 64U + 24600U); if (h != t) { return (1); } else { } j = j + 1; ldv_59050: ; if ((u32 )j < q_per_pool) { goto ldv_59049; } else { } i = i + 1; ldv_59053: ; if ((unsigned int )i < adapter->num_vfs) { goto ldv_59052; } else { } return (0); } }
/* ixgbe_watchdog_flush_tx() head: checks carrier state; body continues on the
 * next source line. */
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter ) { bool tmp ; bool tmp___0 ; bool tmp___1 ; int tmp___2 ; { tmp___1 = netif_carrier_ok((struct net_device const *)adapter->netdev); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { tmp =
/* Tail of ixgbe_watchdog_flush_tx() (begins on the previous line): with the
 * carrier down, if either the PF rings or any VF queue still has Tx work
 * pending, warn (msg_enable bit 0) and set flags2 bit 64U to request an
 * adapter reset, which ixgbe_reset_subtask() later services. */
ixgbe_ring_tx_pending(adapter); if ((int )tmp) { goto _L; } else { tmp___0 = ixgbe_vf_tx_pending(adapter); if ((int )tmp___0) { _L: /* CIL Label */ if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "initiating reset to clear Tx work after link loss\n"); } else { } adapter->flags2 = adapter->flags2 | 64U; } else { } } } else { } return; } }
/* ixgbe_issue_vf_flr() - issue a PCIe Function Level Reset to a VF device.
 * Waits for pending PCIe transactions first (warns if they never drain), logs
 * the VF being reset, then sets bit 32768 in PCIe capability word at offset 8
 * (NOTE(review): presumably PCI_EXP_DEVCTL / PCI_EXP_DEVCTL_BCR_FLR --
 * confirm against the PCIe spec) and sleeps 100 ms for the FLR to complete. */
__inline static void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter , struct pci_dev *vfdev ) { int tmp ; char const *tmp___0 ; { tmp = pci_wait_for_pending_transaction(vfdev); if (tmp == 0) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "Issuing VFLR with pending transactions\n"); } else { } tmp___0 = pci_name((struct pci_dev const *)vfdev); dev_err((struct device const *)(& (adapter->pdev)->dev), "Issuing VFLR for VF %s\n", tmp___0); pcie_capability_set_word(vfdev, 8, 32768); msleep(100U); return; } }
/* ixgbe_check_for_bad_vf() head - scan for misbehaving VFs. Skips when the
 * carrier is up, when the GPC counter (register 34720U) shows good packets
 * flowing, or when no SR-IOV extended capability (ext cap 16) is present.
 * Reads the VF device ID from config space (pos + 26) and walks matching PCI
 * devices; the loop body continues on the next source line. */
static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; struct pci_dev *pdev ; struct pci_dev *vfdev ; u32 gpc ; int pos ; unsigned short vf_id ; bool tmp ; int tmp___0 ; u16 status_reg ; { hw = & adapter->hw; pdev = adapter->pdev; tmp = netif_carrier_ok((struct net_device const *)adapter->netdev); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } gpc = ixgbe_read_reg(hw, 34720U); if (gpc != 0U) { return; } else { } if ((unsigned long )pdev == (unsigned long )((struct pci_dev *)0)) { return; } else { } pos = pci_find_ext_capability(pdev, 16); if (pos == 0) { return; } else { } pci_read_config_word((struct pci_dev const *)pdev, pos + 26, & vf_id); vfdev = pci_get_device((unsigned int )pdev->vendor, (unsigned int )vf_id, (struct pci_dev *)0); goto ldv_59073; ldv_59072: ; if ((unsigned int )*((unsigned char *)vfdev + 2531UL) != 0U && (unsigned long )vfdev->__annonCompField58.physfn == (unsigned long )pdev) { pci_read_config_word((struct pci_dev const *)vfdev, 6, & status_reg); if (((int )status_reg & 8192) !=
/* Tail of ixgbe_check_for_bad_vf() (begins on the previous line): for each VF
 * belonging to this PF whose PCI status word (config offset 6) has bit 8192
 * set (NOTE(review): presumably Received Master Abort -- confirm against the
 * PCI spec), issue a Function Level Reset via ixgbe_issue_vf_flr(). */
0) { ixgbe_issue_vf_flr(adapter, vfdev); } else { } } else { } vfdev = pci_get_device((unsigned int )pdev->vendor, (unsigned int )vf_id, vfdev); ldv_59073: ; if ((unsigned long )vfdev != (unsigned long )((struct pci_dev *)0)) { goto ldv_59072; } else { } return; } }
/* ixgbe_spoof_check() - warn when the hardware spoofed-packet counter is
 * non-zero. Skipped on 82598-class MACs (mac.type == 1U) and when SR-IOV is
 * off. NOTE(review): register 34688U presumably SSVPC (switch security
 * violation packet count) -- confirm against the register map. */
static void ixgbe_spoof_check(struct ixgbe_adapter *adapter ) { u32 ssvpc ; { if ((unsigned int )adapter->hw.mac.type == 1U || adapter->num_vfs == 0U) { return; } else { } ssvpc = ixgbe_read_reg(& adapter->hw, 34688U); if (ssvpc == 0U) { return; } else { } if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "%u Spoofed packets detected\n", ssvpc); } else { } return; } }
/* ixgbe_watchdog_subtask() - periodic link supervisor. Early-outs while the
 * adapter is down / removing / resetting (state bits 2, 4, 1); otherwise
 * refreshes the cached link state and dispatches to the link-up or link-down
 * handler, then runs the bad-VF scan, spoof check, statistics refresh, and
 * stuck-Tx flush in order. */
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter ) { int tmp ; int tmp___0 ; int tmp___1 ; { tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp != 0) { return; } else { tmp___0 = constant_test_bit(4L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 != 0) { return; } else { tmp___1 = constant_test_bit(1L, (unsigned long const volatile *)(& adapter->state)); if (tmp___1 != 0) { return; } else { } } } ixgbe_watchdog_update_link(adapter); if ((int )adapter->link_up) { ixgbe_watchdog_link_is_up(adapter); } else { ixgbe_watchdog_link_is_down(adapter); } ixgbe_check_for_bad_vf(adapter); ixgbe_spoof_check(adapter); ixgbe_update_stats(adapter); ixgbe_watchdog_flush_tx(adapter); return; } }
/* ixgbe_sfp_detection_subtask() head - detect a newly inserted SFP+ module.
 * Runs only when flags2 bit 16U (search for SFP) or 32U (needs module setup)
 * is set, and takes state bit 7 as a mutex via test_and_set_bit. err values
 * -19/-20 are compared verbatim (presumably -ENODEV-style driver codes);
 * the body continues on the next source line. */
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; s32 err ; int tmp ; { hw = & adapter->hw; if ((adapter->flags2 & 16U) == 0U && (adapter->flags2 & 32U) == 0U) { return; } else { } tmp = test_and_set_bit(7L, (unsigned long volatile *)(& adapter->state)); if (tmp != 0) { return; } else { } err = (*(hw->phy.ops.identify_sfp))(hw); if (err == -19) { goto sfp_out; } else { } if (err == -20) { adapter->flags2 = adapter->flags2 | 32U; } else { } if (err != 0) { goto sfp_out; } else { } if ((adapter->flags2 &
32U) == 0U) { goto sfp_out; } else { } adapter->flags2 = adapter->flags2 & 4294967263U; if ((unsigned int )hw->mac.type == 1U) { err = (*(hw->phy.ops.reset))(hw); } else { err = (*(hw->mac.ops.setup_sfp))(hw); } if (err == -19) { goto sfp_out; } else { } adapter->flags = adapter->flags | 131072U; if (((int )adapter->msg_enable & 2) != 0) { netdev_info((struct net_device const *)adapter->netdev, "detected SFP+: %d\n", (unsigned int )hw->phy.sfp_type); } else { } sfp_out: clear_bit(7L, (unsigned long volatile *)(& adapter->state)); if (err == -19 && (unsigned int )(adapter->netdev)->reg_state == 1U) { dev_err((struct device const *)(& (adapter->pdev)->dev), "failed to initialize because an unsupported SFP+ module type was detected.\n"); dev_err((struct device const *)(& (adapter->pdev)->dev), "Reload the driver after installing a supported module.\n"); ldv_unregister_netdev_57(adapter->netdev); } else { } return; } } static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 speed ; bool autoneg ; int tmp ; { hw = & adapter->hw; autoneg = 0; if ((adapter->flags & 131072U) == 0U) { return; } else { } tmp = test_and_set_bit(7L, (unsigned long volatile *)(& adapter->state)); if (tmp != 0) { return; } else { } adapter->flags = adapter->flags & 4294836223U; speed = hw->phy.autoneg_advertised; if (speed == 0U && (unsigned long )hw->mac.ops.get_link_capabilities != (unsigned long )((s32 (*)(struct ixgbe_hw * , ixgbe_link_speed * , bool * ))0)) { (*(hw->mac.ops.get_link_capabilities))(hw, & speed, & autoneg); if (! 
/* Tail of ixgbe_sfp_link_config_subtask() (begins on the previous line): if
 * autoneg is off and 10G (bit 128U) is advertised, force speed to 10G only;
 * call mac.ops.setup_link when available, set flags bit 65536U (link-check
 * pending) with a fresh link_check_timeout, and release the state-bit-7
 * mutex taken earlier in the function. */
autoneg) { if ((speed & 128U) != 0U) { speed = 128U; } else { } } else { } } else { } if ((unsigned long )hw->mac.ops.setup_link != (unsigned long )((s32 (*)(struct ixgbe_hw * , ixgbe_link_speed , bool ))0)) { (*(hw->mac.ops.setup_link))(hw, speed, 1); } else { } adapter->flags = adapter->flags | 65536U; adapter->link_check_timeout = jiffies; clear_bit(7L, (unsigned long volatile *)(& adapter->state)); return; } }
/* ixgbe_service_timer() - timer callback; re-arms itself and kicks the
 * service task. Polls every 25 jiffies while a link check is pending (flags
 * bit 65536U), otherwise every 500 jiffies. ldv_mod_timer_58 is the LDV
 * wrapper around mod_timer(). */
static void ixgbe_service_timer(unsigned long data ) { struct ixgbe_adapter *adapter ; unsigned long next_event_offset ; { adapter = (struct ixgbe_adapter *)data; if ((adapter->flags & 65536U) != 0U) { next_event_offset = 25UL; } else { next_event_offset = 500UL; } ldv_mod_timer_58(& adapter->service_timer, next_event_offset + (unsigned long )jiffies); ixgbe_service_event_schedule(adapter); return; } }
/* ixgbe_phy_interrupt_subtask() - service a PHY (LASI) interrupt. Runs only
 * when flags2 bit 2048U is set (cleared immediately, mask 4294965247U ==
 * ~2048U) and the PHY provides handle_lasi. Logs the overheat message only
 * when the handler returns 4294967270U == (u32)-26 (NOTE(review): presumably
 * the driver's overtemp error code -- confirm against ixgbe_type.h). */
static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 status ; s32 tmp ; { hw = & adapter->hw; if ((adapter->flags2 & 2048U) == 0U) { return; } else { } adapter->flags2 = adapter->flags2 & 4294965247U; if ((unsigned long )hw->phy.ops.handle_lasi == (unsigned long )((s32 (*)(struct ixgbe_hw * ))0)) { return; } else { } tmp = (*(hw->phy.ops.handle_lasi))(& adapter->hw); status = (u32 )tmp; if (status != 4294967270U) { return; } else { } if ((int )adapter->msg_enable & 1) { netdev_crit((struct net_device const *)adapter->netdev, "%s\n", (char const *)(& ixgbe_overheat_msg)); } else { } return; } }
/* ixgbe_reset_subtask() head - service a queued reset request (flags2 bit
 * 64U, cleared with mask 4294967231U == ~64U); skips while the adapter is
 * down / removing / resetting (state bits 2, 4, 1). The body continues on
 * the next source line. */
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter ) { int tmp ; int tmp___0 ; int tmp___1 ; { if ((adapter->flags2 & 64U) == 0U) { return; } else { } adapter->flags2 = adapter->flags2 & 4294967231U; tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp != 0) { return; } else { tmp___0 = constant_test_bit(4L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 != 0) { return; } else { tmp___1 = constant_test_bit(1L, (unsigned long const volatile *)(&
/* Tail of ixgbe_reset_subtask() (begins on the previous line): dump adapter
 * state, log, bump tx_timeout_count, and perform the reset via
 * ixgbe_reinit_locked() under the RTNL lock. */
adapter->state)); if (tmp___1 != 0) { return; } else { } } } ixgbe_dump(adapter); netdev_err((struct net_device const *)adapter->netdev, "Reset adapter\n"); adapter->tx_timeout_count = adapter->tx_timeout_count + 1U; rtnl_lock(); ixgbe_reinit_locked(adapter); rtnl_unlock(); return; } }
/* ixgbe_service_task() - the workqueue handler driven by ixgbe_service_timer.
 * The __mptr arithmetic is CIL-expanded container_of(work, struct
 * ixgbe_adapter, service_task) (offset 0xffffffffffff61d0UL is the negative
 * member offset). If the device has been surprise-removed, bring the adapter
 * down (unless already down, state bit 2) and complete the event; otherwise
 * run every service subtask in order, plus PTP overflow/Rx-hang checks while
 * PTP is running (state bit 8), then mark the service event complete. */
static void ixgbe_service_task(struct work_struct *work ) { struct ixgbe_adapter *adapter ; struct work_struct const *__mptr ; int tmp ; bool tmp___0 ; int tmp___1 ; { __mptr = (struct work_struct const *)work; adapter = (struct ixgbe_adapter *)__mptr + 0xffffffffffff61d0UL; tmp___0 = ixgbe_removed((void *)adapter->hw.hw_addr); if ((int )tmp___0) { tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp == 0) { rtnl_lock(); ixgbe_down(adapter); rtnl_unlock(); } else { } ixgbe_service_event_complete(adapter); return; } else { } ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); ixgbe_sfp_link_config_subtask(adapter); ixgbe_check_overtemp_subtask(adapter); ixgbe_watchdog_subtask(adapter); ixgbe_fdir_reinit_subtask(adapter); ixgbe_check_hang_subtask(adapter); tmp___1 = constant_test_bit(8L, (unsigned long const volatile *)(& adapter->state)); if (tmp___1 != 0) { ixgbe_ptp_overflow_check(adapter); ixgbe_ptp_rx_hang(adapter); } else { } ixgbe_service_event_complete(adapter); return; } }
/* ixgbe_tso() head - build a TSO context descriptor for a GSO skb. The first
 * check reads the skb's ip_summed byte at offset 145UL (CIL-lowered bitfield
 * access; 6U presumably CHECKSUM_PARTIAL -- NOTE(review): confirm offsets
 * against this kernel's struct sk_buff layout). Body continues on the next
 * source line. */
static int ixgbe_tso(struct ixgbe_ring *tx_ring , struct ixgbe_tx_buffer *first , u8 *hdr_len ) { struct sk_buff *skb ; u32 vlan_macip_lens ; u32 type_tucmd ; u32 mss_l4len_idx ; u32 l4len ; int err ; bool tmp ; int tmp___0 ; struct iphdr *iph ; struct iphdr *tmp___1 ; struct tcphdr *tmp___2 ; __sum16 tmp___3 ; struct ipv6hdr *tmp___4 ; struct tcphdr *tmp___5 ; struct ipv6hdr *tmp___6 ; struct ipv6hdr *tmp___7 ; __sum16 tmp___8 ; bool tmp___9 ; int tmp___10 ; unsigned char *tmp___11 ; unsigned char *tmp___12 ; int tmp___13 ; { skb = first->skb; if ((unsigned int )*((unsigned char *)skb + 145UL) != 6U) { return (0); }
else { } tmp = skb_is_gso((struct sk_buff const *)skb); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } err = skb_cow_head(skb, 0U); if (err < 0) { return (err); } else { } type_tucmd = 2048U; if ((unsigned int )first->protocol == 8U) { tmp___1 = ip_hdr((struct sk_buff const *)skb); iph = tmp___1; iph->tot_len = 0U; iph->check = 0U; tmp___2 = tcp_hdr((struct sk_buff const *)skb); tmp___3 = csum_tcpudp_magic(iph->saddr, iph->daddr, 0, 6, 0U); tmp___2->check = ~ ((int )tmp___3); type_tucmd = type_tucmd | 1024U; first->tx_flags = first->tx_flags | 50U; } else { tmp___9 = skb_is_gso_v6((struct sk_buff const *)skb); if ((int )tmp___9) { tmp___4 = ipv6_hdr((struct sk_buff const *)skb); tmp___4->payload_len = 0U; tmp___5 = tcp_hdr((struct sk_buff const *)skb); tmp___6 = ipv6_hdr((struct sk_buff const *)skb); tmp___7 = ipv6_hdr((struct sk_buff const *)skb); tmp___8 = csum_ipv6_magic((struct in6_addr const *)(& tmp___7->saddr), (struct in6_addr const *)(& tmp___6->daddr), 0U, 6, 0U); tmp___5->check = ~ ((int )tmp___8); first->tx_flags = first->tx_flags | 34U; } else { } } l4len = tcp_hdrlen((struct sk_buff const *)skb); tmp___10 = skb_transport_offset((struct sk_buff const *)skb); *hdr_len = (int )((u8 )tmp___10) + (int )((u8 )l4len); tmp___11 = skb_end_pointer((struct sk_buff const *)skb); first->gso_segs = ((struct skb_shared_info *)tmp___11)->gso_segs; first->bytecount = first->bytecount + (unsigned int )(((int )first->gso_segs + -1) * (int )*hdr_len); mss_l4len_idx = l4len << 8; tmp___12 = skb_end_pointer((struct sk_buff const *)skb); mss_l4len_idx = (u32 )((int )((struct skb_shared_info *)tmp___12)->gso_size << 16) | mss_l4len_idx; vlan_macip_lens = skb_network_header_len((struct sk_buff const *)skb); tmp___13 = skb_network_offset((struct sk_buff const *)skb); vlan_macip_lens = (u32 )(tmp___13 << 9) | vlan_macip_lens; vlan_macip_lens = (first->tx_flags & 4294901760U) | vlan_macip_lens; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 
0U, type_tucmd, mss_l4len_idx); return (1); } } static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring , struct ixgbe_tx_buffer *first ) { struct sk_buff *skb ; u32 vlan_macip_lens ; u32 mss_l4len_idx ; u32 type_tucmd ; u8 l4_hdr ; u32 tmp ; struct iphdr *tmp___0 ; u32 tmp___1 ; struct ipv6hdr *tmp___2 ; int tmp___3 ; long tmp___4 ; unsigned int tmp___5 ; int tmp___6 ; long tmp___7 ; int tmp___8 ; { skb = first->skb; vlan_macip_lens = 0U; mss_l4len_idx = 0U; type_tucmd = 0U; if ((unsigned int )*((unsigned char *)skb + 145UL) != 6U) { if ((first->tx_flags & 1U) == 0U && (first->tx_flags & 8U) == 0U) { return; } else { } } else { l4_hdr = 0U; switch ((int )first->protocol) { case 8: tmp = skb_network_header_len((struct sk_buff const *)skb); vlan_macip_lens = tmp | vlan_macip_lens; type_tucmd = type_tucmd | 1024U; tmp___0 = ip_hdr((struct sk_buff const *)skb); l4_hdr = tmp___0->protocol; goto ldv_59135; case 56710: tmp___1 = skb_network_header_len((struct sk_buff const *)skb); vlan_macip_lens = tmp___1 | vlan_macip_lens; tmp___2 = ipv6_hdr((struct sk_buff const *)skb); l4_hdr = tmp___2->nexthdr; goto ldv_59135; default: tmp___3 = net_ratelimit(); tmp___4 = ldv__builtin_expect(tmp___3 != 0, 0L); if (tmp___4 != 0L) { dev_warn((struct device const *)tx_ring->dev, "partial checksum but proto=%x!\n", (int )first->protocol); } else { } goto ldv_59135; } ldv_59135: ; switch ((int )l4_hdr) { case 6: type_tucmd = type_tucmd | 2048U; tmp___5 = tcp_hdrlen((struct sk_buff const *)skb); mss_l4len_idx = tmp___5 << 8; goto ldv_59139; case 132: type_tucmd = type_tucmd | 4096U; mss_l4len_idx = 3072U; goto ldv_59139; case 17: mss_l4len_idx = 2048U; goto ldv_59139; default: tmp___6 = net_ratelimit(); tmp___7 = ldv__builtin_expect(tmp___6 != 0, 0L); if (tmp___7 != 0L) { dev_warn((struct device const *)tx_ring->dev, "partial checksum but l4 proto=%x!\n", (int )l4_hdr); } else { } goto ldv_59139; } ldv_59139: first->tx_flags = first->tx_flags | 32U; } tmp___8 = skb_network_offset((struct 
sk_buff const *)skb); vlan_macip_lens = (u32 )(tmp___8 << 9) | vlan_macip_lens; vlan_macip_lens = (first->tx_flags & 4294901760U) | vlan_macip_lens; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0U, type_tucmd, mss_l4len_idx); return; } }
/* (Above: tail of ixgbe_tx_csum(), begun on the previous line -- folds the
 * network-header offset (<< 9) and the VLAN tag (high 16 bits of tx_flags)
 * into vlan_macip_lens and emits the checksum context descriptor.) */
/* ixgbe_tx_cmd_type() - assemble the Tx descriptor command-type word from
 * tx_flags. The "(flag & bit) * constant" products are CIL-lowered bit
 * shifts that relocate each flag bit into its descriptor position; the final
 * XOR folds in skb->no_fcs. 573571072U is the fixed base command value. */
static u32 ixgbe_tx_cmd_type(struct sk_buff *skb , u32 tx_flags ) { u32 cmd_type ; { cmd_type = 573571072U; cmd_type = (tx_flags & 1U) * 1073741824U | cmd_type; cmd_type = (tx_flags & 2U) * 1073741824U | cmd_type; cmd_type = (tx_flags & 4U) * 131072U | cmd_type; cmd_type = (unsigned int )skb->no_fcs * 33554432U ^ cmd_type; return (cmd_type); } }
/* ixgbe_tx_olinfo_status() - write the olinfo_status descriptor word: payload
 * length in the upper bits (<< 14) plus checksum/IPsec-style flag bits
 * relocated from tx_flags (again "(flag) * 16U" encodes a shift). */
static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc , u32 tx_flags , unsigned int paylen ) { u32 olinfo_status ; { olinfo_status = paylen << 14; olinfo_status = (tx_flags & 32U) * 16U | olinfo_status; olinfo_status = (tx_flags & 16U) * 16U | olinfo_status; olinfo_status = (tx_flags & 8U) * 16U | olinfo_status; tx_desc->read.olinfo_status = olinfo_status; return; } }
/* __ixgbe_maybe_stop_tx() - slow path of Tx-queue flow control: stop the
 * subqueue, issue a full memory barrier (mfence) so the stop is visible
 * before re-reading the ring state, then re-check free descriptors. If space
 * is still short return -16 (-EBUSY) with the queue stopped; if cleanup made
 * room in the meantime, restart the subqueue (closing the race with the
 * completion path) and count a restart. */
static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring , u16 size ) { u16 tmp ; long tmp___0 ; { netif_stop_subqueue(tx_ring->netdev, (int )tx_ring->queue_index); __asm__ volatile ("mfence": : : "memory"); tmp = ixgbe_desc_unused(tx_ring); tmp___0 = ldv__builtin_expect((int )tmp < (int )size, 1L); if (tmp___0 != 0L) { return (-16); } else { } netif_start_subqueue(tx_ring->netdev, (int )tx_ring->queue_index); tx_ring->__annonCompField121.tx_stats.restart_queue = tx_ring->__annonCompField121.tx_stats.restart_queue + 1ULL; return (0); } }
/* ixgbe_maybe_stop_tx() - fast path: return 0 immediately when the ring has
 * at least `size` free descriptors (the expected case per builtin_expect);
 * otherwise fall through to the stop-and-recheck slow path above. */
__inline static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring , u16 size ) { u16 tmp ; long tmp___0 ; int tmp___1 ; { tmp = ixgbe_desc_unused(tx_ring); tmp___0 = ldv__builtin_expect((int )tmp >= (int )size, 1L); if (tmp___0 != 0L) { return (0); } else { } tmp___1 = __ixgbe_maybe_stop_tx(tx_ring, (int )size); return (tmp___1); } }
/* ixgbe_tx_map() head: declarations only; the DMA-mapping body continues on
 * the next source line. */
static void ixgbe_tx_map(struct ixgbe_ring *tx_ring , struct ixgbe_tx_buffer *first , u8 const hdr_len ) { struct sk_buff *skb ; struct
ixgbe_tx_buffer *tx_buffer ; union ixgbe_adv_tx_desc *tx_desc ; struct skb_frag_struct *frag ; dma_addr_t dma ; unsigned int data_len ; unsigned int size ; u32 tx_flags ; u32 cmd_type ; u32 tmp ; u16 i ; unsigned char *tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; unsigned int __min1 ; unsigned int __min2 ; unsigned int tmp___4 ; struct netdev_queue *tmp___5 ; struct netdev_queue *tmp___6 ; bool tmp___7 ; { skb = first->skb; tx_flags = first->tx_flags; tmp = ixgbe_tx_cmd_type(skb, tx_flags); cmd_type = tmp; i = tx_ring->next_to_use; tx_desc = (union ixgbe_adv_tx_desc *)tx_ring->desc + (unsigned long )i; ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - (unsigned int )hdr_len); size = skb_headlen((struct sk_buff const *)skb); data_len = skb->data_len; if ((tx_flags & 128U) != 0U) { if (data_len <= 7U) { size = (size + data_len) - 8U; data_len = 0U; } else { data_len = data_len - 8U; } } else { } dma = dma_map_single_attrs(tx_ring->dev, (void *)skb->data, (size_t )size, 1, (struct dma_attrs *)0); tx_buffer = first; tmp___0 = skb_end_pointer((struct sk_buff const *)skb); frag = (struct skb_frag_struct *)(& ((struct skb_shared_info *)tmp___0)->frags); ldv_59185: tmp___1 = dma_mapping_error(tx_ring->dev, dma); if (tmp___1 != 0) { goto dma_error; } else { } tx_buffer->len = size; tx_buffer->dma = dma; tx_desc->read.buffer_addr = dma; goto ldv_59179; ldv_59178: tx_desc->read.cmd_type_len = cmd_type ^ 16384U; i = (u16 )((int )i + 1); tx_desc = tx_desc + 1; if ((int )tx_ring->count == (int )i) { tx_desc = (union ixgbe_adv_tx_desc *)tx_ring->desc; i = 0U; } else { } tx_desc->read.olinfo_status = 0U; dma = dma + 16384ULL; size = size - 16384U; tx_desc->read.buffer_addr = dma; ldv_59179: tmp___2 = ldv__builtin_expect(size > 16384U, 0L); if (tmp___2 != 0L) { goto ldv_59178; } else { } tmp___3 = ldv__builtin_expect(data_len == 0U, 1L); if (tmp___3 != 0L) { goto ldv_59181; } else { } tx_desc->read.cmd_type_len = cmd_type ^ size; i = (u16 )((int )i + 1); tx_desc = 
tx_desc + 1; if ((int )tx_ring->count == (int )i) { tx_desc = (union ixgbe_adv_tx_desc *)tx_ring->desc; i = 0U; } else { } tx_desc->read.olinfo_status = 0U; __min1 = data_len; tmp___4 = skb_frag_size((skb_frag_t const *)frag); __min2 = tmp___4; size = __min1 < __min2 ? __min1 : __min2; data_len = data_len - size; dma = skb_frag_dma_map(tx_ring->dev, (skb_frag_t const *)frag, 0UL, (size_t )size, 1); tx_buffer = tx_ring->__annonCompField118.tx_buffer_info + (unsigned long )i; frag = frag + 1; goto ldv_59185; ldv_59181: cmd_type = (size | cmd_type) | 150994944U; tx_desc->read.cmd_type_len = cmd_type; tmp___5 = txring_txq((struct ixgbe_ring const *)tx_ring); netdev_tx_sent_queue(tmp___5, first->bytecount); first->time_stamp = jiffies; __asm__ volatile ("sfence": : : "memory"); first->next_to_watch = tx_desc; i = (u16 )((int )i + 1); if ((int )tx_ring->count == (int )i) { i = 0U; } else { } tx_ring->next_to_use = i; ixgbe_maybe_stop_tx(tx_ring, 21); tmp___6 = txring_txq((struct ixgbe_ring const *)tx_ring); tmp___7 = netif_xmit_stopped((struct netdev_queue const *)tmp___6); if ((int )tmp___7 || (unsigned int )*((unsigned char *)skb + 142UL) == 0U) { writel((unsigned int )i, (void volatile *)tx_ring->tail); __asm__ volatile ("": : : "memory"); } else { } return; dma_error: dev_err((struct device const *)tx_ring->dev, "TX DMA map failed\n"); ldv_59187: tx_buffer = tx_ring->__annonCompField118.tx_buffer_info + (unsigned long )i; ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); if ((unsigned long )tx_buffer == (unsigned long )first) { goto ldv_59186; } else { } if ((unsigned int )i == 0U) { i = tx_ring->count; } else { } i = (u16 )((int )i - 1); goto ldv_59187; ldv_59186: tx_ring->next_to_use = i; return; } } static void ixgbe_atr(struct ixgbe_ring *ring , struct ixgbe_tx_buffer *first ) { struct ixgbe_q_vector *q_vector ; union ixgbe_atr_hash_dword input ; union ixgbe_atr_hash_dword common ; union __anonunion_hdr_412 hdr ; struct tcphdr *th ; __be16 vlan_id ; __u16 tmp 
; { q_vector = ring->q_vector; input.dword = 0U; common.dword = 0U; if ((unsigned long )q_vector == (unsigned long )((struct ixgbe_q_vector *)0)) { return; } else { } if ((unsigned int )ring->__annonCompField120.__annonCompField119.atr_sample_rate == 0U) { return; } else { } ring->__annonCompField120.__annonCompField119.atr_count = (u8 )((int )ring->__annonCompField120.__annonCompField119.atr_count + 1); hdr.network = skb_network_header((struct sk_buff const *)first->skb); if (((unsigned int )first->protocol != 56710U || (unsigned int )(hdr.ipv6)->nexthdr != 6U) && ((unsigned int )first->protocol != 8U || (unsigned int )(hdr.ipv4)->protocol != 6U)) { return; } else { } th = tcp_hdr((struct sk_buff const *)first->skb); if ((unsigned long )th == (unsigned long )((struct tcphdr *)0) || (unsigned int )*((unsigned char *)th + 13UL) != 0U) { return; } else { } if ((unsigned int )*((unsigned char *)th + 13UL) == 0U && (int )ring->__annonCompField120.__annonCompField119.atr_count < (int )ring->__annonCompField120.__annonCompField119.atr_sample_rate) { return; } else { } ring->__annonCompField120.__annonCompField119.atr_count = 0U; tmp = __fswab16((int )((__u16 )(first->tx_flags >> 16))); vlan_id = tmp; input.formatted.vlan_id = vlan_id; if ((first->tx_flags & 65U) != 0U) { common.port.src = (__be16 )((unsigned int )((int )common.port.src ^ (int )th->dest) ^ 129U); } else { common.port.src = (__be16 )((int )common.port.src ^ ((int )th->dest ^ (int )first->protocol)); } common.port.dst = (__be16 )((int )common.port.dst ^ (int )th->source); if ((unsigned int )first->protocol == 8U) { input.formatted.flow_type = 2U; common.ip = common.ip ^ ((hdr.ipv4)->saddr ^ (hdr.ipv4)->daddr); } else { input.formatted.flow_type = 6U; common.ip = common.ip ^ ((((((((hdr.ipv6)->saddr.in6_u.u6_addr32[0] ^ (hdr.ipv6)->saddr.in6_u.u6_addr32[1]) ^ (hdr.ipv6)->saddr.in6_u.u6_addr32[2]) ^ (hdr.ipv6)->saddr.in6_u.u6_addr32[3]) ^ (hdr.ipv6)->daddr.in6_u.u6_addr32[0]) ^ 
(hdr.ipv6)->daddr.in6_u.u6_addr32[1]) ^ (hdr.ipv6)->daddr.in6_u.u6_addr32[2]) ^ (hdr.ipv6)->daddr.in6_u.u6_addr32[3]); } ixgbe_fdir_add_signature_filter_82599(& (q_vector->adapter)->hw, input, common, (int )ring->queue_index); return; } } static u16 ixgbe_select_queue(struct net_device *dev , struct sk_buff *skb , void *accel_priv , u16 (*fallback)(struct net_device * , struct sk_buff * ) ) { struct ixgbe_fwd_adapter *fwd_adapter ; struct ixgbe_adapter *adapter ; struct ixgbe_ring_feature *f ; int txq ; __be16 tmp ; void *tmp___0 ; u16 tmp___1 ; u16 tmp___3 ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; bool tmp___4 ; { fwd_adapter = (struct ixgbe_fwd_adapter *)accel_priv; if ((unsigned long )fwd_adapter != (unsigned long )((struct ixgbe_fwd_adapter *)0)) { return ((int )skb->queue_mapping + (int )((u16 )fwd_adapter->tx_base_queue)); } else { } tmp = vlan_get_protocol(skb); switch ((int )tmp) { case 1673: ; case 5257: tmp___0 = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp___0; if ((adapter->flags & 2097152U) != 0U) { goto ldv_59214; } else { } default: tmp___1 = (*fallback)(dev, skb); return (tmp___1); } ldv_59214: f = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 4UL; tmp___4 = skb_rx_queue_recorded((struct sk_buff const *)skb); if ((int )tmp___4) { tmp___3 = skb_get_rx_queue((struct sk_buff const *)skb); txq = (int )tmp___3; } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_59221; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_59221; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_59221; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_59221; default: __bad_percpu_size(); } ldv_59221: pscr_ret__ = pfo_ret__; goto 
ldv_59227; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_59231; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_59231; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_59231; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_59231; default: __bad_percpu_size(); } ldv_59231: pscr_ret__ = pfo_ret_____0; goto ldv_59227; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_59240; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_59240; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_59240; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_59240; default: __bad_percpu_size(); } ldv_59240: pscr_ret__ = pfo_ret_____1; goto ldv_59227; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_59249; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_59249; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_59249; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_59249; default: __bad_percpu_size(); } ldv_59249: pscr_ret__ = pfo_ret_____2; goto ldv_59227; default: __bad_size_call_parameter(); goto ldv_59227; } ldv_59227: txq = pscr_ret__; } goto ldv_59258; ldv_59257: txq = txq - (int )f->indices; ldv_59258: ; if ((int )f->indices <= txq) { goto ldv_59257; } else { } return ((int )f->offset + (int )((u16 )txq)); } } netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb , struct ixgbe_adapter *adapter , struct ixgbe_ring *tx_ring ) { struct ixgbe_tx_buffer *first ; int tso ; u32 tx_flags ; unsigned short f ; u16 count ; unsigned int tmp ; __be16 protocol 
; u8 hdr_len ; unsigned char *tmp___0 ; unsigned char *tmp___1 ; int tmp___2 ; struct vlan_hdr *vhdr ; struct vlan_hdr _vhdr ; void *tmp___3 ; __u16 tmp___4 ; unsigned char *tmp___5 ; unsigned char *tmp___6 ; unsigned char *tmp___7 ; long tmp___8 ; int tmp___9 ; struct vlan_ethhdr *vhdr___0 ; int tmp___10 ; __u16 tmp___11 ; int tmp___12 ; { tx_flags = 0U; tmp = skb_headlen((struct sk_buff const *)skb); count = (u16 )((tmp + 16383U) / 16384U); protocol = skb->protocol; hdr_len = 0U; f = 0U; goto ldv_59273; ldv_59272: tmp___0 = skb_end_pointer((struct sk_buff const *)skb); count = (int )((u16 )((((struct skb_shared_info *)tmp___0)->frags[(int )f].size + 16383U) / 16384U)) + (int )count; f = (unsigned short )((int )f + 1); ldv_59273: tmp___1 = skb_end_pointer((struct sk_buff const *)skb); if ((int )((unsigned short )((struct skb_shared_info *)tmp___1)->nr_frags) > (int )f) { goto ldv_59272; } else { } tmp___2 = ixgbe_maybe_stop_tx(tx_ring, (int )((unsigned int )count + 3U)); if (tmp___2 != 0) { tx_ring->__annonCompField121.tx_stats.tx_busy = tx_ring->__annonCompField121.tx_stats.tx_busy + 1ULL; return (16); } else { } first = tx_ring->__annonCompField118.tx_buffer_info + (unsigned long )tx_ring->next_to_use; first->skb = skb; first->bytecount = skb->len; first->gso_segs = 1U; if (((int )skb->vlan_tci & 4096) != 0) { tx_flags = (u32 )(((int )skb->vlan_tci & -4097) << 16) | tx_flags; tx_flags = tx_flags | 1U; } else if ((unsigned int )protocol == 129U) { tmp___3 = skb_header_pointer((struct sk_buff const *)skb, 14, 4, (void *)(& _vhdr)); vhdr = (struct vlan_hdr *)tmp___3; if ((unsigned long )vhdr == (unsigned long )((struct vlan_hdr *)0)) { goto out_drop; } else { } tmp___4 = __fswab16((int )vhdr->h_vlan_TCI); tx_flags = (u32 )((int )tmp___4 << 16) | tx_flags; tx_flags = tx_flags | 64U; } else { } protocol = vlan_get_protocol(skb); tmp___7 = skb_end_pointer((struct sk_buff const *)skb); tmp___8 = ldv__builtin_expect((long )((struct skb_shared_info *)tmp___7)->tx_flags & 
1L, 0L); if (tmp___8 != 0L && (unsigned long )adapter->ptp_clock != (unsigned long )((struct ptp_clock *)0)) { tmp___9 = test_and_set_bit_lock(9L, (unsigned long volatile *)(& adapter->state)); if (tmp___9 == 0) { tmp___5 = skb_end_pointer((struct sk_buff const *)skb); tmp___6 = skb_end_pointer((struct sk_buff const *)skb); ((struct skb_shared_info *)tmp___5)->tx_flags = (__u8 )((unsigned int )((struct skb_shared_info *)tmp___6)->tx_flags | 4U); tx_flags = tx_flags | 4U; adapter->ptp_tx_skb = skb_get(skb); adapter->ptp_tx_start = jiffies; schedule_work(& adapter->ptp_tx_work); } else { } } else { } skb_tx_timestamp(skb); if ((adapter->flags & 8388608U) != 0U) { tx_flags = tx_flags | 8U; } else { } if ((adapter->flags & 4096U) != 0U && ((tx_flags & 65U) != 0U || skb->priority != 7U)) { tx_flags = tx_flags & 536870911U; tx_flags = (skb->priority << 29) | tx_flags; if ((tx_flags & 64U) != 0U) { tmp___10 = skb_cow_head(skb, 0U); if (tmp___10 != 0) { goto out_drop; } else { } vhdr___0 = (struct vlan_ethhdr *)skb->data; tmp___11 = __fswab16((int )((__u16 )(tx_flags >> 16))); vhdr___0->h_vlan_TCI = tmp___11; } else { tx_flags = tx_flags | 1U; } } else { } first->tx_flags = tx_flags; first->protocol = protocol; if ((unsigned int )protocol == 1673U && ((tx_ring->netdev)->features & 538968064ULL) != 0ULL) { tso = ixgbe_fso(tx_ring, first, & hdr_len); if (tso < 0) { goto out_drop; } else { } goto xmit_fcoe; } else { } tso = ixgbe_tso(tx_ring, first, & hdr_len); if (tso < 0) { goto out_drop; } else if (tso == 0) { ixgbe_tx_csum(tx_ring, first); } else { } tmp___12 = constant_test_bit(0L, (unsigned long const volatile *)(& tx_ring->state)); if (tmp___12 != 0) { ixgbe_atr(tx_ring, first); } else { } xmit_fcoe: ixgbe_tx_map(tx_ring, first, (int )hdr_len); return (0); out_drop: dev_kfree_skb_any(first->skb); first->skb = (struct sk_buff *)0; return (0); } } static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb , struct net_device *netdev , struct ixgbe_ring *ring ) { struct 
ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_ring *tx_ring ; int tmp___0 ; netdev_tx_t tmp___1 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; tmp___0 = skb_put_padto(skb, 17U); if (tmp___0 != 0) { return (0); } else { } tx_ring = (unsigned long )ring == (unsigned long )((struct ixgbe_ring *)0) ? adapter->tx_ring[(int )skb->queue_mapping] : ring; tmp___1 = ixgbe_xmit_frame_ring(skb, adapter, tx_ring); return (tmp___1); } } static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb , struct net_device *netdev ) { netdev_tx_t tmp ; { tmp = __ixgbe_xmit_frame(skb, netdev, (struct ixgbe_ring *)0); return (tmp); } } static int ixgbe_set_mac(struct net_device *netdev , void *p ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; struct sockaddr *addr ; int ret ; bool tmp___0 ; int tmp___1 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; addr = (struct sockaddr *)p; tmp___0 = is_valid_ether_addr((u8 const *)(& addr->sa_data)); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (-99); } else { } ixgbe_del_mac_filter(adapter, (u8 *)(& hw->mac.addr), (int )adapter->ring_feature[1].offset); memcpy((void *)netdev->dev_addr, (void const *)(& addr->sa_data), (size_t )netdev->addr_len); memcpy((void *)(& hw->mac.addr), (void const *)(& addr->sa_data), (size_t )netdev->addr_len); ret = ixgbe_add_mac_filter(adapter, (u8 *)(& hw->mac.addr), (int )adapter->ring_feature[1].offset); return (0 < ret ? 
0 : ret); } } static int ixgbe_mdio_read(struct net_device *netdev , int prtad , int devad , u16 addr ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; u16 value ; int rc ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; if (hw->phy.mdio.prtad != prtad) { return (-22); } else { } rc = (*(hw->phy.ops.read_reg))(hw, (u32 )addr, (u32 )devad, & value); if (rc == 0) { rc = (int )value; } else { } return (rc); } } static int ixgbe_mdio_write(struct net_device *netdev , int prtad , int devad , u16 addr , u16 value ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; s32 tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; if (hw->phy.mdio.prtad != prtad) { return (-22); } else { } tmp___0 = (*(hw->phy.ops.write_reg))(hw, (u32 )addr, (u32 )devad, (int )value); return (tmp___0); } } static int ixgbe_ioctl(struct net_device *netdev , struct ifreq *req , int cmd ) { struct ixgbe_adapter *adapter ; void *tmp ; int tmp___0 ; int tmp___1 ; struct mii_ioctl_data *tmp___2 ; int tmp___3 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; switch (cmd) { case 35248: tmp___0 = ixgbe_ptp_set_ts_config(adapter, req); return (tmp___0); case 35249: tmp___1 = ixgbe_ptp_get_ts_config(adapter, req); return (tmp___1); default: tmp___2 = if_mii(req); tmp___3 = mdio_mii_ioctl((struct mdio_if_info const *)(& adapter->hw.phy.mdio), tmp___2, cmd); return (tmp___3); } } } static int ixgbe_add_sanmac_netdev(struct net_device *dev ) { int err ; struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; bool tmp___0 ; { err = 0; tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; tmp___0 = is_valid_ether_addr((u8 const *)(& hw->mac.san_addr)); if ((int )tmp___0) { rtnl_lock(); err = dev_addr_add(dev, (unsigned char const 
*)(& hw->mac.san_addr), 2); rtnl_unlock(); (*(hw->mac.ops.set_vmdq_san_mac))(hw, (u32 )adapter->ring_feature[1].offset); } else { } return (err); } } static int ixgbe_del_sanmac_netdev(struct net_device *dev ) { int err ; struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_mac_info *mac ; bool tmp___0 ; { err = 0; tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; mac = & adapter->hw.mac; tmp___0 = is_valid_ether_addr((u8 const *)(& mac->san_addr)); if ((int )tmp___0) { rtnl_lock(); err = dev_addr_del(dev, (unsigned char const *)(& mac->san_addr), 2); rtnl_unlock(); } else { } return (err); } } static void ixgbe_netpoll(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; int i ; int tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 != 0) { return; } else { } i = 0; goto ldv_59345; ldv_59344: ixgbe_msix_clean_rings(0, (void *)adapter->q_vector[i]); i = i + 1; ldv_59345: ; if (adapter->num_q_vectors > i) { goto ldv_59344; } else { } return; } } static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev , struct rtnl_link_stats64 *stats ) { struct ixgbe_adapter *adapter ; void *tmp ; int i ; struct ixgbe_ring *ring ; struct ixgbe_ring *__var ; u64 bytes ; u64 packets ; unsigned int start ; bool tmp___0 ; struct ixgbe_ring *ring___0 ; struct ixgbe_ring *__var___0 ; u64 bytes___0 ; u64 packets___0 ; unsigned int start___0 ; bool tmp___1 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; rcu_read_lock(); i = 0; goto ldv_59362; ldv_59361: __var = (struct ixgbe_ring *)0; ring = *((struct ixgbe_ring * volatile *)(& adapter->rx_ring) + (unsigned long )i); if ((unsigned long )ring != (unsigned long )((struct ixgbe_ring *)0)) { ldv_59359: start = u64_stats_fetch_begin_irq((struct u64_stats_sync const 
*)(& ring->syncp)); packets = ring->stats.packets; bytes = ring->stats.bytes; tmp___0 = u64_stats_fetch_retry_irq((struct u64_stats_sync const *)(& ring->syncp), start); if ((int )tmp___0) { goto ldv_59359; } else { } stats->rx_packets = stats->rx_packets + packets; stats->rx_bytes = stats->rx_bytes + bytes; } else { } i = i + 1; ldv_59362: ; if (adapter->num_rx_queues > i) { goto ldv_59361; } else { } i = 0; goto ldv_59373; ldv_59372: __var___0 = (struct ixgbe_ring *)0; ring___0 = *((struct ixgbe_ring * volatile *)(& adapter->tx_ring) + (unsigned long )i); if ((unsigned long )ring___0 != (unsigned long )((struct ixgbe_ring *)0)) { ldv_59370: start___0 = u64_stats_fetch_begin_irq((struct u64_stats_sync const *)(& ring___0->syncp)); packets___0 = ring___0->stats.packets; bytes___0 = ring___0->stats.bytes; tmp___1 = u64_stats_fetch_retry_irq((struct u64_stats_sync const *)(& ring___0->syncp), start___0); if ((int )tmp___1) { goto ldv_59370; } else { } stats->tx_packets = stats->tx_packets + packets___0; stats->tx_bytes = stats->tx_bytes + bytes___0; } else { } i = i + 1; ldv_59373: ; if (adapter->num_tx_queues > i) { goto ldv_59372; } else { } rcu_read_unlock(); stats->multicast = (__u64 )netdev->stats.multicast; stats->rx_errors = (__u64 )netdev->stats.rx_errors; stats->rx_length_errors = (__u64 )netdev->stats.rx_length_errors; stats->rx_crc_errors = (__u64 )netdev->stats.rx_crc_errors; stats->rx_missed_errors = (__u64 )netdev->stats.rx_missed_errors; return (stats); } } static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter , u8 tc ) { struct ixgbe_hw *hw ; u32 reg ; u32 rsave ; int i ; u8 up2tc ; { hw = & adapter->hw; if ((unsigned int )hw->mac.type == 1U) { return; } else { } reg = ixgbe_read_reg(hw, 12320U); rsave = reg; i = 0; goto ldv_59385; ldv_59384: up2tc = (u8 )(reg >> i * 3); if ((int )up2tc > (int )tc) { reg = reg & 4294967239U; } else { } i = i + 1; ldv_59385: ; if (i <= 7) { goto ldv_59384; } else { } if (reg != rsave) { ixgbe_write_reg(hw, 
12320U, reg); } else { } return; } } static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter ) { struct net_device *dev ; struct ixgbe_dcb_config *dcb_cfg ; struct ieee_ets *ets ; u8 prio ; u8 tc ; { dev = adapter->netdev; dcb_cfg = & adapter->dcb_cfg; ets = adapter->ixgbe_ieee_ets; prio = 0U; goto ldv_59396; ldv_59395: tc = 0U; if (((int )adapter->dcbx_cap & 4) != 0) { tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, (int )prio); } else if ((unsigned long )ets != (unsigned long )((struct ieee_ets *)0)) { tc = ets->prio_tc[(int )prio]; } else { } netdev_set_prio_tc_map(dev, (int )prio, (int )tc); prio = (u8 )((int )prio + 1); ldv_59396: ; if ((unsigned int )prio <= 7U) { goto ldv_59395; } else { } return; } } int ixgbe_setup_tc(struct net_device *dev , u8 tc ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; bool pools ; unsigned long tmp___0 ; bool tmp___1 ; int tmp___2 ; bool tmp___3 ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; if ((int )adapter->dcb_cfg.num_tcs.pg_tcs < (int )tc || ((unsigned int )hw->mac.type == 1U && (unsigned int )tc <= 7U)) { return (-22); } else { } tmp___0 = find_first_zero_bit((unsigned long const *)(& adapter->fwd_bitmask), 32UL); pools = tmp___0 > 1UL; if (((unsigned int )tc != 0U && (int )pools) && adapter->num_rx_pools > 8) { return (-16); } else { } tmp___1 = netif_running((struct net_device const *)dev); if ((int )tmp___1) { ixgbe_close(dev); } else { } ixgbe_clear_interrupt_scheme(adapter); if ((unsigned int )tc != 0U) { netdev_set_num_tc(dev, (int )tc); ixgbe_set_prio_tc_map(adapter); adapter->flags = adapter->flags | 4096U; if ((unsigned int )adapter->hw.mac.type == 1U) { adapter->last_lfc_mode = adapter->hw.fc.requested_mode; adapter->hw.fc.requested_mode = 0; } else { } } else { netdev_reset_tc(dev); if ((unsigned int )adapter->hw.mac.type == 1U) { adapter->hw.fc.requested_mode = adapter->last_lfc_mode; } else { } adapter->flags = 
adapter->flags & 4294963199U; adapter->temp_dcb_cfg.pfc_mode_enable = 0; adapter->dcb_cfg.pfc_mode_enable = 0; } ixgbe_validate_rtr(adapter, (int )tc); ixgbe_init_interrupt_scheme(adapter); tmp___3 = netif_running((struct net_device const *)dev); if ((int )tmp___3) { tmp___2 = ixgbe_open(dev); return (tmp___2); } else { } return (0); } } void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter ) { struct net_device *netdev ; int tmp ; { netdev = adapter->netdev; rtnl_lock(); tmp = netdev_get_num_tc(netdev); ixgbe_setup_tc(netdev, (int )((u8 )tmp)); rtnl_unlock(); return; } } void ixgbe_do_reset(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; bool tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; tmp___0 = netif_running((struct net_device const *)netdev); if ((int )tmp___0) { ixgbe_reinit_locked(adapter); } else { ixgbe_reset(adapter); } return; } } static netdev_features_t ixgbe_fix_features(struct net_device *netdev , netdev_features_t features ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; if ((features & 17179869184ULL) == 0ULL) { features = features & 0xffffffffffff7fffULL; } else { } if ((adapter->flags2 & 1U) == 0U) { features = features & 0xffffffffffff7fffULL; } else { } return (features); } } static int ixgbe_set_features(struct net_device *netdev , netdev_features_t features ) { struct ixgbe_adapter *adapter ; void *tmp ; netdev_features_t changed ; bool need_reset ; int tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; changed = netdev->features ^ features; need_reset = 0; if ((features & 32768ULL) == 0ULL) { if ((adapter->flags2 & 2U) != 0U) { need_reset = 1; } else { } adapter->flags2 = adapter->flags2 & 4294967293U; } else if ((int )adapter->flags2 & 1 && (adapter->flags2 & 2U) == 0U) { if ((unsigned int 
)adapter->rx_itr_setting == 1U || (unsigned int )adapter->rx_itr_setting > 24U) { adapter->flags2 = adapter->flags2 | 2U; need_reset = 1; } else if (((changed ^ features) & 32768ULL) != 0ULL) { if (((int )adapter->msg_enable & 2) != 0) { netdev_info((struct net_device const *)adapter->netdev, "rx-usecs set too low, disabling RSC\n"); } else { } } else { } } else { } switch (features & 4294967296ULL) { case 0ULL: ; if ((adapter->flags & 524288U) == 0U) { need_reset = 1; } else { } adapter->flags = adapter->flags & 4294705151U; adapter->flags = adapter->flags | 524288U; goto ldv_59426; default: ; if ((adapter->flags & 524288U) != 0U) { need_reset = 1; } else { } adapter->flags = adapter->flags & 4294443007U; if ((adapter->flags & 8388608U) != 0U) { goto ldv_59426; } else { } tmp___0 = netdev_get_num_tc(netdev); if (tmp___0 > 1) { goto ldv_59426; } else { } if ((unsigned int )adapter->ring_feature[2].limit <= 1U) { goto ldv_59426; } else { } if (adapter->atr_sample_rate == 0U) { goto ldv_59426; } else { } adapter->flags = adapter->flags | 262144U; goto ldv_59426; } ldv_59426: ; if ((features & 256ULL) != 0ULL) { ixgbe_vlan_strip_enable(adapter); } else { ixgbe_vlan_strip_disable(adapter); } if ((changed & 274877906944ULL) != 0ULL) { need_reset = 1; } else { } netdev->features = features; if ((int )need_reset) { ixgbe_do_reset(netdev); } else { } return (0); } } static void ixgbe_add_vxlan_port(struct net_device *dev , sa_family_t sa_family , __be16 port ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; u16 new_port ; __u16 tmp___0 ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; tmp___0 = __fswab16((int )port); new_port = tmp___0; if ((unsigned int )sa_family == 10U) { return; } else { } if ((int )adapter->vxlan_port == (int )new_port) { netdev_info((struct net_device const *)dev, "Port %d already offloaded\n", (int )new_port); return; } else { } if ((unsigned int )adapter->vxlan_port 
!= 0U) { netdev_info((struct net_device const *)dev, "Hit Max num of UDP ports, not adding port %d\n", (int )new_port); return; } else { } adapter->vxlan_port = new_port; ixgbe_write_reg(hw, 20604U, (u32 )new_port); return; } } static void ixgbe_del_vxlan_port(struct net_device *dev , sa_family_t sa_family , __be16 port ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; u16 new_port ; __u16 tmp___0 ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; tmp___0 = __fswab16((int )port); new_port = tmp___0; if ((unsigned int )sa_family == 10U) { return; } else { } if ((int )adapter->vxlan_port != (int )new_port) { netdev_info((struct net_device const *)dev, "Port %d was not found, not deleting\n", (int )new_port); return; } else { } adapter->vxlan_port = 0U; ixgbe_write_reg(hw, 20604U, 0U); return; } } static int ixgbe_ndo_fdb_add(struct ndmsg *ndm , struct nlattr **tb , struct net_device *dev , unsigned char const *addr , u16 vid , u16 flags ) { bool tmp ; bool tmp___0 ; int tmp___1 ; { tmp = is_unicast_ether_addr(addr); if ((int )tmp) { goto _L; } else { tmp___0 = is_link_local_ether_addr(addr); if ((int )tmp___0) { _L: /* CIL Label */ if (dev->uc.count > 14) { return (-12); } else { } } else { } } tmp___1 = ndo_dflt_fdb_add(ndm, tb, dev, addr, (int )vid, (int )flags); return (tmp___1); } } static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter , __u16 mode ) { struct ixgbe_hw *hw ; unsigned int p ; unsigned int num_pools ; u32 vmdctl ; { hw = & adapter->hw; switch ((int )mode) { case 1: ixgbe_write_reg(& adapter->hw, 33312U, 0U); vmdctl = ixgbe_read_reg(hw, 22556U); vmdctl = vmdctl | 1073741824U; ixgbe_write_reg(hw, 22556U, vmdctl); num_pools = adapter->num_vfs + (unsigned int )adapter->num_rx_pools; p = 0U; goto ldv_59462; ldv_59461: ; if ((unsigned long )hw->mac.ops.set_source_address_pruning != (unsigned long )((void (*)(struct ixgbe_hw * , bool , unsigned int ))0)) { 
(*(hw->mac.ops.set_source_address_pruning))(hw, 1, p); } else { } p = p + 1U; ldv_59462: ; if (p < num_pools) { goto ldv_59461; } else { } goto ldv_59464; case 0: ixgbe_write_reg(& adapter->hw, 33312U, 1U); vmdctl = ixgbe_read_reg(hw, 22556U); if (adapter->num_vfs == 0U) { vmdctl = vmdctl & 3221225471U; } else { } ixgbe_write_reg(hw, 22556U, vmdctl); num_pools = adapter->num_vfs + (unsigned int )adapter->num_rx_pools; p = 0U; goto ldv_59467; ldv_59466: ; if ((unsigned long )hw->mac.ops.set_source_address_pruning != (unsigned long )((void (*)(struct ixgbe_hw * , bool , unsigned int ))0)) { (*(hw->mac.ops.set_source_address_pruning))(hw, 0, p); } else { } p = p + 1U; ldv_59467: ; if (p < num_pools) { goto ldv_59466; } else { } goto ldv_59464; default: ; return (-22); } ldv_59464: adapter->bridge_mode = mode; if ((int )adapter->msg_enable & 1) { netdev_info((struct net_device const *)adapter->netdev, "enabling bridge mode: %s\n", (unsigned int )mode == 1U ? (char *)"VEPA" : (char *)"VEB"); } else { } return (0); } } static int ixgbe_ndo_bridge_setlink(struct net_device *dev , struct nlmsghdr *nlh , u16 flags ) { struct ixgbe_adapter *adapter ; void *tmp ; struct nlattr *attr ; struct nlattr *br_spec ; int rem ; void *tmp___0 ; int status ; __u16 mode ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; if ((adapter->flags & 8388608U) == 0U) { return (-95); } else { } br_spec = nlmsg_find_attr((struct nlmsghdr const *)nlh, 16, 26); if ((unsigned long )br_spec == (unsigned long )((struct nlattr *)0)) { return (-22); } else { } tmp___0 = nla_data((struct nlattr const *)br_spec); attr = (struct nlattr *)tmp___0; rem = nla_len((struct nlattr const *)br_spec); goto ldv_59484; ldv_59483: tmp___1 = nla_type((struct nlattr const *)attr); if (tmp___1 != 1) { goto ldv_59481; } else { } tmp___2 = nla_len((struct nlattr const *)attr); if ((unsigned int )tmp___2 <= 1U) { return (-22); } else { } mode 
= nla_get_u16((struct nlattr const *)attr); status = ixgbe_configure_bridge_mode(adapter, (int )mode); if (status != 0) { return (status); } else { } goto ldv_59482; ldv_59481: attr = nla_next((struct nlattr const *)attr, & rem); ldv_59484: tmp___3 = nla_ok((struct nlattr const *)attr, rem); if (tmp___3 != 0) { goto ldv_59483; } else { } ldv_59482: ; return (0); } } static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb , u32 pid , u32 seq , struct net_device *dev , u32 filter_mask , int nlflags ) { struct ixgbe_adapter *adapter ; void *tmp ; int tmp___0 ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; if ((adapter->flags & 8388608U) == 0U) { return (0); } else { } tmp___0 = ndo_dflt_bridge_getlink(skb, pid, seq, dev, (int )adapter->bridge_mode, 0U, 0U, nlflags, filter_mask, (int (*)(struct sk_buff * , struct net_device * , u32 ))0); return (tmp___0); } } static void *ixgbe_fwd_add(struct net_device *pdev , struct net_device *vdev ) { struct ixgbe_fwd_adapter *fwd_adapter ; struct ixgbe_adapter *adapter ; void *tmp ; int used_pools ; unsigned int limit ; int pool ; int err ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; void *tmp___3 ; void *tmp___4 ; void *tmp___5 ; unsigned long tmp___6 ; unsigned long tmp___7 ; int tmp___8 ; void *tmp___9 ; { fwd_adapter = (struct ixgbe_fwd_adapter *)0; tmp = netdev_priv((struct net_device const *)pdev); adapter = (struct ixgbe_adapter *)tmp; used_pools = (int )(adapter->num_vfs + (unsigned int )adapter->num_rx_pools); if (used_pools > 63) { tmp___0 = ERR_PTR(-22L); return (tmp___0); } else { } if (vdev->num_rx_queues != vdev->num_tx_queues) { netdev_info((struct net_device const *)pdev, "%s: Only supports a single queue count for TX and RX\n", (char *)(& vdev->name)); tmp___1 = ERR_PTR(-22L); return (tmp___1); } else { } if (vdev->num_tx_queues > 4U || vdev->num_tx_queues == 3U) { netdev_info((struct net_device const *)pdev, "%s: Supports RX/TX Queue counts 1,2, and 4\n", (char 
*)(& pdev->name)); tmp___2 = ERR_PTR(-22L); return (tmp___2); } else { } if (((adapter->flags & 4096U) != 0U && adapter->num_rx_pools > 7) || adapter->num_rx_pools > 31) { tmp___3 = ERR_PTR(-16L); return (tmp___3); } else { } tmp___4 = kcalloc(1UL, 544UL, 208U); fwd_adapter = (struct ixgbe_fwd_adapter *)tmp___4; if ((unsigned long )fwd_adapter == (unsigned long )((struct ixgbe_fwd_adapter *)0)) { tmp___5 = ERR_PTR(-12L); return (tmp___5); } else { } tmp___6 = find_first_zero_bit((unsigned long const *)(& adapter->fwd_bitmask), 32UL); pool = (int )tmp___6; adapter->num_rx_pools = adapter->num_rx_pools + 1; set_bit((long )pool, (unsigned long volatile *)(& adapter->fwd_bitmask)); tmp___7 = find_last_bit((unsigned long const *)(& adapter->fwd_bitmask), 32UL); limit = (unsigned int )tmp___7; adapter->flags = adapter->flags | 8404992U; adapter->ring_feature[1].limit = (unsigned int )((u16 )limit) + 1U; adapter->ring_feature[2].limit = (u16 )vdev->num_tx_queues; tmp___8 = netdev_get_num_tc(pdev); err = ixgbe_setup_tc(pdev, (int )((u8 )tmp___8)); if (err != 0) { goto fwd_add_err; } else { } fwd_adapter->pool = pool; fwd_adapter->real_adapter = adapter; err = ixgbe_fwd_ring_up(vdev, fwd_adapter); if (err != 0) { goto fwd_add_err; } else { } netif_tx_start_all_queues(vdev); return ((void *)fwd_adapter); fwd_add_err: netdev_info((struct net_device const *)pdev, "%s: dfwd hardware acceleration failed\n", (char *)(& vdev->name)); clear_bit((long )pool, (unsigned long volatile *)(& adapter->fwd_bitmask)); adapter->num_rx_pools = adapter->num_rx_pools - 1; kfree((void const *)fwd_adapter); tmp___9 = ERR_PTR((long )err); return (tmp___9); } } static void ixgbe_fwd_del(struct net_device *pdev , void *priv ) { struct ixgbe_fwd_adapter *fwd_adapter ; struct ixgbe_adapter *adapter ; unsigned int limit ; unsigned long tmp ; int tmp___0 ; struct _ddebug descriptor ; long tmp___1 ; { fwd_adapter = (struct ixgbe_fwd_adapter *)priv; adapter = fwd_adapter->real_adapter; clear_bit((long 
)fwd_adapter->pool, (unsigned long volatile *)(& adapter->fwd_bitmask)); adapter->num_rx_pools = adapter->num_rx_pools - 1; tmp = find_last_bit((unsigned long const *)(& adapter->fwd_bitmask), 32UL); limit = (unsigned int )tmp; adapter->ring_feature[1].limit = (unsigned int )((u16 )limit) + 1U; ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter); tmp___0 = netdev_get_num_tc(pdev); ixgbe_setup_tc(pdev, (int )((u8 )tmp___0)); descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_fwd_del"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c"; descriptor.format = "pool %i:%i queues %i:%i VSI bitmask %lx\n"; descriptor.lineno = 8190U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", fwd_adapter->pool, adapter->num_rx_pools, fwd_adapter->rx_base_queue, fwd_adapter->rx_base_queue + (unsigned int )adapter->num_rx_queues_per_pool, adapter->fwd_bitmask); } else { } kfree((void const *)fwd_adapter); return; } } /* CIL-flattened net_device_ops table for the ixgbe driver; positional initializers, 0 slots are unimplemented ndo hooks. */ static struct net_device_ops const ixgbe_netdev_ops = {0, 0, & ixgbe_open, & ixgbe_close, & ixgbe_xmit_frame, & ixgbe_select_queue, 0, & ixgbe_set_rx_mode, & ixgbe_set_mac, & eth_validate_addr, & ixgbe_ioctl, 0, & ixgbe_change_mtu, 0, & ixgbe_tx_timeout, & ixgbe_get_stats64, 0, & ixgbe_vlan_rx_add_vid, & ixgbe_vlan_rx_kill_vid, & ixgbe_netpoll, 0, 0, & ixgbe_low_latency_recv, & ixgbe_ndo_set_vf_mac, & ixgbe_ndo_set_vf_vlan, & ixgbe_ndo_set_vf_bw, & ixgbe_ndo_set_vf_spoofchk, & ixgbe_ndo_get_vf_config, 0, 0, 0, 0, & ixgbe_ndo_set_vf_rss_query_en, & ixgbe_setup_tc, & ixgbe_fcoe_enable, & ixgbe_fcoe_disable, & ixgbe_fcoe_ddp_get, & ixgbe_fcoe_ddp_put, & ixgbe_fcoe_ddp_target, & ixgbe_fcoe_get_hbainfo, & 
ixgbe_fcoe_get_wwn, 0, 0, 0, & ixgbe_fix_features, & ixgbe_set_features, 0, 0, & ixgbe_ndo_fdb_add, 0, 0, & ixgbe_ndo_bridge_setlink, & ixgbe_ndo_bridge_getlink, 0, 0, 0, 0, & ixgbe_add_vxlan_port, & ixgbe_del_vxlan_port, & ixgbe_fwd_add, & ixgbe_fwd_del, 0, 0, 0, 0, 0}; /* Walk the adapter's PCI bus device list (CIL expansion of list_for_each_entry) counting functions whose vendor/device id match this adapter; returns -1 on a mismatch, otherwise the count (seeded to 4 when behind a known parent bridge — see ixgbe_pcie_from_parent). The `+ 2531UL` byte access presumably tests pci_dev::is_virtfn — TODO confirm against struct layout. */ __inline static int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter ) { struct pci_dev *entry ; struct pci_dev *pdev ; int physfns ; bool tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { pdev = adapter->pdev; physfns = 0; tmp = ixgbe_pcie_from_parent(& adapter->hw); if ((int )tmp) { physfns = 4; } else { } __mptr = (struct list_head const *)((adapter->pdev)->bus)->devices.next; entry = (struct pci_dev *)__mptr; goto ldv_59527; ldv_59526: ; if ((unsigned int )*((unsigned char *)entry + 2531UL) != 0U) { goto ldv_59525; } else { } if ((int )entry->vendor != (int )pdev->vendor || (int )entry->device != (int )pdev->device) { return (-1); } else { } physfns = physfns + 1; ldv_59525: __mptr___0 = (struct list_head const *)entry->bus_list.next; entry = (struct pci_dev *)__mptr___0; ldv_59527: ; if ((unsigned long )(& entry->bus_list) != (unsigned long )(& ((adapter->pdev)->bus)->devices)) { goto ldv_59526; } else { } return (physfns); } } /* Decide Wake-on-LAN support from the PCI device/subdevice ids and the EEPROM WoL capability bits (adapter->eeprom_cap & 0xC); numeric case labels are the flattened IXGBE_DEV_ID_* / subsystem-id constants. Returns nonzero when WoL is supported. */ int ixgbe_wol_supported(struct ixgbe_adapter *adapter , u16 device_id , u16 subdevice_id ) { struct ixgbe_hw *hw ; u16 wol_cap ; int is_wol_supported ; { hw = & adapter->hw; wol_cap = (unsigned int )adapter->eeprom_cap & 12U; is_wol_supported = 0; switch ((int )device_id) { case 4347: ; switch ((int )subdevice_id) { case 4209: ; case 6096: ; if ((unsigned int )hw->bus.func != 0U) { goto ldv_59540; } else { } case 8475: ; case 4521: ; case 8050: ; case 1136: ; case 35190: is_wol_supported = 1; goto ldv_59540; } ldv_59540: ; goto ldv_59546; case 5463: ; switch ((int )subdevice_id) { case 1: is_wol_supported = 1; goto ldv_59549; } ldv_59549: ; goto ldv_59546; case 4344: ; if ((unsigned int )subdevice_id != 12U) { is_wol_supported = 1; } 
else { } goto ldv_59546; case 4343: is_wol_supported = 1; goto ldv_59546; case 5416: ; case 5472: ; case 5475: ; case 5546: ; case 5547: ; case 5549: ; if ((unsigned int )wol_cap == 4U || ((unsigned int )wol_cap == 8U && (unsigned int )hw->bus.func == 0U)) { is_wol_supported = 1; } else { } goto ldv_59546; } ldv_59546: ; return (is_wol_supported); } } /* Fetch the permanent MAC from platform firmware: map the PCI device to its OF/device-tree node and, if the node carries a MAC address, copy it into hw->mac.perm_addr; otherwise leave perm_addr untouched. */ static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter ) { struct device_node *dp ; struct device_node *tmp ; struct ixgbe_hw *hw ; unsigned char const *addr ; void const *tmp___0 ; { tmp = pci_device_to_OF_node((struct pci_dev const *)adapter->pdev); dp = tmp; hw = & adapter->hw; tmp___0 = of_get_mac_address(dp); addr = (unsigned char const *)tmp___0; if ((unsigned long )addr != (unsigned long )((unsigned char const *)0U)) { ether_addr_copy((u8 *)(& hw->mac.perm_addr), addr); return; } else { } return; } } /* PCI probe entry point (CIL-flattened): enables the device, sets the DMA mask (64-bit, falling back to 32-bit), claims BAR regions, allocates the netdev, ioremaps BAR0, wires up mac/eeprom/phy ops from the matched ixgbe_info, resets the HW, configures features/SR-IOV/WoL, registers the netdev and ancillary services. Error paths unwind via the err_* labels near the end. Returns 0 on success or a negative errno. */ static int ixgbe_probe(struct pci_dev *pdev , struct pci_device_id const *ent ) { struct net_device *netdev ; struct ixgbe_adapter *adapter ; struct ixgbe_hw *hw ; struct ixgbe_info const *ii ; int i ; int err ; int pci_using_dac ; int expected_gts ; unsigned int indices ; u8 part_str[11U] ; bool disable_dev ; u16 device_caps ; u32 eec ; int __ret_warn_on ; char const *tmp ; long tmp___0 ; int tmp___1 ; int tmp___2 ; void *tmp___3 ; u32 tmp___4 ; void *tmp___5 ; char const *tmp___6 ; bool tmp___7 ; u32 esdp ; u32 tmp___8 ; unsigned int fcoe_l ; int __min1 ; int __min2 ; unsigned int tmp___9 ; s32 tmp___10 ; bool tmp___11 ; int tmp___12 ; bool tmp___13 ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; int tmp___14 ; bool tmp___15 ; int _min1 ; int tmp___16 ; int _min2 ; int tmp___17 ; bool tmp___18 ; int tmp___19 ; int tmp___20 ; bool tmp___21 ; bool tmp___22 ; int tmp___23 ; int tmp___24 ; { adapter = (struct ixgbe_adapter *)0; ii = ixgbe_info_tbl[ent->driver_data]; indices = 64U; disable_dev = 0; if ((unsigned int )*((unsigned char *)pdev + 2531UL) != 0U) { __ret_warn_on = 1; tmp___0 = 
ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { tmp = pci_name((struct pci_dev const *)pdev); warn_slowpath_fmt("/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c", 8408, "\v%s (%hx:%hx) should not be a VF!\n", tmp, (int )pdev->vendor, (int )pdev->device); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return (-22); } else { } err = pci_enable_device_mem(pdev); if (err != 0) { return (err); } else { } tmp___1 = dma_set_mask_and_coherent(& pdev->dev, 0xffffffffffffffffULL); if (tmp___1 == 0) { pci_using_dac = 1; } else { err = dma_set_mask_and_coherent(& pdev->dev, 4294967295ULL); if (err != 0) { dev_err((struct device const *)(& pdev->dev), "No usable DMA configuration, aborting\n"); goto err_dma; } else { } pci_using_dac = 0; } tmp___2 = pci_select_bars(pdev, 512UL); err = pci_request_selected_regions(pdev, tmp___2, (char const *)(& ixgbe_driver_name)); if (err != 0) { dev_err((struct device const *)(& pdev->dev), "pci_request_selected_regions failed 0x%x\n", err); goto err_pci_reg; } else { } pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); pci_save_state(pdev); if ((unsigned int )ii->mac == 1U) { indices = 32U; } else { } netdev = alloc_etherdev_mqs(143360, indices, indices); if ((unsigned long )netdev == (unsigned long )((struct net_device *)0)) { err = -12; goto err_alloc_etherdev; } else { } netdev->dev.parent = & pdev->dev; tmp___3 = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp___3; adapter->netdev = netdev; adapter->pdev = pdev; hw = & adapter->hw; hw->back = (void *)adapter; tmp___4 = netif_msg_init(debug, 7); adapter->msg_enable = (u16 )tmp___4; tmp___5 = ioremap(pdev->resource[0].start, pdev->resource[0].start != 0ULL || pdev->resource[0].end != pdev->resource[0].start ? 
(unsigned long )((pdev->resource[0].end - pdev->resource[0].start) + 1ULL) : 0UL); hw->hw_addr = (u8 *)tmp___5; adapter->io_addr = hw->hw_addr; if ((unsigned long )hw->hw_addr == (unsigned long )((u8 *)0U)) { err = -5; goto err_ioremap; } else { } netdev->netdev_ops = & ixgbe_netdev_ops; ixgbe_set_ethtool_ops(netdev); netdev->watchdog_timeo = 1250; tmp___6 = pci_name((struct pci_dev const *)pdev); strlcpy((char *)(& netdev->name), tmp___6, 16UL); memcpy((void *)(& hw->mac.ops), (void const *)ii->mac_ops, 472UL); hw->mac.type = ii->mac; hw->mvals = ii->mvals; memcpy((void *)(& hw->eeprom.ops), (void const *)ii->eeprom_ops, 64UL); eec = ixgbe_read_reg(hw, *(hw->mvals)); tmp___7 = ixgbe_removed((void *)hw->hw_addr); if ((int )tmp___7) { err = -5; goto err_ioremap; } else { } if ((eec & 256U) == 0U) { hw->eeprom.ops.read = & ixgbe_read_eeprom_bit_bang_generic; } else { } memcpy((void *)(& hw->phy.ops), (void const *)ii->phy_ops, 184UL); hw->phy.sfp_type = 65535; hw->phy.mdio.prtad = -1; hw->phy.mdio.mmds = 0U; hw->phy.mdio.mode_support = 6U; hw->phy.mdio.dev = netdev; hw->phy.mdio.mdio_read = & ixgbe_mdio_read; hw->phy.mdio.mdio_write = & ixgbe_mdio_write; (*(ii->get_invariants))(hw); err = ixgbe_sw_init(adapter); if (err != 0) { goto err_sw_init; } else { } switch ((unsigned int )adapter->hw.mac.type) { case 2U: ; case 3U: ; case 4U: ; case 5U: ixgbe_write_reg(& adapter->hw, 22544U, 4294967295U); goto ldv_59592; default: ; goto ldv_59592; } ldv_59592: ; if ((adapter->flags & 32768U) != 0U) { tmp___8 = ixgbe_read_reg(hw, 32U); esdp = tmp___8; if ((esdp & 2U) != 0U) { if (((int )adapter->msg_enable & 2) != 0) { netdev_crit((struct net_device const *)adapter->netdev, "Fan has stopped, replace the adapter\n"); } else { } } else { } } else { } if (allow_unsupported_sfp != 0U) { hw->allow_unsupported_sfp = allow_unsupported_sfp != 0U; } else { } hw->phy.reset_if_overtemp = 1; err = (*(hw->mac.ops.reset_hw))(hw); hw->phy.reset_if_overtemp = 0; if (err == -20 && (unsigned int 
)hw->mac.type == 1U) { err = 0; } else if (err == -19) { dev_err((struct device const *)(& (adapter->pdev)->dev), "failed to load because an unsupported SFP+ or QSFP module type was detected.\n"); dev_err((struct device const *)(& (adapter->pdev)->dev), "Reload the driver after installing a supported module.\n"); goto err_sw_init; } else if (err != 0) { dev_err((struct device const *)(& (adapter->pdev)->dev), "HW Init failed: %d\n", err); goto err_sw_init; } else { } if ((unsigned int )adapter->hw.mac.type == 1U) { goto skip_sriov; } else { } /* SR-IOV setup (skipped for mac type 1U, i.e. the oldest MAC generation — see the goto above). */ ixgbe_init_mbx_params_pf(hw); memcpy((void *)(& hw->mbx.ops), (void const *)ii->mbx_ops, 64UL); pci_sriov_set_totalvfs(pdev, 63); ixgbe_enable_sriov(adapter); skip_sriov: netdev->features = 25770918291ULL; netdev->hw_features = netdev->features | 4398046511104ULL; switch ((unsigned int )adapter->hw.mac.type) { case 2U: ; case 3U: ; case 4U: ; case 5U: netdev->features = netdev->features | 1073741824ULL; netdev->hw_features = netdev->hw_features | 5368709120ULL; goto ldv_59600; default: ; goto ldv_59600; } ldv_59600: netdev->hw_features = netdev->hw_features | 274877906944ULL; netdev->features = netdev->features | 512ULL; netdev->vlan_features = netdev->vlan_features | 65536ULL; netdev->vlan_features = netdev->vlan_features | 1048576ULL; netdev->vlan_features = netdev->vlan_features | 2ULL; netdev->vlan_features = netdev->vlan_features | 16ULL; netdev->vlan_features = netdev->vlan_features | 1ULL; netdev->priv_flags = netdev->priv_flags | 131072U; netdev->priv_flags = netdev->priv_flags | 524288U; switch ((unsigned int )adapter->hw.mac.type) { case 4U: ; case 5U: netdev->hw_enc_features = netdev->hw_enc_features | 17179869184ULL; goto ldv_59604; default: ; goto ldv_59604; } ldv_59604: netdev->dcbnl_ops = & dcbnl_ops; if ((adapter->flags & 1048576U) != 0U) { if ((unsigned long )hw->mac.ops.get_device_caps != (unsigned long )((s32 (*)(struct ixgbe_hw * , u16 * ))0)) { (*(hw->mac.ops.get_device_caps))(hw, & device_caps); if (((int 
)device_caps & 2) != 0) { adapter->flags = adapter->flags & 4293918719U; } else { } } else { } /* FCoE ring limit: min(8, number of online CPUs) — flattened min() expansion. */ __min1 = 8; tmp___9 = cpumask_weight(cpu_online_mask); __min2 = (int )tmp___9; fcoe_l = (unsigned int )(__min1 < __min2 ? __min1 : __min2); adapter->ring_feature[4].limit = (u16 )fcoe_l; netdev->features = netdev->features | 538968064ULL; netdev->vlan_features = netdev->vlan_features | 2686451712ULL; } else { } if (pci_using_dac != 0) { netdev->features = netdev->features | 32ULL; netdev->vlan_features = netdev->vlan_features | 32ULL; } else { } if ((int )adapter->flags2 & 1) { netdev->hw_features = netdev->hw_features | 32768ULL; } else { } if ((adapter->flags2 & 2U) != 0U) { netdev->features = netdev->features | 32768ULL; } else { } tmp___10 = (*(hw->eeprom.ops.validate_checksum))(hw, (u16 *)0U); if (tmp___10 < 0) { dev_err((struct device const *)(& (adapter->pdev)->dev), "The EEPROM Checksum Is Not Valid\n"); err = -5; goto err_sw_init; } else { } ixgbe_get_platform_mac_addr(adapter); memcpy((void *)netdev->dev_addr, (void const *)(& hw->mac.perm_addr), (size_t )netdev->addr_len); tmp___11 = is_valid_ether_addr((u8 const *)netdev->dev_addr); if (tmp___11) { tmp___12 = 0; } else { tmp___12 = 1; } if (tmp___12) { dev_err((struct device const *)(& (adapter->pdev)->dev), "invalid MAC address\n"); err = -5; goto err_sw_init; } else { } ixgbe_mac_set_default_filter(adapter, (u8 *)(& hw->mac.perm_addr)); /* reg_timer_10 is the LDV model of setup_timer(): registers ixgbe_service_timer with the harness. */ reg_timer_10(& adapter->service_timer, & ixgbe_service_timer, (unsigned long )adapter); tmp___13 = ixgbe_removed((void *)hw->hw_addr); if ((int )tmp___13) { err = -5; goto err_sw_init; } else { } /* Flattened INIT_WORK() for the service task, then mark state bit 6 (service inited) and clear bit 5. */ __init_work(& adapter->service_task, 0); __constr_expr_0.counter = 137438953408L; adapter->service_task.data = __constr_expr_0; lockdep_init_map(& adapter->service_task.lockdep_map, "(&adapter->service_task)", & __key, 0); INIT_LIST_HEAD(& adapter->service_task.entry); adapter->service_task.func = & ixgbe_service_task; set_bit(6L, (unsigned long volatile *)(& adapter->state)); 
clear_bit(5L, (unsigned long volatile *)(& adapter->state)); err = ixgbe_init_interrupt_scheme(adapter); if (err != 0) { goto err_sw_init; } else { } /* WoL: read EEPROM cap word 44, query support, expose via device_set_wakeup_enable. */ adapter->wol = 0U; (*(hw->eeprom.ops.read))(hw, 44, & adapter->eeprom_cap); tmp___14 = ixgbe_wol_supported(adapter, (int )pdev->device, (int )pdev->subsystem_device); hw->wol_enabled = tmp___14 != 0; if ((int )hw->wol_enabled) { adapter->wol = 2U; } else { } device_set_wakeup_enable(& (adapter->pdev)->dev, adapter->wol != 0U); (*(hw->eeprom.ops.read))(hw, 46, & adapter->eeprom_verh); (*(hw->eeprom.ops.read))(hw, 45, & adapter->eeprom_verl); (*(hw->mac.ops.get_bus_info))(hw); tmp___15 = ixgbe_pcie_from_parent(hw); if ((int )tmp___15) { ixgbe_get_parent_bus_info(adapter); } else { } switch ((unsigned int )hw->mac.type) { case 1U: tmp___16 = ixgbe_enumerate_functions(adapter); _min1 = tmp___16 * 10; _min2 = 16; expected_gts = _min1 < _min2 ? _min1 : _min2; goto ldv_59616; default: tmp___17 = ixgbe_enumerate_functions(adapter); expected_gts = tmp___17 * 10; goto ldv_59616; } ldv_59616: ; if (expected_gts > 0) { ixgbe_check_minimum_link(adapter, expected_gts); } else { } err = ixgbe_read_pba_string_generic(hw, (u8 *)(& part_str), 11U); if (err != 0) { strlcpy((char *)(& part_str), "Unknown", 11UL); } else { } tmp___18 = ixgbe_is_sfp(hw); if ((int )tmp___18 && (unsigned int )hw->phy.sfp_type != 65534U) { _dev_info((struct device const *)(& (adapter->pdev)->dev), "MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", (unsigned int )hw->mac.type, (unsigned int )hw->phy.type, (unsigned int )hw->phy.sfp_type, (u8 *)(& part_str)); } else { _dev_info((struct device const *)(& (adapter->pdev)->dev), "MAC: %d, PHY: %d, PBA No: %s\n", (unsigned int )hw->mac.type, (unsigned int )hw->phy.type, (u8 *)(& part_str)); } _dev_info((struct device const *)(& (adapter->pdev)->dev), "%pM\n", netdev->dev_addr); err = (*(hw->mac.ops.start_hw))(hw); if (err == -24) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "This device is a pre-production 
adapter/LOM. Please be aware there may be issues associated with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); } else { } strcpy((char *)(& netdev->name), "eth%d"); err = ldv_register_netdev_59(netdev); if (err != 0) { goto err_register; } else { } pci_set_drvdata(pdev, (void *)adapter); if ((unsigned long )hw->mac.ops.disable_tx_laser != (unsigned long )((void (*)(struct ixgbe_hw * ))0)) { (*(hw->mac.ops.disable_tx_laser))(hw); } else { } netif_carrier_off(netdev); tmp___19 = dca_add_requester(& pdev->dev); if (tmp___19 == 0) { adapter->flags = adapter->flags | 256U; ixgbe_setup_dca(adapter); } else { } if ((adapter->flags & 8388608U) != 0U) { if (((int )adapter->msg_enable & 2) != 0) { netdev_info((struct net_device const *)adapter->netdev, "IOV is enabled with %d VFs\n", adapter->num_vfs); } else { } i = 0; goto ldv_59620; ldv_59619: ixgbe_vf_configuration(pdev, (unsigned int )(i | 268435456)); i = i + 1; ldv_59620: ; if ((unsigned int )i < adapter->num_vfs) { goto ldv_59619; } else { } } else { } if ((unsigned long )hw->mac.ops.set_fw_drv_ver != (unsigned long )((s32 (*)(struct ixgbe_hw * , u8 , u8 , u8 , u8 ))0)) { (*(hw->mac.ops.set_fw_drv_ver))(hw, 255, 255, 255, 255); } else { } ixgbe_add_sanmac_netdev(netdev); _dev_info((struct device const *)(& (adapter->pdev)->dev), "%s\n", (char *)(& ixgbe_default_device_descr)); tmp___20 = ixgbe_sysfs_init(adapter); if (tmp___20 != 0) { if (((int )adapter->msg_enable & 2) != 0) { netdev_err((struct net_device const *)adapter->netdev, "failed to allocate sysfs resources\n"); } else { } } else { } ixgbe_dbg_adapter_init(adapter); tmp___21 = ixgbe_mng_enabled(hw); if ((int )tmp___21) { tmp___22 = ixgbe_is_sfp(hw); if ((int )tmp___22) { if ((unsigned long )hw->mac.ops.setup_link != (unsigned long )((s32 (*)(struct ixgbe_hw * , ixgbe_link_speed , bool ))0)) { (*(hw->mac.ops.setup_link))(hw, 160U, 1); } else { } } else { } } else 
{ } return (0); /* Probe error-unwind ladder: each label releases what was acquired before the failing step. */ err_register: ixgbe_release_hw_control(adapter); ixgbe_clear_interrupt_scheme(adapter); err_sw_init: ixgbe_disable_sriov(adapter); adapter->flags2 = adapter->flags2 & 4294967279U; iounmap((void volatile *)adapter->io_addr); kfree((void const *)adapter->mac_table); err_ioremap: tmp___23 = test_and_set_bit(3L, (unsigned long volatile *)(& adapter->state)); disable_dev = tmp___23 == 0; ldv_free_netdev_60(netdev); err_alloc_etherdev: tmp___24 = pci_select_bars(pdev, 512UL); pci_release_selected_regions(pdev, tmp___24); err_pci_reg: ; err_dma: ; if ((unsigned long )adapter == (unsigned long )((struct ixgbe_adapter *)0) || (int )disable_dev) { pci_disable_device(pdev); } else { } return (err); } } /* PCI remove: stop the service task, tear down DCA/sysfs/SAN MAC, unregister the netdev, disable SR-IOV, release interrupts, HW control, MMIO mapping and BAR regions; state bit 3 guards against double pci_disable_device. */ static void ixgbe_remove(struct pci_dev *pdev ) { struct ixgbe_adapter *adapter ; void *tmp ; struct net_device *netdev ; bool disable_dev ; int tmp___0 ; int tmp___1 ; { tmp = pci_get_drvdata(pdev); adapter = (struct ixgbe_adapter *)tmp; if ((unsigned long )adapter == (unsigned long )((struct ixgbe_adapter *)0)) { return; } else { } netdev = adapter->netdev; ixgbe_dbg_adapter_exit(adapter); set_bit(4L, (unsigned long volatile *)(& adapter->state)); ldv_cancel_work_sync_61(& adapter->service_task); if ((adapter->flags & 256U) != 0U) { adapter->flags = adapter->flags & 4294967039U; dca_remove_requester(& pdev->dev); ixgbe_write_reg(& adapter->hw, 69748U, 1U); } else { } ixgbe_sysfs_exit(adapter); ixgbe_del_sanmac_netdev(netdev); if ((unsigned int )netdev->reg_state == 1U) { ldv_unregister_netdev_62(netdev); } else { } if (max_vfs != 0U) { ixgbe_disable_sriov(adapter); } else { } ixgbe_clear_interrupt_scheme(adapter); ixgbe_release_hw_control(adapter); kfree((void const *)adapter->ixgbe_ieee_pfc); kfree((void const *)adapter->ixgbe_ieee_ets); iounmap((void volatile *)adapter->io_addr); tmp___0 = pci_select_bars(pdev, 512UL); pci_release_selected_regions(pdev, tmp___0); _dev_info((struct device const *)(& (adapter->pdev)->dev), "complete\n"); kfree((void const 
*)adapter->mac_table); tmp___1 = test_and_set_bit(3L, (unsigned long volatile *)(& adapter->state)); disable_dev = tmp___1 == 0; ldv_free_netdev_63(netdev); pci_disable_pcie_error_reporting(pdev); if ((int )disable_dev) { pci_disable_device(pdev); } else { } return; } } /* AER error_detected callback: first tries to attribute the error to a misbehaving VF by walking up to the root port, reading the AER header-log TLP dwords, and FLR-ing the matching VF (returns 5U = PCI_ERS_RESULT_RECOVERED-path for VF errors); otherwise detaches the netdev, brings the interface down and disables the device (returns 3U = needs slot reset, or 4U = disconnect). */ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev , pci_channel_state_t state ) { struct ixgbe_adapter *adapter ; void *tmp ; struct net_device *netdev ; struct ixgbe_hw *hw ; struct pci_dev *bdev ; struct pci_dev *vfdev ; u32 dw0 ; u32 dw1 ; u32 dw2 ; u32 dw3 ; int vf ; int pos ; u16 req_id ; u16 pf_func ; int tmp___0 ; bool tmp___1 ; unsigned int device_id ; int tmp___2 ; bool tmp___3 ; int tmp___4 ; { tmp = pci_get_drvdata(pdev); adapter = (struct ixgbe_adapter *)tmp; netdev = adapter->netdev; hw = & adapter->hw; if ((unsigned int )adapter->hw.mac.type == 1U || adapter->num_vfs == 0U) { goto skip_bad_vf_detection; } else { } bdev = (pdev->bus)->self; goto ldv_59647; ldv_59646: bdev = (bdev->bus)->self; ldv_59647: ; if ((unsigned long )bdev != (unsigned long )((struct pci_dev *)0)) { tmp___0 = pci_pcie_type((struct pci_dev const *)bdev); if (tmp___0 != 4) { goto ldv_59646; } else { goto ldv_59648; } } else { } ldv_59648: ; if ((unsigned long )bdev == (unsigned long )((struct pci_dev *)0)) { goto skip_bad_vf_detection; } else { } pos = pci_find_ext_capability(bdev, 1); if (pos == 0) { goto skip_bad_vf_detection; } else { } dw0 = ixgbe_read_pci_cfg_dword(hw, (u32 )(pos + 28)); dw1 = ixgbe_read_pci_cfg_dword(hw, (u32 )(pos + 32)); dw2 = ixgbe_read_pci_cfg_dword(hw, (u32 )(pos + 36)); dw3 = ixgbe_read_pci_cfg_dword(hw, (u32 )(pos + 40)); tmp___1 = ixgbe_removed((void *)hw->hw_addr); if ((int )tmp___1) { goto skip_bad_vf_detection; } else { } req_id = (u16 )(dw1 >> 16); if (((int )req_id & 128) == 0) { goto skip_bad_vf_detection; } else { } pf_func = (unsigned int )req_id & 1U; if ((((unsigned int )pf_func ^ pdev->devfn) & 1U) == 0U) { vf = ((int )req_id & 127) >> 1; dev_err((struct 
device const *)(& (adapter->pdev)->dev), "VF %d has caused a PCIe error\n", vf); dev_err((struct device const *)(& (adapter->pdev)->dev), "TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: %8.8x\tdw3: %8.8x\n", dw0, dw1, dw2, dw3); switch ((unsigned int )adapter->hw.mac.type) { case 2U: device_id = 4333U; goto ldv_59651; case 3U: device_id = 5397U; goto ldv_59651; case 4U: device_id = 5477U; goto ldv_59651; case 5U: device_id = 5544U; goto ldv_59651; default: device_id = 0U; goto ldv_59651; } ldv_59651: vfdev = pci_get_device(32902U, device_id, (struct pci_dev *)0); goto ldv_59658; ldv_59657: ; if (vfdev->devfn == ((unsigned int )req_id & 255U)) { goto ldv_59656; } else { } vfdev = pci_get_device(32902U, device_id, vfdev); ldv_59658: ; if ((unsigned long )vfdev != (unsigned long )((struct pci_dev *)0)) { goto ldv_59657; } else { } ldv_59656: ; if ((unsigned long )vfdev != (unsigned long )((struct pci_dev *)0)) { ixgbe_issue_vf_flr(adapter, vfdev); pci_dev_put(vfdev); } else { } pci_cleanup_aer_uncorrect_error_status(pdev); } else { } adapter->vferr_refcount = adapter->vferr_refcount + 1U; return (5U); skip_bad_vf_detection: tmp___2 = constant_test_bit(6L, (unsigned long const volatile *)(& adapter->state)); if (tmp___2 == 0) { return (4U); } else { } rtnl_lock(); netif_device_detach(netdev); if (state == 3U) { rtnl_unlock(); return (4U); } else { } tmp___3 = netif_running((struct net_device const *)netdev); if ((int )tmp___3) { ixgbe_down(adapter); } else { } tmp___4 = test_and_set_bit(3L, (unsigned long volatile *)(& adapter->state)); if (tmp___4 == 0) { pci_disable_device(pdev); } else { } rtnl_unlock(); return (3U); } } /* AER slot_reset callback: re-enable the device after a slot reset, restore PCI state, re-arm bus mastering, reset the HW and clear the EICR mask register; returns 5U (recovered) or 4U (disconnect) and always attempts AER status cleanup. */ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev ) { struct ixgbe_adapter *adapter ; void *tmp ; pci_ers_result_t result ; int err ; int tmp___0 ; { tmp = pci_get_drvdata(pdev); adapter = (struct ixgbe_adapter *)tmp; tmp___0 = pci_enable_device_mem(pdev); if (tmp___0 != 0) { if (((int )adapter->msg_enable & 2) != 0) { netdev_err((struct 
net_device const *)adapter->netdev, "Cannot re-enable PCI device after reset.\n"); } else { } result = 4U; } else { __asm__ volatile ("": : : "memory"); clear_bit(3L, (unsigned long volatile *)(& adapter->state)); adapter->hw.hw_addr = adapter->io_addr; pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); pci_wake_from_d3(pdev, 0); ixgbe_reset(adapter); ixgbe_write_reg(& adapter->hw, 22544U, 4294967295U); result = 5U; } err = pci_cleanup_aer_uncorrect_error_status(pdev); if (err != 0) { dev_err((struct device const *)(& (adapter->pdev)->dev), "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err); } else { } return (result); } } /* AER resume callback: decrements the VF-error refcount if recovering from a VF fault, otherwise brings the interface back up and re-attaches the netdev. */ static void ixgbe_io_resume(struct pci_dev *pdev ) { struct ixgbe_adapter *adapter ; void *tmp ; struct net_device *netdev ; bool tmp___0 ; { tmp = pci_get_drvdata(pdev); adapter = (struct ixgbe_adapter *)tmp; netdev = adapter->netdev; if (adapter->vferr_refcount != 0U) { if ((int )adapter->msg_enable & 1) { netdev_info((struct net_device const *)adapter->netdev, "Resuming after VF err\n"); } else { } adapter->vferr_refcount = adapter->vferr_refcount - 1U; return; } else { } tmp___0 = netif_running((struct net_device const *)netdev); if ((int )tmp___0) { ixgbe_up(adapter); } else { } netif_device_attach(netdev); return; } } /* PCI AER handler table and driver registration structure (positional CIL initializers). */ static struct pci_error_handlers const ixgbe_err_handler = {(pci_ers_result_t (*)(struct pci_dev * , enum pci_channel_state ))(& ixgbe_io_error_detected), 0, 0, & ixgbe_io_slot_reset, 0, & ixgbe_io_resume}; static struct pci_driver ixgbe_driver = {{0, 0}, (char const *)(& ixgbe_driver_name), (struct pci_device_id const *)(& ixgbe_pci_tbl), & ixgbe_probe, & ixgbe_remove, & ixgbe_suspend, 0, 0, & ixgbe_resume, & ixgbe_shutdown, & ixgbe_pci_sriov_configure, & ixgbe_err_handler, {0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{{{{{0}}, 0U, 0U, 0, {0, {0, 0}, 0, 0, 0UL}}}}, {0, 0}}}; /* Module init: print banner, init debugfs, register the PCI driver via the LDV wrapper, then register the DCA notifier. */ static int ixgbe_init_module(void) { int ret ; { printk("\016ixgbe: %s - version %s\n", (char const *)(& 
ixgbe_driver_string), (char const *)(& ixgbe_driver_version)); printk("\016ixgbe: %s\n", (char const *)(& ixgbe_copyright)); ixgbe_dbg_init(); ret = ldv___pci_register_driver_64(& ixgbe_driver, & __this_module, "ixgbe"); if (ret != 0) { ixgbe_dbg_exit(); return (ret); } else { } dca_register_notify(& dca_notifier); return (0); } } static void ixgbe_exit_module(void) { { dca_unregister_notify(& dca_notifier); ldv_pci_unregister_driver_65(& ixgbe_driver); ixgbe_dbg_exit(); return; } } /* DCA notifier: forwards the event to every bound device via __ixgbe_notify_dca; returns NOTIFY_BAD-style value (32770) on failure. */ static int ixgbe_notify_dca(struct notifier_block *nb , unsigned long event , void *p ) { int ret_val ; { ret_val = driver_for_each_device(& ixgbe_driver.driver, (struct device *)0, (void *)(& event), & __ixgbe_notify_dca); return (ret_val != 0 ? 32770 : 0); } } /* --- LDV verification harness below: auto-generated environment model tracking up to 4 registered IRQs/timers/work items per slot family; not part of the original driver. --- */ extern int ldv_resume_early_36(void) ; int ldv_retval_2 ; int ldv_retval_5 ; extern int ldv_ndo_uninit_38(void) ; int ldv_retval_6 ; int ldv_retval_1 ; extern void ldv_initialize(void) ; extern int ldv_probe_37(void) ; int ldv_retval_9 ; extern int ldv_suspend_37(void) ; int ldv_retval_4 ; extern int ldv_release_37(void) ; extern int ldv_ndo_init_38(void) ; extern void ldv_check_final_state(void) ; int ldv_retval_8 ; extern int ldv_suspend_late_36(void) ; int ldv_retval_3 ; void activate_suitable_irq_4(int line , void *data ) { { if (ldv_irq_4_0 == 0) { ldv_irq_line_4_0 = line; ldv_irq_data_4_0 = data; ldv_irq_4_0 = 1; return; } else { } if (ldv_irq_4_1 == 0) { ldv_irq_line_4_1 = line; ldv_irq_data_4_1 = data; ldv_irq_4_1 = 1; return; } else { } if (ldv_irq_4_2 == 0) { ldv_irq_line_4_2 = line; ldv_irq_data_4_2 = data; ldv_irq_4_2 = 1; return; } else { } if (ldv_irq_4_3 == 0) { ldv_irq_line_4_3 = line; ldv_irq_data_4_3 = data; ldv_irq_4_3 = 1; return; } else { } return; } } int ldv_irq_3(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 
1) { LDV_IN_INTERRUPT = 2; irq_retval = ixgbe_intr(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_59732; default: ldv_stop(); } ldv_59732: ; } else { } return (state); } } void disable_suitable_irq_2(int line , void *data ) { { if (ldv_irq_2_0 != 0 && line == ldv_irq_line_2_0) { ldv_irq_2_0 = 0; return; } else { } if (ldv_irq_2_1 != 0 && line == ldv_irq_line_2_1) { ldv_irq_2_1 = 0; return; } else { } if (ldv_irq_2_2 != 0 && line == ldv_irq_line_2_2) { ldv_irq_2_2 = 0; return; } else { } if (ldv_irq_2_3 != 0 && line == ldv_irq_line_2_3) { ldv_irq_2_3 = 0; return; } else { } return; } } void activate_suitable_irq_3(int line , void *data ) { { if (ldv_irq_3_0 == 0) { ldv_irq_line_3_0 = line; ldv_irq_data_3_0 = data; ldv_irq_3_0 = 1; return; } else { } if (ldv_irq_3_1 == 0) { ldv_irq_line_3_1 = line; ldv_irq_data_3_1 = data; ldv_irq_3_1 = 1; return; } else { } if (ldv_irq_3_2 == 0) { ldv_irq_line_3_2 = line; ldv_irq_data_3_2 = data; ldv_irq_3_2 = 1; return; } else { } if (ldv_irq_3_3 == 0) { ldv_irq_line_3_3 = line; ldv_irq_data_3_3 = data; ldv_irq_3_3 = 1; return; } else { } return; } } /* LDV model: allocate a zeroed fake net_device (3008 bytes) for exercising the ndo callbacks. */ void ldv_net_device_ops_38(void) { void *tmp ; { tmp = ldv_init_zalloc(3008UL); ixgbe_netdev_ops_group1 = (struct net_device *)tmp; return; } } int reg_check_1(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& ixgbe_msix_clean_rings)) { return (1); } else { } return (0); } } /* LDV model: nondeterministically fire one of the four registered IRQ-family-4 handlers. */ void choose_interrupt_4(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_4_0 = ldv_irq_4(ldv_irq_4_0, ldv_irq_line_4_0, ldv_irq_data_4_0); goto ldv_59754; case 1: ldv_irq_4_0 = ldv_irq_4(ldv_irq_4_1, ldv_irq_line_4_1, ldv_irq_data_4_1); goto ldv_59754; case 2: ldv_irq_4_0 = ldv_irq_4(ldv_irq_4_2, ldv_irq_line_4_2, ldv_irq_data_4_2); goto ldv_59754; case 3: ldv_irq_4_0 = ldv_irq_4(ldv_irq_4_3, ldv_irq_line_4_3, ldv_irq_data_4_3); goto ldv_59754; default: ldv_stop(); } ldv_59754: ; return; } } void invoke_work_8(void) 
{ int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_8_0 == 2 || ldv_work_8_0 == 3) { ldv_work_8_0 = 4; ixgbe_service_task(ldv_work_struct_8_0); ldv_work_8_0 = 1; } else { } goto ldv_59765; case 1: ; if (ldv_work_8_1 == 2 || ldv_work_8_1 == 3) { ldv_work_8_1 = 4; ixgbe_service_task(ldv_work_struct_8_0); ldv_work_8_1 = 1; } else { } goto ldv_59765; case 2: ; if (ldv_work_8_2 == 2 || ldv_work_8_2 == 3) { ldv_work_8_2 = 4; ixgbe_service_task(ldv_work_struct_8_0); ldv_work_8_2 = 1; } else { } goto ldv_59765; case 3: ; if (ldv_work_8_3 == 2 || ldv_work_8_3 == 3) { ldv_work_8_3 = 4; ixgbe_service_task(ldv_work_struct_8_0); ldv_work_8_3 = 1; } else { } goto ldv_59765; default: ldv_stop(); } ldv_59765: ; return; } } /* LDV model of setup_timer(): only the driver's ixgbe_service_timer is tracked. */ int reg_timer_10(struct timer_list *timer , void (*function)(unsigned long ) , unsigned long data ) { { if ((unsigned long )function == (unsigned long )(& ixgbe_service_timer)) { activate_suitable_timer_10(timer, data); } else { } return (0); } } /* LDV model: invoke the timer callback in interrupt context (LDV_IN_INTERRUPT = 2). */ void ldv_timer_10(int state , struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; ixgbe_service_timer(timer->data); LDV_IN_INTERRUPT = 1; return; } } void activate_suitable_irq_2(int line , void *data ) { { if (ldv_irq_2_0 == 0) { ldv_irq_line_2_0 = line; ldv_irq_data_2_0 = data; ldv_irq_2_0 = 1; return; } else { } if (ldv_irq_2_1 == 0) { ldv_irq_line_2_1 = line; ldv_irq_data_2_1 = data; ldv_irq_2_1 = 1; return; } else { } if (ldv_irq_2_2 == 0) { ldv_irq_line_2_2 = line; ldv_irq_data_2_2 = data; ldv_irq_2_2 = 1; return; } else { } if (ldv_irq_2_3 == 0) { ldv_irq_line_2_3 = line; ldv_irq_data_2_3 = data; ldv_irq_2_3 = 1; return; } else { } return; } } void work_init_8(void) { { ldv_work_8_0 = 0; ldv_work_8_1 = 0; ldv_work_8_2 = 0; ldv_work_8_3 = 0; return; } } void activate_suitable_timer_10(struct timer_list *timer , unsigned long data ) { { if (ldv_timer_10_0 == 0 || ldv_timer_10_0 == 2) { ldv_timer_list_10_0 = timer; ldv_timer_list_10_0->data = data; ldv_timer_10_0 = 1; return; } else { 
} if (ldv_timer_10_1 == 0 || ldv_timer_10_1 == 2) { ldv_timer_list_10_1 = timer; ldv_timer_list_10_1->data = data; ldv_timer_10_1 = 1; return; } else { } if (ldv_timer_10_2 == 0 || ldv_timer_10_2 == 2) { ldv_timer_list_10_2 = timer; ldv_timer_list_10_2->data = data; ldv_timer_10_2 = 1; return; } else { } if (ldv_timer_10_3 == 0 || ldv_timer_10_3 == 2) { ldv_timer_list_10_3 = timer; ldv_timer_list_10_3->data = data; ldv_timer_10_3 = 1; return; } else { } return; } } void choose_interrupt_1(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_1_0 = ldv_irq_1(ldv_irq_1_0, ldv_irq_line_1_0, ldv_irq_data_1_0); goto ldv_59795; case 1: ldv_irq_1_0 = ldv_irq_1(ldv_irq_1_1, ldv_irq_line_1_1, ldv_irq_data_1_1); goto ldv_59795; case 2: ldv_irq_1_0 = ldv_irq_1(ldv_irq_1_2, ldv_irq_line_1_2, ldv_irq_data_1_2); goto ldv_59795; case 3: ldv_irq_1_0 = ldv_irq_1(ldv_irq_1_3, ldv_irq_line_1_3, ldv_irq_data_1_3); goto ldv_59795; default: ldv_stop(); } ldv_59795: ; return; } } int reg_check_2(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& ixgbe_msix_other)) { return (1); } else { } return (0); } } /* LDV model of cancel_work_sync()-style draining: run the pending service task then mark the slot idle. */ void call_and_disable_work_8(struct work_struct *work ) { { if ((ldv_work_8_0 == 2 || ldv_work_8_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_8_0) { ixgbe_service_task(work); ldv_work_8_0 = 1; return; } else { } if ((ldv_work_8_1 == 2 || ldv_work_8_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_8_1) { ixgbe_service_task(work); ldv_work_8_1 = 1; return; } else { } if ((ldv_work_8_2 == 2 || ldv_work_8_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_8_2) { ixgbe_service_task(work); ldv_work_8_2 = 1; return; } else { } if ((ldv_work_8_3 == 2 || ldv_work_8_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_8_3) { ixgbe_service_task(work); ldv_work_8_3 = 1; return; } else { } return; } } int reg_check_3(irqreturn_t (*handler)(int , void * ) ) { { 
if ((unsigned long )handler == (unsigned long )(& ixgbe_intr)) { return (1); } else { } return (0); } } void disable_work_8(struct work_struct *work ) { { if ((ldv_work_8_0 == 3 || ldv_work_8_0 == 2) && (unsigned long )ldv_work_struct_8_0 == (unsigned long )work) { ldv_work_8_0 = 1; } else { } if ((ldv_work_8_1 == 3 || ldv_work_8_1 == 2) && (unsigned long )ldv_work_struct_8_1 == (unsigned long )work) { ldv_work_8_1 = 1; } else { } if ((ldv_work_8_2 == 3 || ldv_work_8_2 == 2) && (unsigned long )ldv_work_struct_8_2 == (unsigned long )work) { ldv_work_8_2 = 1; } else { } if ((ldv_work_8_3 == 3 || ldv_work_8_3 == 2) && (unsigned long )ldv_work_struct_8_3 == (unsigned long )work) { ldv_work_8_3 = 1; } else { } return; } } int ldv_irq_4(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = ixgbe_intr(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_59825; default: ldv_stop(); } ldv_59825: ; } else { } return (state); } } void activate_pending_timer_10(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_10_0 == (unsigned long )timer) { if (ldv_timer_10_0 == 2 || pending_flag != 0) { ldv_timer_list_10_0 = timer; ldv_timer_list_10_0->data = data; ldv_timer_10_0 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_10_1 == (unsigned long )timer) { if (ldv_timer_10_1 == 2 || pending_flag != 0) { ldv_timer_list_10_1 = timer; ldv_timer_list_10_1->data = data; ldv_timer_10_1 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_10_2 == (unsigned long )timer) { if (ldv_timer_10_2 == 2 || pending_flag != 0) { ldv_timer_list_10_2 = timer; ldv_timer_list_10_2->data = data; ldv_timer_10_2 = 1; } else { } return; } else { } if ((unsigned long 
/* ...continuation of activate_pending_timer_10(): last slot, then
 * delegate unknown timers to activate_suitable_timer_10(). */
)ldv_timer_list_10_3 == (unsigned long )timer) { if (ldv_timer_10_3 == 2 || pending_flag != 0) { ldv_timer_list_10_3 = timer; ldv_timer_list_10_3->data = data; ldv_timer_10_3 = 1; } else { } return; } else { }
activate_suitable_timer_10(timer, data);
return; } }
/* Run-and-idle (via call_and_disable_work_8) every work slot currently in
 * the given state. */
void call_and_disable_all_8(int state ) { {
if (ldv_work_8_0 == state) { call_and_disable_work_8(ldv_work_struct_8_0); } else { }
if (ldv_work_8_1 == state) { call_and_disable_work_8(ldv_work_struct_8_1); } else { }
if (ldv_work_8_2 == state) { call_and_disable_work_8(ldv_work_struct_8_2); } else { }
if (ldv_work_8_3 == state) { call_and_disable_work_8(ldv_work_struct_8_3); } else { }
return; } }
/* Presumably models free_irq() for IRQ group 1: clear (state 0) the slot
 * registered on `line`; `data` is unused here. */
void disable_suitable_irq_1(int line , void *data ) { {
if (ldv_irq_1_0 != 0 && line == ldv_irq_line_1_0) { ldv_irq_1_0 = 0; return; } else { }
if (ldv_irq_1_1 != 0 && line == ldv_irq_line_1_1) { ldv_irq_1_1 = 0; return; } else { }
if (ldv_irq_1_2 != 0 && line == ldv_irq_line_1_2) { ldv_irq_1_2 = 0; return; } else { }
if (ldv_irq_1_3 != 0 && line == ldv_irq_line_1_3) { ldv_irq_1_3 = 0; return; } else { }
return; } }
/* Presumably models request_irq() for IRQ group 1: record line/data in
 * the first free slot and mark it active (1). */
void activate_suitable_irq_1(int line , void *data ) { {
if (ldv_irq_1_0 == 0) { ldv_irq_line_1_0 = line; ldv_irq_data_1_0 = data; ldv_irq_1_0 = 1; return; } else { }
if (ldv_irq_1_1 == 0) { ldv_irq_line_1_1 = line; ldv_irq_data_1_1 = data; ldv_irq_1_1 = 1; return; } else { }
if (ldv_irq_1_2 == 0) { ldv_irq_line_1_2 = line; ldv_irq_data_1_2 = data; ldv_irq_1_2 = 1; return; } else { }
if (ldv_irq_1_3 == 0) { ldv_irq_line_1_3 = line; ldv_irq_data_1_3 = data; ldv_irq_1_3 = 1; return; } else { }
return; } }
/* Returns 1 iff the registered handler is ixgbe_intr, else 0
 * (same check as reg_check_3, generated per interrupt group). */
int reg_check_4(irqreturn_t (*handler)(int , void * ) ) { {
if ((unsigned long )handler == (unsigned long )(& ixgbe_intr)) { return (1); } else { }
return (0); } }
/* Presumably models queue_work(): put *work into the first free slot
 * (state 0) with the caller-supplied state (continued on the next
 * source line). */
void activate_work_8(struct work_struct *work , int state ) { {
if (ldv_work_8_0 == 0) { ldv_work_struct_8_0 = work; ldv_work_8_0 = state; return; } else { }
if (ldv_work_8_1 == 0) { ldv_work_struct_8_1 = work; ldv_work_8_1 = state; return; }
/* ...continuation of activate_work_8(): remaining two slots. */
else { }
if (ldv_work_8_2 == 0) { ldv_work_struct_8_2 = work; ldv_work_8_2 = state; return; } else { }
if (ldv_work_8_3 == 0) { ldv_work_struct_8_3 = work; ldv_work_8_3 = state; return; } else { }
return; } }
/* IRQ model for group 2: an active line (state 1) may fire
 * ixgbe_msix_other() with LDV_IN_INTERRUPT == 2 around the call. */
int ldv_irq_2(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; {
tmp = __VERIFIER_nondet_int();
irq_retval = (irqreturn_t )tmp;
if (state != 0) {
tmp___0 = __VERIFIER_nondet_int();
switch (tmp___0) {
case 0: ;
if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = ixgbe_msix_other(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { }
goto ldv_59859;
default: ldv_stop();
} ldv_59859: ;
} else { }
return (state); } }
/* Allocate the zeroed 2976-byte object used as the pci_dev argument
 * (ixgbe_driver_group1) for the modelled pci_driver callbacks. */
void ldv_pci_driver_36(void) { void *tmp ; {
tmp = ldv_init_zalloc(2976UL);
ixgbe_driver_group1 = (struct pci_dev *)tmp;
return; } }
/* Nondeterministically deliver one of the four modelled IRQ-group-2
 * interrupts.  NOTE(review): as in choose_interrupt_1(), every case
 * stores into ldv_irq_2_0 — reproduced exactly as generated. */
void choose_interrupt_2(void) { int tmp ; {
tmp = __VERIFIER_nondet_int();
switch (tmp) {
case 0: ldv_irq_2_0 = ldv_irq_2(ldv_irq_2_0, ldv_irq_line_2_0, ldv_irq_data_2_0); goto ldv_59868;
case 1: ldv_irq_2_0 = ldv_irq_2(ldv_irq_2_1, ldv_irq_line_2_1, ldv_irq_data_2_1); goto ldv_59868;
case 2: ldv_irq_2_0 = ldv_irq_2(ldv_irq_2_2, ldv_irq_line_2_2, ldv_irq_data_2_2); goto ldv_59868;
case 3: ldv_irq_2_0 = ldv_irq_2(ldv_irq_2_3, ldv_irq_line_2_3, ldv_irq_data_2_3); goto ldv_59868;
default: ldv_stop();
} ldv_59868: ;
return; } }
/* Initialise the pci_error_handlers group0 argument with an arbitrary
 * (nondeterministic) pointer. */
void ldv_initialize_pci_error_handlers_37(void) { void *tmp ; {
tmp = __VERIFIER_nondet_pointer();
ixgbe_err_handler_group0 = (struct pci_dev *)tmp;
return; } }
/* Presumably models del_timer(): deactivate (state 0) whichever slot
 * holds this timer (continued on the next source line). */
void disable_suitable_timer_10(struct timer_list *timer ) { {
if (ldv_timer_10_0 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_10_0) { ldv_timer_10_0 = 0; return; } else { }
if (ldv_timer_10_1 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_10_1) { ldv_timer_10_1 = 0; return; } else { }
if (ldv_timer_10_2 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_10_2) { ldv_timer_10_2 = 0; return; } else { }
if (ldv_timer_10_3 != 0 && (unsigned long )timer ==
/* ...continuation of disable_suitable_timer_10(): last slot. */
(unsigned long )ldv_timer_list_10_3) { ldv_timer_10_3 = 0; return; } else { }
return; } }
/* Clear (state 0) the IRQ-group-4 slot registered on `line`, if any;
 * `data` is unused here. */
void disable_suitable_irq_4(int line , void *data ) { {
if (ldv_irq_4_0 != 0 && line == ldv_irq_line_4_0) { ldv_irq_4_0 = 0; return; } else { }
if (ldv_irq_4_1 != 0 && line == ldv_irq_line_4_1) { ldv_irq_4_1 = 0; return; } else { }
if (ldv_irq_4_2 != 0 && line == ldv_irq_line_4_2) { ldv_irq_4_2 = 0; return; } else { }
if (ldv_irq_4_3 != 0 && line == ldv_irq_line_4_3) { ldv_irq_4_3 = 0; return; } else { }
return; } }
/* Clear (state 0) the IRQ-group-3 slot registered on `line`, if any. */
void disable_suitable_irq_3(int line , void *data ) { {
if (ldv_irq_3_0 != 0 && line == ldv_irq_line_3_0) { ldv_irq_3_0 = 0; return; } else { }
if (ldv_irq_3_1 != 0 && line == ldv_irq_line_3_1) { ldv_irq_3_1 = 0; return; } else { }
if (ldv_irq_3_2 != 0 && line == ldv_irq_line_3_2) { ldv_irq_3_2 = 0; return; } else { }
if (ldv_irq_3_3 != 0 && line == ldv_irq_line_3_3) { ldv_irq_3_3 = 0; return; } else { }
return; } }
/* Reset all four timer slots to state 0 (unregistered). */
void timer_init_10(void) { {
ldv_timer_10_0 = 0;
ldv_timer_10_1 = 0;
ldv_timer_10_2 = 0;
ldv_timer_10_3 = 0;
return; } }
/* IRQ model for group 1: an active line (state 1) may fire
 * ixgbe_msix_clean_rings() with LDV_IN_INTERRUPT == 2 around the call. */
int ldv_irq_1(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; {
tmp = __VERIFIER_nondet_int();
irq_retval = (irqreturn_t )tmp;
if (state != 0) {
tmp___0 = __VERIFIER_nondet_int();
switch (tmp___0) {
case 0: ;
if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = ixgbe_msix_clean_rings(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { }
goto ldv_59897;
default: ldv_stop();
} ldv_59897: ;
} else { }
return (state); } }
/* Nondeterministically pick an armed timer (state 1), mark it fired (2),
 * and invoke its callback via ldv_timer_10() (continues on the next
 * source line). */
void choose_timer_10(void) { int tmp ; {
tmp = __VERIFIER_nondet_int();
switch (tmp) {
case 0: ;
if (ldv_timer_10_0 == 1) { ldv_timer_10_0 = 2; ldv_timer_10(ldv_timer_10_0, ldv_timer_list_10_0); } else { }
goto ldv_59905;
case 1: ;
if (ldv_timer_10_1 == 1) { ldv_timer_10_1 = 2; ldv_timer_10(ldv_timer_10_1, ldv_timer_list_10_1); } else { }
goto ldv_59905;
case 2: ;
if (ldv_timer_10_2 == 1) { ldv_timer_10_2 = 2; ldv_timer_10(ldv_timer_10_2, ldv_timer_list_10_2); } else { }
/* ...continuation of choose_timer_10(): case 2 jump, case 3, default. */
goto ldv_59905;
case 3: ;
if (ldv_timer_10_3 == 1) { ldv_timer_10_3 = 2; ldv_timer_10(ldv_timer_10_3, ldv_timer_list_10_3); } else { }
goto ldv_59905;
default: ldv_stop();
} ldv_59905: ;
return; } }
/* Nondeterministically deliver one of the four modelled IRQ-group-3
 * interrupts.  NOTE(review): every case stores into ldv_irq_3_0 —
 * reproduced exactly as generated. */
void choose_interrupt_3(void) { int tmp ; {
tmp = __VERIFIER_nondet_int();
switch (tmp) {
case 0: ldv_irq_3_0 = ldv_irq_3(ldv_irq_3_0, ldv_irq_line_3_0, ldv_irq_data_3_0); goto ldv_59914;
case 1: ldv_irq_3_0 = ldv_irq_3(ldv_irq_3_1, ldv_irq_line_3_1, ldv_irq_data_3_1); goto ldv_59914;
case 2: ldv_irq_3_0 = ldv_irq_3(ldv_irq_3_2, ldv_irq_line_3_2, ldv_irq_data_3_2); goto ldv_59914;
case 3: ldv_irq_3_0 = ldv_irq_3(ldv_irq_3_3, ldv_irq_line_3_3, ldv_irq_data_3_3); goto ldv_59914;
default: ldv_stop();
} ldv_59914: ;
return; } }
/* Forward declarations of the per-interface-group harness entry points
 * driven from main()'s top-level switch. */
void ldv_main_exported_13(void) ;
void ldv_main_exported_33(void) ;
void ldv_main_exported_32(void) ;
void ldv_main_exported_34(void) ;
void ldv_main_exported_31(void) ;
void ldv_main_exported_25(void) ;
void ldv_main_exported_22(void) ;
void ldv_main_exported_24(void) ;
void ldv_main_exported_23(void) ;
void ldv_main_exported_35(void) ;
void ldv_main_exported_26(void) ;
void ldv_main_exported_11(void) ;
void ldv_main_exported_12(void) ;
void ldv_main_exported_21(void) ;
void ldv_main_exported_17(void) ;
void ldv_main_exported_20(void) ;
void ldv_main_exported_15(void) ;
void ldv_main_exported_14(void) ;
void ldv_main_exported_18(void) ;
void ldv_main_exported_16(void) ;
void ldv_main_exported_19(void) ;
void ldv_main_exported_27(void) ;
void ldv_main_exported_28(void) ;
void ldv_main_exported_30(void) ;
void ldv_main_exported_29(void) ;
/* Harness entry point: begins the (very long) block of local declarations
 * holding argument placeholders for the driver callbacks; the definition
 * continues on the following source lines. */
int main(void) { void *ldvarg431 ; void *tmp ; unsigned long ldvarg432 ; struct notifier_block *ldvarg430 ; void *tmp___0 ; int ldvarg433 ; pm_message_t ldvarg435 ; struct pci_device_id *ldvarg434 ; void *tmp___1 ; u32 ldvarg469 ; u16 ldvarg458 ; u64 *ldvarg463 ; void *tmp___2 ; struct netdev_fcoe_hbainfo *ldvarg487 ; void *tmp___3 ; struct ifreq *ldvarg500 ; void *tmp___4 ; __be16 ldvarg476 ; u16 ldvarg479 ; struct
scatterlist *ldvarg473 ; void *tmp___5 ; int ldvarg485 ; unsigned int ldvarg464 ; unsigned int ldvarg472 ; struct napi_struct *ldvarg461 ; void *tmp___6 ; bool ldvarg452 ; u16 (*ldvarg449)(struct net_device * , struct sk_buff * ) ; struct nlattr **ldvarg457 ; void *tmp___7 ; void *ldvarg450 ; void *tmp___8 ; netdev_features_t ldvarg446 ; void *ldvarg488 ; void *tmp___9 ; struct sk_buff *ldvarg468 ; void *tmp___10 ; struct sk_buff *ldvarg496 ; void *tmp___11 ; u8 *ldvarg484 ; void *tmp___12 ; int ldvarg483 ; u16 ldvarg490 ; struct rtnl_link_stats64 *ldvarg443 ; void *tmp___13 ; int ldvarg499 ; int ldvarg498 ; netdev_features_t ldvarg493 ; __be16 ldvarg480 ; u8 ldvarg489 ; void *ldvarg445 ; void *tmp___14 ; u16 ldvarg466 ; sa_family_t ldvarg448 ; struct ifla_vf_info *ldvarg497 ; void *tmp___15 ; u16 ldvarg474 ; struct scatterlist *ldvarg465 ; void *tmp___16 ; int ldvarg495 ; int ldvarg482 ; u16 ldvarg477 ; __be16 ldvarg459 ; u16 ldvarg475 ; __be16 ldvarg447 ; int ldvarg462 ; int ldvarg491 ; struct nlmsghdr *ldvarg478 ; void *tmp___17 ; sa_family_t ldvarg460 ; u32 ldvarg471 ; bool ldvarg494 ; struct sk_buff *ldvarg451 ; void *tmp___18 ; int ldvarg486 ; unsigned char *ldvarg455 ; void *tmp___19 ; u16 ldvarg454 ; int ldvarg481 ; int ldvarg467 ; struct ndmsg *ldvarg456 ; void *tmp___20 ; u16 ldvarg492 ; int ldvarg453 ; u8 ldvarg444 ; u32 ldvarg470 ; enum pci_channel_state ldvarg553 ; int tmp___21 ; int tmp___22 ; int tmp___23 ; int tmp___24 ; int tmp___25 ; int tmp___26 ; { tmp = ldv_init_zalloc(1UL); ldvarg431 = tmp; tmp___0 = ldv_init_zalloc(24UL); ldvarg430 = (struct notifier_block *)tmp___0; tmp___1 = ldv_init_zalloc(32UL); ldvarg434 = (struct pci_device_id *)tmp___1; tmp___2 = ldv_init_zalloc(8UL); ldvarg463 = (u64 *)tmp___2; tmp___3 = ldv_init_zalloc(896UL); ldvarg487 = (struct netdev_fcoe_hbainfo *)tmp___3; tmp___4 = ldv_init_zalloc(40UL); ldvarg500 = (struct ifreq *)tmp___4; tmp___5 = ldv_init_zalloc(40UL); ldvarg473 = (struct scatterlist *)tmp___5; tmp___6 = 
ldv_init_zalloc(280UL); ldvarg461 = (struct napi_struct *)tmp___6; tmp___7 = ldv_init_zalloc(8UL); ldvarg457 = (struct nlattr **)tmp___7; tmp___8 = ldv_init_zalloc(1UL); ldvarg450 = tmp___8; tmp___9 = ldv_init_zalloc(1UL); ldvarg488 = tmp___9; tmp___10 = ldv_init_zalloc(232UL); ldvarg468 = (struct sk_buff *)tmp___10; tmp___11 = ldv_init_zalloc(232UL); ldvarg496 = (struct sk_buff *)tmp___11; tmp___12 = ldv_init_zalloc(1UL); ldvarg484 = (u8 *)tmp___12; tmp___13 = ldv_init_zalloc(184UL); ldvarg443 = (struct rtnl_link_stats64 *)tmp___13; tmp___14 = ldv_init_zalloc(1UL); ldvarg445 = tmp___14; tmp___15 = ldv_init_zalloc(64UL); ldvarg497 = (struct ifla_vf_info *)tmp___15; tmp___16 = ldv_init_zalloc(40UL); ldvarg465 = (struct scatterlist *)tmp___16; tmp___17 = ldv_init_zalloc(16UL); ldvarg478 = (struct nlmsghdr *)tmp___17; tmp___18 = ldv_init_zalloc(232UL); ldvarg451 = (struct sk_buff *)tmp___18; tmp___19 = ldv_init_zalloc(1UL); ldvarg455 = (unsigned char *)tmp___19; tmp___20 = ldv_init_zalloc(12UL); ldvarg456 = (struct ndmsg *)tmp___20; ldv_initialize(); ldv_memset((void *)(& ldvarg432), 0, 8UL); ldv_memset((void *)(& ldvarg433), 0, 4UL); ldv_memset((void *)(& ldvarg435), 0, 4UL); ldv_memset((void *)(& ldvarg469), 0, 4UL); ldv_memset((void *)(& ldvarg458), 0, 2UL); ldv_memset((void *)(& ldvarg476), 0, 2UL); ldv_memset((void *)(& ldvarg479), 0, 2UL); ldv_memset((void *)(& ldvarg485), 0, 4UL); ldv_memset((void *)(& ldvarg464), 0, 4UL); ldv_memset((void *)(& ldvarg472), 0, 4UL); ldv_memset((void *)(& ldvarg452), 0, 1UL); ldv_memset((void *)(& ldvarg449), 0, 8UL); ldv_memset((void *)(& ldvarg446), 0, 8UL); ldv_memset((void *)(& ldvarg483), 0, 4UL); ldv_memset((void *)(& ldvarg490), 0, 2UL); ldv_memset((void *)(& ldvarg499), 0, 4UL); ldv_memset((void *)(& ldvarg498), 0, 4UL); ldv_memset((void *)(& ldvarg493), 0, 8UL); ldv_memset((void *)(& ldvarg480), 0, 2UL); ldv_memset((void *)(& ldvarg489), 0, 1UL); ldv_memset((void *)(& ldvarg466), 0, 2UL); ldv_memset((void *)(& 
ldvarg448), 0, 2UL); ldv_memset((void *)(& ldvarg474), 0, 2UL); ldv_memset((void *)(& ldvarg495), 0, 4UL); ldv_memset((void *)(& ldvarg482), 0, 4UL); ldv_memset((void *)(& ldvarg477), 0, 2UL); ldv_memset((void *)(& ldvarg459), 0, 2UL); ldv_memset((void *)(& ldvarg475), 0, 2UL); ldv_memset((void *)(& ldvarg447), 0, 2UL); ldv_memset((void *)(& ldvarg462), 0, 4UL); ldv_memset((void *)(& ldvarg491), 0, 4UL); ldv_memset((void *)(& ldvarg460), 0, 2UL); ldv_memset((void *)(& ldvarg471), 0, 4UL); ldv_memset((void *)(& ldvarg494), 0, 1UL); ldv_memset((void *)(& ldvarg486), 0, 4UL); ldv_memset((void *)(& ldvarg454), 0, 2UL); ldv_memset((void *)(& ldvarg481), 0, 4UL); ldv_memset((void *)(& ldvarg467), 0, 4UL); ldv_memset((void *)(& ldvarg492), 0, 2UL); ldv_memset((void *)(& ldvarg453), 0, 4UL); ldv_memset((void *)(& ldvarg444), 0, 1UL); ldv_memset((void *)(& ldvarg470), 0, 4UL); ldv_memset((void *)(& ldvarg553), 0, 4UL); ldv_state_variable_33 = 0; ldv_state_variable_32 = 0; ldv_state_variable_21 = 0; ldv_state_variable_7 = 1; ldv_state_variable_26 = 0; ldv_state_variable_17 = 0; ldv_state_variable_2 = 1; ldv_state_variable_1 = 1; ldv_state_variable_18 = 0; ldv_state_variable_30 = 0; ldv_state_variable_16 = 0; ldv_state_variable_27 = 0; ldv_state_variable_25 = 0; ldv_state_variable_28 = 0; ldv_state_variable_20 = 0; ldv_state_variable_14 = 0; ldv_state_variable_24 = 0; timer_init_10(); ldv_state_variable_10 = 1; ldv_state_variable_31 = 0; ldv_state_variable_35 = 0; ldv_state_variable_11 = 0; ldv_state_variable_22 = 0; ref_cnt = 0; ldv_state_variable_0 = 1; ldv_state_variable_13 = 0; ldv_state_variable_23 = 0; ldv_state_variable_29 = 0; ldv_state_variable_6 = 1; ldv_state_variable_39 = 0; ldv_state_variable_36 = 0; ldv_state_variable_3 = 1; work_init_9(); ldv_state_variable_9 = 1; ldv_state_variable_12 = 0; ldv_state_variable_15 = 0; ldv_state_variable_38 = 0; work_init_8(); ldv_state_variable_8 = 1; ldv_state_variable_4 = 1; ldv_state_variable_34 = 0; ldv_state_variable_37 = 
0; ldv_state_variable_19 = 0; ldv_state_variable_5 = 1; ldv_60161: tmp___21 = __VERIFIER_nondet_int(); switch (tmp___21) { case 0: ; if (ldv_state_variable_33 != 0) { ldv_main_exported_33(); } else { } goto ldv_60052; case 1: ; if (ldv_state_variable_32 != 0) { ldv_main_exported_32(); } else { } goto ldv_60052; case 2: ; if (ldv_state_variable_21 != 0) { ldv_main_exported_21(); } else { } goto ldv_60052; case 3: ; goto ldv_60052; case 4: ; if (ldv_state_variable_26 != 0) { ldv_main_exported_26(); } else { } goto ldv_60052; case 5: ; if (ldv_state_variable_17 != 0) { ldv_main_exported_17(); } else { } goto ldv_60052; case 6: ; if (ldv_state_variable_2 != 0) { choose_interrupt_2(); } else { } goto ldv_60052; case 7: ; if (ldv_state_variable_1 != 0) { choose_interrupt_1(); } else { } goto ldv_60052; case 8: ; if (ldv_state_variable_18 != 0) { ldv_main_exported_18(); } else { } goto ldv_60052; case 9: ; if (ldv_state_variable_30 != 0) { ldv_main_exported_30(); } else { } goto ldv_60052; case 10: ; if (ldv_state_variable_16 != 0) { ldv_main_exported_16(); } else { } goto ldv_60052; case 11: ; if (ldv_state_variable_27 != 0) { ldv_main_exported_27(); } else { } goto ldv_60052; case 12: ; if (ldv_state_variable_25 != 0) { ldv_main_exported_25(); } else { } goto ldv_60052; case 13: ; if (ldv_state_variable_28 != 0) { ldv_main_exported_28(); } else { } goto ldv_60052; case 14: ; if (ldv_state_variable_20 != 0) { ldv_main_exported_20(); } else { } goto ldv_60052; case 15: ; if (ldv_state_variable_14 != 0) { ldv_main_exported_14(); } else { } goto ldv_60052; case 16: ; if (ldv_state_variable_24 != 0) { ldv_main_exported_24(); } else { } goto ldv_60052; case 17: ; if (ldv_state_variable_10 != 0) { choose_timer_10(); } else { } goto ldv_60052; case 18: ; if (ldv_state_variable_31 != 0) { ldv_main_exported_31(); } else { } goto ldv_60052; case 19: ; if (ldv_state_variable_35 != 0) { ldv_main_exported_35(); } else { } goto ldv_60052; case 20: ; if (ldv_state_variable_11 != 0) { 
ldv_main_exported_11(); } else { } goto ldv_60052; case 21: ; if (ldv_state_variable_22 != 0) { ldv_main_exported_22(); } else { } goto ldv_60052; case 22: ; if (ldv_state_variable_0 != 0) { tmp___22 = __VERIFIER_nondet_int(); switch (tmp___22) { case 0: ; if (ldv_state_variable_0 == 3 && ref_cnt == 0) { ixgbe_exit_module(); ldv_state_variable_0 = 2; goto ldv_final; } else { } goto ldv_60077; case 1: ; if (ldv_state_variable_0 == 1) { ldv_retval_1 = ixgbe_init_module(); if (ldv_retval_1 == 0) { ldv_state_variable_0 = 3; ldv_state_variable_31 = 1; ldv_state_variable_19 = 1; ldv_initialize_ixgbe_eeprom_operations_19(); ldv_state_variable_24 = 1; ldv_initialize_ixgbe_eeprom_operations_24(); ldv_state_variable_37 = 1; ldv_initialize_pci_error_handlers_37(); ldv_state_variable_34 = 1; ldv_initialize_ixgbe_mac_operations_34(); ldv_state_variable_20 = 1; ldv_initialize_ixgbe_mac_operations_20(); ldv_state_variable_15 = 1; ldv_state_variable_14 = 1; ldv_state_variable_12 = 1; ldv_file_operations_12(); ldv_state_variable_28 = 1; ldv_initialize_ixgbe_phy_operations_28(); ldv_state_variable_39 = 1; ldv_state_variable_25 = 1; ldv_initialize_ixgbe_mac_operations_25(); ldv_state_variable_27 = 1; ldv_state_variable_29 = 1; ldv_initialize_ixgbe_eeprom_operations_29(); ldv_state_variable_16 = 1; ldv_initialize_ixgbe_phy_operations_16(); ldv_state_variable_13 = 1; ldv_initialize_dcbnl_rtnl_ops_13(); ldv_state_variable_23 = 1; ldv_initialize_ixgbe_phy_operations_23(); ldv_state_variable_30 = 1; ldv_initialize_ixgbe_mac_operations_30(); ldv_state_variable_18 = 1; ldv_initialize_ixgbe_eeprom_operations_18(); ldv_state_variable_22 = 1; ldv_state_variable_17 = 1; ldv_initialize_ixgbe_phy_operations_17(); ldv_state_variable_26 = 1; ldv_initialize_ixgbe_mbx_operations_26(); ldv_state_variable_21 = 1; ldv_initialize_ixgbe_mac_operations_21(); ldv_state_variable_11 = 1; ldv_file_operations_11(); ldv_state_variable_32 = 1; ldv_initialize_ixgbe_phy_operations_32(); ldv_state_variable_33 = 1; 
ldv_initialize_ixgbe_eeprom_operations_33(); ldv_state_variable_35 = 1; ldv_initialize_ethtool_ops_35(); } else { } if (ldv_retval_1 != 0) { ldv_state_variable_0 = 2; goto ldv_final; } else { } } else { } goto ldv_60077; default: ldv_stop(); } ldv_60077: ; } else { } goto ldv_60052; case 23: ; if (ldv_state_variable_13 != 0) { ldv_main_exported_13(); } else { } goto ldv_60052; case 24: ; if (ldv_state_variable_23 != 0) { ldv_main_exported_23(); } else { } goto ldv_60052; case 25: ; if (ldv_state_variable_29 != 0) { ldv_main_exported_29(); } else { } goto ldv_60052; case 26: ; goto ldv_60052; case 27: ; if (ldv_state_variable_39 != 0) { tmp___23 = __VERIFIER_nondet_int(); switch (tmp___23) { case 0: ; if (ldv_state_variable_39 == 1) { ixgbe_notify_dca(ldvarg430, ldvarg432, ldvarg431); ldv_state_variable_39 = 1; } else { } goto ldv_60086; default: ldv_stop(); } ldv_60086: ; } else { } goto ldv_60052; case 28: ; if (ldv_state_variable_36 != 0) { tmp___24 = __VERIFIER_nondet_int(); switch (tmp___24) { case 0: ; if (ldv_state_variable_36 == 2 && pci_counter == 0) { ldv_retval_6 = ixgbe_suspend(ixgbe_driver_group1, ldvarg435); if (ldv_retval_6 == 0) { ldv_state_variable_36 = 3; } else { } } else { } goto ldv_60090; case 1: ; if (ldv_state_variable_36 == 4) { ldv_retval_5 = ixgbe_resume(ixgbe_driver_group1); if (ldv_retval_5 == 0) { ldv_state_variable_36 = 2; } else { } } else { } if (ldv_state_variable_36 == 3) { ldv_retval_5 = ixgbe_resume(ixgbe_driver_group1); if (ldv_retval_5 == 0) { ldv_state_variable_36 = 2; } else { } } else { } if (ldv_state_variable_36 == 5) { ldv_retval_5 = ixgbe_resume(ixgbe_driver_group1); if (ldv_retval_5 == 0) { ldv_state_variable_36 = 2; } else { } } else { } goto ldv_60090; case 2: ; if (ldv_state_variable_36 == 1) { ldv_retval_4 = ixgbe_probe(ixgbe_driver_group1, (struct pci_device_id const *)ldvarg434); if (ldv_retval_4 == 0) { ldv_state_variable_36 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_60090; case 3: ; if 
(ldv_state_variable_36 == 4) { ixgbe_shutdown(ixgbe_driver_group1); ldv_state_variable_36 = 4; } else { } if (ldv_state_variable_36 == 3) { ixgbe_shutdown(ixgbe_driver_group1); ldv_state_variable_36 = 3; } else { } if (ldv_state_variable_36 == 2) { ixgbe_shutdown(ixgbe_driver_group1); ldv_state_variable_36 = 2; } else { } if (ldv_state_variable_36 == 5) { ixgbe_shutdown(ixgbe_driver_group1); ldv_state_variable_36 = 5; } else { } goto ldv_60090; case 4: ; if (ldv_state_variable_36 == 4) { ixgbe_pci_sriov_configure(ixgbe_driver_group1, ldvarg433); ldv_state_variable_36 = 4; } else { } if (ldv_state_variable_36 == 1) { ixgbe_pci_sriov_configure(ixgbe_driver_group1, ldvarg433); ldv_state_variable_36 = 1; } else { } if (ldv_state_variable_36 == 3) { ixgbe_pci_sriov_configure(ixgbe_driver_group1, ldvarg433); ldv_state_variable_36 = 3; } else { } if (ldv_state_variable_36 == 2) { ixgbe_pci_sriov_configure(ixgbe_driver_group1, ldvarg433); ldv_state_variable_36 = 2; } else { } if (ldv_state_variable_36 == 5) { ixgbe_pci_sriov_configure(ixgbe_driver_group1, ldvarg433); ldv_state_variable_36 = 5; } else { } goto ldv_60090; case 5: ; if (ldv_state_variable_36 == 4) { ixgbe_remove(ixgbe_driver_group1); ldv_state_variable_36 = 1; } else { } if (ldv_state_variable_36 == 3) { ixgbe_remove(ixgbe_driver_group1); ldv_state_variable_36 = 1; } else { } if (ldv_state_variable_36 == 2) { ixgbe_remove(ixgbe_driver_group1); ldv_state_variable_36 = 1; } else { } if (ldv_state_variable_36 == 5) { ixgbe_remove(ixgbe_driver_group1); ldv_state_variable_36 = 1; } else { } goto ldv_60090; case 6: ; if (ldv_state_variable_36 == 3) { ldv_retval_3 = ldv_suspend_late_36(); if (ldv_retval_3 == 0) { ldv_state_variable_36 = 4; } else { } } else { } goto ldv_60090; case 7: ; if (ldv_state_variable_36 == 4) { ldv_retval_2 = ldv_resume_early_36(); if (ldv_retval_2 == 0) { ldv_state_variable_36 = 5; } else { } } else { } if (ldv_state_variable_36 == 3) { ldv_retval_2 = ldv_resume_early_36(); if 
(ldv_retval_2 == 0) { ldv_state_variable_36 = 5; } else { } } else { } goto ldv_60090; default: ldv_stop(); } ldv_60090: ; } else { } goto ldv_60052; case 29: ; if (ldv_state_variable_3 != 0) { choose_interrupt_3(); } else { } goto ldv_60052; case 30: ; goto ldv_60052; case 31: ; if (ldv_state_variable_12 != 0) { ldv_main_exported_12(); } else { } goto ldv_60052; case 32: ; if (ldv_state_variable_15 != 0) { ldv_main_exported_15(); } else { } goto ldv_60052; case 33: ; if (ldv_state_variable_38 != 0) { tmp___25 = __VERIFIER_nondet_int(); switch (tmp___25) { case 0: ; if (ldv_state_variable_38 == 1) { ixgbe_ioctl(ixgbe_netdev_ops_group1, ldvarg500, ldvarg499); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_ioctl(ixgbe_netdev_ops_group1, ldvarg500, ldvarg499); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_ioctl(ixgbe_netdev_ops_group1, ldvarg500, ldvarg499); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 1: ; if (ldv_state_variable_38 == 1) { ixgbe_ndo_get_vf_config(ixgbe_netdev_ops_group1, ldvarg498, ldvarg497); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_ndo_get_vf_config(ixgbe_netdev_ops_group1, ldvarg498, ldvarg497); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_ndo_get_vf_config(ixgbe_netdev_ops_group1, ldvarg498, ldvarg497); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 2: ; if (ldv_state_variable_38 == 2) { ldv_retval_9 = ixgbe_open(ixgbe_netdev_ops_group1); if (ldv_retval_9 == 0) { ldv_state_variable_38 = 3; } else { } } else { } goto ldv_60105; case 3: ; if (ldv_state_variable_38 == 3) { ixgbe_xmit_frame(ldvarg496, ixgbe_netdev_ops_group1); ldv_state_variable_38 = 3; } else { } goto ldv_60105; case 4: ; if (ldv_state_variable_38 == 1) { ixgbe_ndo_set_vf_spoofchk(ixgbe_netdev_ops_group1, ldvarg495, (int )ldvarg494); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { 
ixgbe_ndo_set_vf_spoofchk(ixgbe_netdev_ops_group1, ldvarg495, (int )ldvarg494); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_ndo_set_vf_spoofchk(ixgbe_netdev_ops_group1, ldvarg495, (int )ldvarg494); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 5: ; if (ldv_state_variable_38 == 1) { ixgbe_fix_features(ixgbe_netdev_ops_group1, ldvarg493); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_fix_features(ixgbe_netdev_ops_group1, ldvarg493); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_fix_features(ixgbe_netdev_ops_group1, ldvarg493); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 6: ; if (ldv_state_variable_38 == 3) { ixgbe_close(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 7: ; if (ldv_state_variable_38 == 1) { ixgbe_set_rx_mode(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_set_rx_mode(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_set_rx_mode(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 8: ; if (ldv_state_variable_38 == 1) { eth_validate_addr(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { eth_validate_addr(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { eth_validate_addr(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 9: ; if (ldv_state_variable_38 == 1) { ixgbe_fcoe_ddp_put(ixgbe_netdev_ops_group1, (int )ldvarg492); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_fcoe_ddp_put(ixgbe_netdev_ops_group1, (int )ldvarg492); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_fcoe_ddp_put(ixgbe_netdev_ops_group1, (int )ldvarg492); ldv_state_variable_38 = 2; } else { } goto 
ldv_60105; case 10: ; if (ldv_state_variable_38 == 1) { ixgbe_ndo_set_vf_vlan(ixgbe_netdev_ops_group1, ldvarg491, (int )ldvarg490, (int )ldvarg489); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_ndo_set_vf_vlan(ixgbe_netdev_ops_group1, ldvarg491, (int )ldvarg490, (int )ldvarg489); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_ndo_set_vf_vlan(ixgbe_netdev_ops_group1, ldvarg491, (int )ldvarg490, (int )ldvarg489); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 11: ; if (ldv_state_variable_38 == 1) { ixgbe_fwd_del(ixgbe_netdev_ops_group1, ldvarg488); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_fwd_del(ixgbe_netdev_ops_group1, ldvarg488); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_fwd_del(ixgbe_netdev_ops_group1, ldvarg488); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 12: ; if (ldv_state_variable_38 == 1) { ixgbe_fcoe_get_hbainfo(ixgbe_netdev_ops_group1, ldvarg487); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_fcoe_get_hbainfo(ixgbe_netdev_ops_group1, ldvarg487); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_fcoe_get_hbainfo(ixgbe_netdev_ops_group1, ldvarg487); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 13: ; if (ldv_state_variable_38 == 3) { ixgbe_change_mtu(ixgbe_netdev_ops_group1, ldvarg486); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_change_mtu(ixgbe_netdev_ops_group1, ldvarg486); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 14: ; if (ldv_state_variable_38 == 1) { ixgbe_ndo_set_vf_mac(ixgbe_netdev_ops_group1, ldvarg485, ldvarg484); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_ndo_set_vf_mac(ixgbe_netdev_ops_group1, ldvarg485, ldvarg484); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { 
ixgbe_ndo_set_vf_mac(ixgbe_netdev_ops_group1, ldvarg485, ldvarg484); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 15: ; if (ldv_state_variable_38 == 1) { ixgbe_ndo_set_vf_bw(ixgbe_netdev_ops_group1, ldvarg483, ldvarg482, ldvarg481); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_ndo_set_vf_bw(ixgbe_netdev_ops_group1, ldvarg483, ldvarg482, ldvarg481); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_ndo_set_vf_bw(ixgbe_netdev_ops_group1, ldvarg483, ldvarg482, ldvarg481); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 16: ; if (ldv_state_variable_38 == 1) { ixgbe_vlan_rx_kill_vid(ixgbe_netdev_ops_group1, (int )ldvarg480, (int )ldvarg479); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_vlan_rx_kill_vid(ixgbe_netdev_ops_group1, (int )ldvarg480, (int )ldvarg479); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_vlan_rx_kill_vid(ixgbe_netdev_ops_group1, (int )ldvarg480, (int )ldvarg479); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 17: ; if (ldv_state_variable_38 == 1) { ixgbe_ndo_bridge_setlink(ixgbe_netdev_ops_group1, ldvarg478, (int )ldvarg477); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_ndo_bridge_setlink(ixgbe_netdev_ops_group1, ldvarg478, (int )ldvarg477); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_ndo_bridge_setlink(ixgbe_netdev_ops_group1, ldvarg478, (int )ldvarg477); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 18: ; if (ldv_state_variable_38 == 1) { ixgbe_vlan_rx_add_vid(ixgbe_netdev_ops_group1, (int )ldvarg476, (int )ldvarg475); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_vlan_rx_add_vid(ixgbe_netdev_ops_group1, (int )ldvarg476, (int )ldvarg475); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_vlan_rx_add_vid(ixgbe_netdev_ops_group1, (int )ldvarg476, (int 
)ldvarg475); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 19: ; if (ldv_state_variable_38 == 1) { ixgbe_fwd_add(ixgbe_netdev_ops_group1, ixgbe_netdev_ops_group1); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_fwd_add(ixgbe_netdev_ops_group1, ixgbe_netdev_ops_group1); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_fwd_add(ixgbe_netdev_ops_group1, ixgbe_netdev_ops_group1); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 20: ; if (ldv_state_variable_38 == 1) { ixgbe_fcoe_ddp_target(ixgbe_netdev_ops_group1, (int )ldvarg474, ldvarg473, ldvarg472); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_fcoe_ddp_target(ixgbe_netdev_ops_group1, (int )ldvarg474, ldvarg473, ldvarg472); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_fcoe_ddp_target(ixgbe_netdev_ops_group1, (int )ldvarg474, ldvarg473, ldvarg472); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 21: ; if (ldv_state_variable_38 == 1) { ixgbe_fcoe_enable(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_fcoe_enable(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_fcoe_enable(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 22: ; if (ldv_state_variable_38 == 1) { ixgbe_ndo_bridge_getlink(ldvarg468, ldvarg470, ldvarg469, ixgbe_netdev_ops_group1, ldvarg471, ldvarg467); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_ndo_bridge_getlink(ldvarg468, ldvarg470, ldvarg469, ixgbe_netdev_ops_group1, ldvarg471, ldvarg467); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_ndo_bridge_getlink(ldvarg468, ldvarg470, ldvarg469, ixgbe_netdev_ops_group1, ldvarg471, ldvarg467); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 23: ; if (ldv_state_variable_38 == 1) { 
ixgbe_fcoe_ddp_get(ixgbe_netdev_ops_group1, (int )ldvarg466, ldvarg465, ldvarg464); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_fcoe_ddp_get(ixgbe_netdev_ops_group1, (int )ldvarg466, ldvarg465, ldvarg464); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_fcoe_ddp_get(ixgbe_netdev_ops_group1, (int )ldvarg466, ldvarg465, ldvarg464); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 24: ; if (ldv_state_variable_38 == 1) { ixgbe_fcoe_get_wwn(ixgbe_netdev_ops_group1, ldvarg463, ldvarg462); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_fcoe_get_wwn(ixgbe_netdev_ops_group1, ldvarg463, ldvarg462); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_fcoe_get_wwn(ixgbe_netdev_ops_group1, ldvarg463, ldvarg462); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 25: ; if (ldv_state_variable_38 == 1) { ixgbe_low_latency_recv(ldvarg461); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_low_latency_recv(ldvarg461); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_low_latency_recv(ldvarg461); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 26: ; if (ldv_state_variable_38 == 1) { ixgbe_del_vxlan_port(ixgbe_netdev_ops_group1, (int )ldvarg460, (int )ldvarg459); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_del_vxlan_port(ixgbe_netdev_ops_group1, (int )ldvarg460, (int )ldvarg459); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_del_vxlan_port(ixgbe_netdev_ops_group1, (int )ldvarg460, (int )ldvarg459); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 27: ; if (ldv_state_variable_38 == 1) { ixgbe_netpoll(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_netpoll(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { 
ixgbe_netpoll(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 28: ; if (ldv_state_variable_38 == 1) { ixgbe_ndo_fdb_add(ldvarg456, ldvarg457, ixgbe_netdev_ops_group1, (unsigned char const *)ldvarg455, (int )ldvarg458, (int )ldvarg454); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_ndo_fdb_add(ldvarg456, ldvarg457, ixgbe_netdev_ops_group1, (unsigned char const *)ldvarg455, (int )ldvarg458, (int )ldvarg454); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_ndo_fdb_add(ldvarg456, ldvarg457, ixgbe_netdev_ops_group1, (unsigned char const *)ldvarg455, (int )ldvarg458, (int )ldvarg454); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 29: ; if (ldv_state_variable_38 == 1) { ixgbe_ndo_set_vf_rss_query_en(ixgbe_netdev_ops_group1, ldvarg453, (int )ldvarg452); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_ndo_set_vf_rss_query_en(ixgbe_netdev_ops_group1, ldvarg453, (int )ldvarg452); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_ndo_set_vf_rss_query_en(ixgbe_netdev_ops_group1, ldvarg453, (int )ldvarg452); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 30: ; if (ldv_state_variable_38 == 1) { ixgbe_select_queue(ixgbe_netdev_ops_group1, ldvarg451, ldvarg450, ldvarg449); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_select_queue(ixgbe_netdev_ops_group1, ldvarg451, ldvarg450, ldvarg449); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_select_queue(ixgbe_netdev_ops_group1, ldvarg451, ldvarg450, ldvarg449); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 31: ; if (ldv_state_variable_38 == 1) { ixgbe_add_vxlan_port(ixgbe_netdev_ops_group1, (int )ldvarg448, (int )ldvarg447); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_add_vxlan_port(ixgbe_netdev_ops_group1, (int )ldvarg448, (int )ldvarg447); 
ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_add_vxlan_port(ixgbe_netdev_ops_group1, (int )ldvarg448, (int )ldvarg447); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 32: ; if (ldv_state_variable_38 == 1) { ixgbe_set_features(ixgbe_netdev_ops_group1, ldvarg446); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_set_features(ixgbe_netdev_ops_group1, ldvarg446); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_set_features(ixgbe_netdev_ops_group1, ldvarg446); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 33: ; if (ldv_state_variable_38 == 1) { ixgbe_fcoe_disable(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_fcoe_disable(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_fcoe_disable(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 34: ; if (ldv_state_variable_38 == 1) { ixgbe_set_mac(ixgbe_netdev_ops_group1, ldvarg445); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_set_mac(ixgbe_netdev_ops_group1, ldvarg445); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_set_mac(ixgbe_netdev_ops_group1, ldvarg445); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 35: ; if (ldv_state_variable_38 == 1) { ixgbe_setup_tc(ixgbe_netdev_ops_group1, (int )ldvarg444); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_setup_tc(ixgbe_netdev_ops_group1, (int )ldvarg444); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_setup_tc(ixgbe_netdev_ops_group1, (int )ldvarg444); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 36: ; if (ldv_state_variable_38 == 1) { ixgbe_get_stats64(ixgbe_netdev_ops_group1, ldvarg443); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { 
ixgbe_get_stats64(ixgbe_netdev_ops_group1, ldvarg443); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_get_stats64(ixgbe_netdev_ops_group1, ldvarg443); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 37: ; if (ldv_state_variable_38 == 1) { ixgbe_tx_timeout(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 1; } else { } if (ldv_state_variable_38 == 3) { ixgbe_tx_timeout(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 3; } else { } if (ldv_state_variable_38 == 2) { ixgbe_tx_timeout(ixgbe_netdev_ops_group1); ldv_state_variable_38 = 2; } else { } goto ldv_60105; case 38: ; if (ldv_state_variable_38 == 1) { ldv_retval_8 = ldv_ndo_init_38(); if (ldv_retval_8 == 0) { ldv_state_variable_38 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_60105; case 39: ; if (ldv_state_variable_38 == 2) { ldv_ndo_uninit_38(); ldv_state_variable_38 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_60105; default: ldv_stop(); } ldv_60105: ; } else { } goto ldv_60052; case 34: ; if (ldv_state_variable_8 != 0) { invoke_work_8(); } else { } goto ldv_60052; case 35: ; if (ldv_state_variable_4 != 0) { choose_interrupt_4(); } else { } goto ldv_60052; case 36: ; if (ldv_state_variable_34 != 0) { ldv_main_exported_34(); } else { } goto ldv_60052; case 37: ; if (ldv_state_variable_37 != 0) { tmp___26 = __VERIFIER_nondet_int(); switch (tmp___26) { case 0: ; if (ldv_state_variable_37 == 3) { ixgbe_io_resume(ixgbe_err_handler_group0); ldv_state_variable_37 = 2; } else { } goto ldv_60151; case 1: ; if (ldv_state_variable_37 == 1) { ixgbe_io_slot_reset(ixgbe_err_handler_group0); ldv_state_variable_37 = 1; } else { } if (ldv_state_variable_37 == 3) { ixgbe_io_slot_reset(ixgbe_err_handler_group0); ldv_state_variable_37 = 3; } else { } if (ldv_state_variable_37 == 2) { ixgbe_io_slot_reset(ixgbe_err_handler_group0); ldv_state_variable_37 = 2; } else { } goto ldv_60151; case 2: ; if (ldv_state_variable_37 == 1) { 
ixgbe_io_error_detected(ixgbe_err_handler_group0, (pci_channel_state_t )ldvarg553); ldv_state_variable_37 = 1; } else { } if (ldv_state_variable_37 == 3) { ixgbe_io_error_detected(ixgbe_err_handler_group0, (pci_channel_state_t )ldvarg553); ldv_state_variable_37 = 3; } else { } if (ldv_state_variable_37 == 2) { ixgbe_io_error_detected(ixgbe_err_handler_group0, (pci_channel_state_t )ldvarg553); ldv_state_variable_37 = 2; } else { } goto ldv_60151; case 3: ; if (ldv_state_variable_37 == 2) { ldv_suspend_37(); ldv_state_variable_37 = 3; } else { } goto ldv_60151; case 4: ; if (ldv_state_variable_37 == 3) { ldv_release_37(); ldv_state_variable_37 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_37 == 2) { ldv_release_37(); ldv_state_variable_37 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_60151; case 5: ; if (ldv_state_variable_37 == 1) { ldv_probe_37(); ldv_state_variable_37 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_60151; default: ldv_stop(); } ldv_60151: ; } else { } goto ldv_60052; case 38: ; if (ldv_state_variable_19 != 0) { ldv_main_exported_19(); } else { } goto ldv_60052; case 39: ; goto ldv_60052; default: ldv_stop(); } ldv_60052: ; goto ldv_60161; ldv_final: ldv_check_final_state(); return 0; } } __inline static void *ERR_PTR(long error ) { void *tmp ; { tmp = ldv_err_ptr(error); return (tmp); } } __inline static void spin_lock(spinlock_t *lock ) { { ldv_spin_lock(); ldv_spin_lock_5(lock); return; } } __inline static void spin_unlock(spinlock_t *lock ) { { ldv_spin_unlock(); ldv_spin_unlock_9(lock); return; } } bool ldv_queue_work_on_15(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_16(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct 
delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_17(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_18(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } } bool ldv_queue_delayed_work_on_19(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static void *kcalloc(size_t n , size_t size , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_kmem_cache_alloc_25(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static void *kzalloc(size_t size , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_31(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_33(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct 
sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_35(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_36(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_37(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_38(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_39(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_40(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_41(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_42(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static int ldv_request_irq_43(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) { ldv_func_ret_type___6 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = request_irq(irq, handler, flags, name, dev); ldv_func_res = tmp; tmp___0 = reg_check_7(handler); if (tmp___0 != 
0 && ldv_func_res == 0) { activate_suitable_irq_7((int )irq, dev); } else { } return (ldv_func_res); } } __inline static int ldv_request_irq_44(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) { ldv_func_ret_type___7 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = request_irq(irq, handler, flags, name, dev); ldv_func_res = tmp; tmp___0 = reg_check_7(handler); if (tmp___0 != 0 && ldv_func_res == 0) { activate_suitable_irq_7((int )irq, dev); } else { } return (ldv_func_res); } } void ldv_free_irq_45(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) { { free_irq(ldv_func_arg1, ldv_func_arg2); disable_suitable_irq_7((int )ldv_func_arg1, ldv_func_arg2); return; } } __inline static int ldv_request_irq_46(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) { ldv_func_ret_type___8 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = request_irq(irq, handler, flags, name, dev); ldv_func_res = tmp; tmp___0 = reg_check_7(handler); if (tmp___0 != 0 && ldv_func_res == 0) { activate_suitable_irq_7((int )irq, dev); } else { } return (ldv_func_res); } } __inline static int ldv_request_irq_47(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) { ldv_func_ret_type___9 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = request_irq(irq, handler, flags, name, dev); ldv_func_res = tmp; tmp___0 = reg_check_7(handler); if (tmp___0 != 0 && ldv_func_res == 0) { activate_suitable_irq_7((int )irq, dev); } else { } return (ldv_func_res); } } void ldv_free_irq_48(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) { { free_irq(ldv_func_arg1, ldv_func_arg2); disable_suitable_irq_7((int )ldv_func_arg1, ldv_func_arg2); return; } } void ldv_free_irq_49(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) { { free_irq(ldv_func_arg1, ldv_func_arg2); disable_suitable_irq_7((int )ldv_func_arg1, ldv_func_arg2); return; } } void 
ldv_free_irq_50(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) { { free_irq(ldv_func_arg1, ldv_func_arg2); disable_suitable_irq_7((int )ldv_func_arg1, ldv_func_arg2); return; } } int ldv_mod_timer_51(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) { ldv_func_ret_type___10 ldv_func_res ; int tmp ; { tmp = mod_timer(ldv_func_arg1, ldv_func_arg2); ldv_func_res = tmp; activate_pending_timer_10(ldv_func_arg1, ldv_func_arg2, 1); return (ldv_func_res); } } int ldv_del_timer_sync_52(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type___11 ldv_func_res ; int tmp ; { tmp = del_timer_sync(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_10(ldv_func_arg1); return (ldv_func_res); } } void *ldv_vzalloc_node_53(unsigned long ldv_func_arg1 , int ldv_func_arg2 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vzalloc_54(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vzalloc_node_55(unsigned long ldv_func_arg1 , int ldv_func_arg2 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vzalloc_56(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void ldv_unregister_netdev_57(struct net_device *dev ) { { unregister_netdev(dev); ldv_state_variable_38 = 0; return; } } int ldv_mod_timer_58(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) { ldv_func_ret_type___12 ldv_func_res ; int tmp ; { tmp = mod_timer(ldv_func_arg1, ldv_func_arg2); ldv_func_res = tmp; activate_pending_timer_10(ldv_func_arg1, ldv_func_arg2, 1); return (ldv_func_res); } } int ldv_register_netdev_59(struct net_device *dev ) { ldv_func_ret_type___13 ldv_func_res ; int tmp ; { tmp = register_netdev(dev); ldv_func_res = tmp; ldv_state_variable_38 = 1; ldv_net_device_ops_38(); return (ldv_func_res); } } void ldv_free_netdev_60(struct net_device 
*dev ) { { free_netdev(dev); ldv_state_variable_38 = 0; return; } } bool ldv_cancel_work_sync_61(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___14 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_9(ldv_func_arg1); return (ldv_func_res); } } void ldv_unregister_netdev_62(struct net_device *dev ) { { unregister_netdev(dev); ldv_state_variable_38 = 0; return; } } void ldv_free_netdev_63(struct net_device *dev ) { { free_netdev(dev); ldv_state_variable_38 = 0; return; } } int ldv___pci_register_driver_64(struct pci_driver *ldv_func_arg1 , struct module *ldv_func_arg2 , char const *ldv_func_arg3 ) { ldv_func_ret_type___15 ldv_func_res ; int tmp ; { tmp = __pci_register_driver(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; ldv_state_variable_36 = 1; ldv_pci_driver_36(); return (ldv_func_res); } } void ldv_pci_unregister_driver_65(struct pci_driver *ldv_func_arg1 ) { { pci_unregister_driver(ldv_func_arg1); ldv_state_variable_36 = 0; return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_104(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_106(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_105(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_108(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_107(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_114(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_131(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_122(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; 
struct sk_buff *ldv_skb_clone_130(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_124(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_120(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_128(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_129(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_125(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_126(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_127(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw ) ; s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw , u8 *mac_addr ) ; s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw ) ; void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw ) ; s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_led_on_generic(struct ixgbe_hw *hw , u32 index ) ; s32 ixgbe_led_off_generic(struct ixgbe_hw *hw , u32 index ) ; s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw , u16 offset , u16 data ) ; s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) ; s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw , u16 offset , u16 *data ) ; s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) ; s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw , u16 offset , u16 data ) ; s32 
ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) ; s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) ; s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw , u16 *checksum_val ) ; s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw , u32 index , u8 *addr , u32 vmdq , u32 enable_addr ) ; s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw , u32 index ) ; s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw , struct net_device *netdev ) ; s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw , u32 regval ) ; s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw ) ; void ixgbe_fc_autoneg(struct ixgbe_hw *hw ) ; s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw , u32 mask ) ; void ixgbe_release_swfw_sync(struct ixgbe_hw *hw , u32 mask ) ; s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw , u8 *san_mac_addr ) ; s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw , u32 rar , u32 vmdq ) ; s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw , u32 vmdq ) ; s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw , u32 rar , u32 vmdq ) ; s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw , u32 vlan , u32 vind , bool vlan_on ) ; s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw , ixgbe_link_speed *speed , bool *link_up , bool link_up_wait_to_complete ) ; s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw , u16 *wwnn_prefix , u16 *wwpn_prefix ) ; s32 prot_autoc_read_generic(struct 
ixgbe_hw *hw , bool *locked , u32 *reg_val ) ; s32 prot_autoc_write_generic(struct ixgbe_hw *hw , u32 reg_val , bool locked ) ; s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw , u32 index ) ; s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw , u32 index ) ; void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw , bool enable , int pf ) ; void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw , bool enable , int vf ) ; s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw , u16 *device_caps ) ; s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw , u8 maj , u8 min , u8 build , u8 sub ) ; s32 ixgbe_host_interface_command(struct ixgbe_hw *hw , u32 *buffer , u32 length , u32 timeout , bool return_data ) ; void ixgbe_clear_tx_pending(struct ixgbe_hw *hw ) ; void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw , int num_pb , u32 headroom , int strategy ) ; u32 const ixgbe_mvals_8259X[24U] ; s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw ) ; void ixgbe_disable_rx_generic(struct ixgbe_hw *hw ) ; void ixgbe_enable_rx_generic(struct ixgbe_hw *hw ) ; static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw ) ; static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw ) ; static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw ) ; static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw ) ; static void ixgbe_standby_eeprom(struct ixgbe_hw *hw ) ; static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw , u16 data , u16 count ) ; static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw , u16 count ) ; static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw , u32 *eec ) ; static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw , u32 *eec ) ; static void ixgbe_release_eeprom(struct ixgbe_hw *hw ) ; static s32 ixgbe_mta_vector(struct ixgbe_hw *hw , u8 *mc_addr ) ; static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw , u32 ee_reg ) ; static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw 
*hw , u16 offset , u16 words , u16 *data ) ; static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) ; static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw , u16 offset ) ; static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw ) ;
/* Constant table for 8259x-family MACs; values look like CIL-resolved register
 * offsets and bit masks indexed by the driver's mvals enum — NOTE(review):
 * confirm ordering against ixgbe_type.h before relying on any entry. */
u32 const ixgbe_mvals_8259X[24U] = { 65552U, 65564U, 66048U, 66064U, 65872U, 65856U, 65888U, 65864U, 1U, 2U, 4U, 16777216U, 33554432U, 67108864U, 69768U, 69772U, 1U, 2U, 4U, 8U, 0U, 0U, 0U, 40U};
/* ixgbe_device_supports_autoneg_fc - report whether the device can autonegotiate
 * flow control.  Switches on hw->phy.media_type (CIL has folded the enum to raw
 * values; 1U/4U/5U presumably fiber/copper/backplane — confirm against
 * enum ixgbe_media_type).  For media type 1U: if link is up, autoneg FC is
 * supported only at speed 32U (matches IXGBE_LINK_SPEED_1GB_FULL, 0x20);
 * if link is down, assume supported.  Media type 5U is always supported.
 * Media type 4U is supported only for the listed device IDs (decimal-folded
 * PCI device IDs — verify against ixgbe device ID macros). */
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw ) { bool supported ; ixgbe_link_speed speed ; bool link_up ; { supported = 0; switch ((unsigned int )hw->phy.media_type) { case 1U: (*(hw->mac.ops.check_link))(hw, & speed, & link_up, 0);
/* link up: supported iff exactly 1Gb full; link down: assume supported */
if ((int )link_up) { supported = speed == 32U; } else { supported = 1; } goto ldv_55485; case 5U: supported = 1; goto ldv_55485; case 4U: ;
/* inner switch: allowlist of copper device IDs; cases fall through to the
 * shared "supported = 1" except default */
switch ((int )hw->device_id) { case 5404: ; case 5416: ; case 5472: ; case 5475: ; case 5549: supported = 1; goto ldv_55493; default: ; goto ldv_55493; } ldv_55493: ; default: ; goto ldv_55485; } ldv_55485: ; return (supported); } }
/* ixgbe_setup_fc - set up flow control (body continues on following lines).
 * ixgbe_fc_rx_pause (requested_mode == 1U) is rejected in strict IEEE mode,
 * as the debug format string below states. */
static s32 ixgbe_setup_fc(struct ixgbe_hw *hw ) { s32 ret_val ; u32 reg ; u32 reg_bp ; u16 reg_cu ; bool locked ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; bool tmp___2 ; struct _ddebug descriptor___2 ; long tmp___3 ; { ret_val = 0; reg = 0U; reg_bp = 0U; reg_cu = 0U; locked = 0; if ((int )hw->fc.strict_ieee && (unsigned int )hw->fc.requested_mode == 1U) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_setup_fc"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"; descriptor.lineno = 131U; 
descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); } else { } return (-13); } else { } if ((unsigned int )hw->fc.requested_mode == 4U) { hw->fc.requested_mode = 3; } else { } switch ((unsigned int )hw->phy.media_type) { case 5U: ret_val = (*(hw->mac.ops.prot_autoc_read))(hw, & locked, & reg_bp); if (ret_val != 0) { return (ret_val); } else { } case 1U: reg = ixgbe_read_reg(hw, 16920U); goto ldv_55508; case 4U: (*(hw->phy.ops.read_reg))(hw, 16U, 7U, & reg_cu); goto ldv_55508; default: ; goto ldv_55508; } ldv_55508: ; switch ((unsigned int )hw->fc.requested_mode) { case 0U: reg = reg & 4294966911U; if ((unsigned int )hw->phy.media_type == 5U) { reg_bp = reg_bp & 3489660927U; } else if ((unsigned int )hw->phy.media_type == 4U) { reg_cu = (unsigned int )reg_cu & 62463U; } else { } goto ldv_55512; case 2U: reg = reg | 256U; reg = reg & 4294967167U; if ((unsigned int )hw->phy.media_type == 5U) { reg_bp = reg_bp | 536870912U; reg_bp = reg_bp & 4026531839U; } else if ((unsigned int )hw->phy.media_type == 4U) { reg_cu = (u16 )((unsigned int )reg_cu | 2048U); reg_cu = (unsigned int )reg_cu & 64511U; } else { } goto ldv_55512; case 1U: ; case 3U: reg = reg | 384U; if ((unsigned int )hw->phy.media_type == 5U) { reg_bp = reg_bp | 805306368U; } else if ((unsigned int )hw->phy.media_type == 4U) { reg_cu = (u16 )((unsigned int )reg_cu | 3072U); } else { } goto ldv_55512; default: descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_setup_fc"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___0.format = "Flow control param set incorrectly\n"; 
descriptor___0.lineno = 222U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flow control param set incorrectly\n"); } else { } return (-4); } ldv_55512: ; if ((unsigned int )hw->mac.type != 3U) { ixgbe_write_reg(hw, 16920U, reg); reg = ixgbe_read_reg(hw, 16904U); if ((int )hw->fc.strict_ieee) { reg = reg & 4294705151U; } else { } ixgbe_write_reg(hw, 16904U, reg); descriptor___1.modname = "ixgbe"; descriptor___1.function = "ixgbe_setup_fc"; descriptor___1.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___1.format = "Set up FC; PCS1GLCTL = 0x%08X\n"; descriptor___1.lineno = 239U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); } else { } } else { } if ((unsigned int )hw->phy.media_type == 5U) { ret_val = (*(hw->mac.ops.prot_autoc_write))(hw, reg_bp, (int )locked); if (ret_val != 0) { return (ret_val); } else { } } else if ((unsigned int )hw->phy.media_type == 4U) { tmp___2 = ixgbe_device_supports_autoneg_fc(hw); if ((int )tmp___2) { (*(hw->phy.ops.write_reg))(hw, 16U, 7U, (int )reg_cu); } else { } } else { } descriptor___2.modname = "ixgbe"; descriptor___2.function = "ixgbe_setup_fc"; descriptor___2.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___2.format = "Set up FC; 
IXGBE_AUTOC = 0x%08X\n"; descriptor___2.lineno = 262U; descriptor___2.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); } else { } return (ret_val); } } s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw ) { s32 ret_val ; u32 ctrl_ext ; { hw->phy.media_type = (*(hw->mac.ops.get_media_type))(hw); (*(hw->phy.ops.identify))(hw); (*(hw->mac.ops.clear_vfta))(hw); (*(hw->mac.ops.clear_hw_cntrs))(hw); ctrl_ext = ixgbe_read_reg(hw, 24U); ctrl_ext = ctrl_ext | 65536U; ixgbe_write_reg(hw, 24U, ctrl_ext); ixgbe_read_reg(hw, 8U); ret_val = ixgbe_setup_fc(hw); if (ret_val == 0) { return (0); } else { } hw->adapter_stopped = 0; return (ret_val); } } s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw ) { u32 i ; u32 regval ; u32 regval___0 ; { i = 0U; goto ldv_55530; ldv_55529: ixgbe_write_reg(hw, 18692U, i); ixgbe_write_reg(hw, 18820U, 0U); i = i + 1U; ldv_55530: ; if (hw->mac.max_tx_queues > i) { goto ldv_55529; } else { } ixgbe_read_reg(hw, 8U); i = 0U; goto ldv_55534; ldv_55533: regval = ixgbe_read_reg(hw, i * 64U + 24588U); regval = regval & 4294965247U; ixgbe_write_reg(hw, i * 64U + 24588U, regval); i = i + 1U; ldv_55534: ; if (hw->mac.max_tx_queues > i) { goto ldv_55533; } else { } i = 0U; goto ldv_55538; ldv_55537: regval___0 = ixgbe_read_reg(hw, i <= 15U ? (i + 2176U) * 4U : (i <= 63U ? i * 64U + 4108U : (i + 67108800U) * 64U + 53260U)); regval___0 = regval___0 & 4294926335U; ixgbe_write_reg(hw, i <= 15U ? (i + 2176U) * 4U : (i <= 63U ? 
i * 64U + 4108U : (i + 67108800U) * 64U + 53260U), regval___0); i = i + 1U; ldv_55538: ; if (hw->mac.max_rx_queues > i) { goto ldv_55537; } else { } return (0); } }
/* ixgbe_init_hw_generic - reset the hardware via mac.ops.reset_hw and, only
 * if the reset succeeded (status == 0), start it via mac.ops.start_hw.
 * Returns the first non-zero status, or 0 on full success. */
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw ) { s32 status ; { status = (*(hw->mac.ops.reset_hw))(hw); if (status == 0) { status = (*(hw->mac.ops.start_hw))(hw); } else { } return (status); } }
/* ixgbe_clear_hw_cntrs_generic - clear the HW statistics counters by reading
 * them (the reads appear to be read-to-clear; register offsets below are
 * CIL-resolved constants — NOTE(review): verify against the ixgbe register
 * map).  Several loops branch on hw->mac.type to pick per-generation
 * register banks.  Body continues on the following line; always returns 0. */
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw ) { u16 i ; { i = 0U; ixgbe_read_reg(hw, 16384U); ixgbe_read_reg(hw, 16388U); ixgbe_read_reg(hw, 16392U); ixgbe_read_reg(hw, 16400U);
/* 8-entry counter array at (i + 4072) * 4 */
i = 0U; goto ldv_55549; ldv_55548: ixgbe_read_reg(hw, (u32 )(((int )i + 4072) * 4)); i = (u16 )((int )i + 1); ldv_55549: ; if ((unsigned int )i <= 7U) { goto ldv_55548; } else { } ixgbe_read_reg(hw, 16436U); ixgbe_read_reg(hw, 16440U); ixgbe_read_reg(hw, 16448U); ixgbe_read_reg(hw, 16224U); ixgbe_read_reg(hw, 16232U);
/* newer MAC generations (type > 1U) use a different pair of registers */
if ((unsigned int )hw->mac.type > 1U) { ixgbe_read_reg(hw, 16804U); ixgbe_read_reg(hw, 16808U); } else { ixgbe_read_reg(hw, 53088U); ixgbe_read_reg(hw, 53096U); }
/* 8-entry per-TC counter pairs; bank selected by mac.type */
i = 0U; goto ldv_55552; ldv_55551: ixgbe_read_reg(hw, (u32 )(((int )i + 4032) * 4)); ixgbe_read_reg(hw, (u32 )(((int )i + 4040) * 4)); if ((unsigned int )hw->mac.type > 1U) { ixgbe_read_reg(hw, (u32 )(((int )i + 4176) * 4)); ixgbe_read_reg(hw, (u32 )(((int )i + 4184) * 4)); } else { ixgbe_read_reg(hw, (u32 )(((int )i + 13248) * 4)); ixgbe_read_reg(hw, (u32 )(((int )i + 13256) * 4)); } i = (u16 )((int )i + 1); ldv_55552: ; if ((unsigned int )i <= 7U) { goto ldv_55551; } else { }
/* extra 8-entry array present only on newer generations */
if ((unsigned int )hw->mac.type > 1U) { i = 0U; goto ldv_55555; ldv_55554: ixgbe_read_reg(hw, (u32 )(((int )i + 3216) * 4)); i = (u16 )((int )i + 1); ldv_55555: ; if ((unsigned int )i <= 7U) { goto ldv_55554; } else { } } else { } ixgbe_read_reg(hw, 16476U); ixgbe_read_reg(hw, 16480U); ixgbe_read_reg(hw, 16484U); ixgbe_read_reg(hw, 16488U); ixgbe_read_reg(hw, 16492U); ixgbe_read_reg(hw, 16496U); ixgbe_read_reg(hw, 16500U); ixgbe_read_reg(hw, 16504U); ixgbe_read_reg(hw, 16508U); 
/* continuation of ixgbe_clear_hw_cntrs_generic: remaining global counters,
 * an 8-slot loop for mac.type == 1U, then a 16-queue per-queue counter loop */
ixgbe_read_reg(hw, 16512U); ixgbe_read_reg(hw, 16520U); ixgbe_read_reg(hw, 16524U); ixgbe_read_reg(hw, 16528U); ixgbe_read_reg(hw, 16532U); if ((unsigned int )hw->mac.type == 1U) { i = 0U; goto ldv_55558; ldv_55557: ixgbe_read_reg(hw, (u32 )(((int )i + 4080) * 4)); i = (u16 )((int )i + 1); ldv_55558: ; if ((unsigned int )i <= 7U) { goto ldv_55557; } else { } } else { } ixgbe_read_reg(hw, 16548U); ixgbe_read_reg(hw, 16552U); ixgbe_read_reg(hw, 16556U); ixgbe_read_reg(hw, 16560U); ixgbe_read_reg(hw, 16564U); ixgbe_read_reg(hw, 16568U); ixgbe_read_reg(hw, 53136U); ixgbe_read_reg(hw, 16576U); ixgbe_read_reg(hw, 16580U); ixgbe_read_reg(hw, 16592U); ixgbe_read_reg(hw, 16596U); ixgbe_read_reg(hw, 16600U); ixgbe_read_reg(hw, 16604U); ixgbe_read_reg(hw, 16608U); ixgbe_read_reg(hw, 16612U); ixgbe_read_reg(hw, 16616U); ixgbe_read_reg(hw, 16620U); ixgbe_read_reg(hw, 16624U); ixgbe_read_reg(hw, 16628U); i = 0U; goto ldv_55561; ldv_55560: ixgbe_read_reg(hw, (u32 )((int )i * 64 + 4144)); ixgbe_read_reg(hw, (u32 )((int )i * 64 + 24624)); if ((unsigned int )hw->mac.type > 1U) { ixgbe_read_reg(hw, (u32 )((int )i * 64 + 4148)); ixgbe_read_reg(hw, (u32 )((int )i * 64 + 4152)); ixgbe_read_reg(hw, (u32 )(((int )i + 4320) * 8)); ixgbe_read_reg(hw, (u32 )((int )i * 8 + 34564)); ixgbe_read_reg(hw, (u32 )((int )i * 64 + 5168)); } else { ixgbe_read_reg(hw, (u32 )((int )i * 64 + 4148)); ixgbe_read_reg(hw, (u32 )((int )i * 64 + 24628)); } i = (u16 )((int )i + 1); ldv_55561: ; if ((unsigned int )i <= 15U) { goto ldv_55560; } else { } if ((unsigned int )hw->mac.type == 4U || (unsigned int )hw->mac.type == 3U) { if (hw->phy.id == 0U) { (*(hw->phy.ops.identify))(hw); } else { } (*(hw->phy.ops.read_reg))(hw, 59408U, 3U, & i); (*(hw->phy.ops.read_reg))(hw, 59409U, 3U, & i); (*(hw->phy.ops.read_reg))(hw, 59424U, 3U, & i); (*(hw->phy.ops.read_reg))(hw, 59425U, 3U, & i); } else { } return (0); } }
/* ixgbe_read_pba_string_generic - read the Printed Board Assembly string from
 * the EEPROM into pba_num (buffer of pba_num_size bytes); locals and body
 * continue on the next source lines */
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw , u8 *pba_num , u32 pba_num_size ) { s32 ret_val ;
/* locals of ixgbe_read_pba_string_generic; the descriptor___N/tmp___N pairs
 * are CIL-expanded dynamic-debug call sites */
u16 data ; u16 pba_ptr ; u16 offset ; u16 length ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; struct _ddebug descriptor___2 ; long tmp___2 ; struct _ddebug descriptor___3 ; long tmp___3 ; struct _ddebug descriptor___4 ; long tmp___4 ; struct _ddebug descriptor___5 ; long tmp___5 ; struct _ddebug descriptor___6 ; long tmp___6 ; struct _ddebug descriptor___7 ; long tmp___7 ; {
/* NULL output buffer: log and return -32 (invalid argument) */
if ((unsigned long )pba_num == (unsigned long )((u8 *)0U)) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_read_pba_string_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "PBA string buffer was null\n"; descriptor.lineno = 503U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "PBA string buffer was null\n"); } else { } return (-32); } else { }
/* read EEPROM word 21 (0x15) -- the PBA guard word -- and fail on read error */
ret_val = (*(hw->eeprom.ops.read))(hw, 21, & data); if (ret_val != 0) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_read_pba_string_generic"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___0.format = "NVM Read Error\n"; descriptor___0.lineno = 509U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "NVM Read Error\n"); } else { } return (ret_val); } else
{ }
/* read EEPROM word 22 (0x16): pointer to the PBA section (or the second
 * legacy hex word when the guard does not match) */
ret_val = (*(hw->eeprom.ops.read))(hw, 22, & pba_ptr); if (ret_val != 0) { descriptor___1.modname = "ixgbe"; descriptor___1.function = "ixgbe_read_pba_string_generic"; descriptor___1.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___1.format = "NVM Read Error\n"; descriptor___1.lineno = 515U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "NVM Read Error\n"); } else { } return (ret_val); } else { }
/* guard word 64250U (0xFAFA) missing => legacy layout: the PBA is encoded as
 * two hex words, needing an 11-byte output buffer (else -25, "no space") */
if ((unsigned int )data != 64250U) { descriptor___2.modname = "ixgbe"; descriptor___2.function = "ixgbe_read_pba_string_generic"; descriptor___2.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___2.format = "NVM PBA number is not stored as string\n"; descriptor___2.lineno = 525U; descriptor___2.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "NVM PBA number is not stored as string\n"); } else { } if (pba_num_size <= 10U) { descriptor___3.modname = "ixgbe"; descriptor___3.function = "ixgbe_read_pba_string_generic"; descriptor___3.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___3.format = "PBA string buffer too small\n";
descriptor___3.lineno = 529U; descriptor___3.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_netdev_dbg(& descriptor___3, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "PBA string buffer too small\n"); } else { } return (-25); } else { }
/* legacy layout: unpack the two hex words nibble-by-nibble into the pattern
 * "XXXXXX-0XX" (45U is '-'; positions 7 and 10 are literal zero bytes) */
*pba_num = (unsigned int )((u8 )((int )data >> 12)) & 15U; *(pba_num + 1UL) = (unsigned int )((u8 )((int )data >> 8)) & 15U; *(pba_num + 2UL) = (unsigned int )((u8 )((int )data >> 4)) & 15U; *(pba_num + 3UL) = (unsigned int )((u8 )data) & 15U; *(pba_num + 4UL) = (unsigned int )((u8 )((int )pba_ptr >> 12)) & 15U; *(pba_num + 5UL) = (unsigned int )((u8 )((int )pba_ptr >> 8)) & 15U; *(pba_num + 6UL) = 45U; *(pba_num + 7UL) = 0U; *(pba_num + 8UL) = (unsigned int )((u8 )((int )pba_ptr >> 4)) & 15U; *(pba_num + 9UL) = (unsigned int )((u8 )pba_ptr) & 15U; *(pba_num + 10UL) = 0U;
/* convert the raw nibbles 0..15 in slots 0..9 to ASCII hex digits
 * (+48 = '0' for 0-9, +55 = 'A'-10 for 10-15; the '-' at slot 6 is > 15
 * and is left untouched) */
offset = 0U; goto ldv_55580; ldv_55579: ; if ((unsigned int )*(pba_num + (unsigned long )offset) <= 9U) { *(pba_num + (unsigned long )offset) = (unsigned int )*(pba_num + (unsigned long )offset) + 48U; } else if ((unsigned int )*(pba_num + (unsigned long )offset) <= 15U) { *(pba_num + (unsigned long )offset) = (unsigned int )*(pba_num + (unsigned long )offset) + 55U; } else { } offset = (u16 )((int )offset + 1); ldv_55580: ; if ((unsigned int )offset <= 9U) { goto ldv_55579; } else { } return (0); } else { }
/* string layout: first word at pba_ptr is the section length in words */
ret_val = (*(hw->eeprom.ops.read))(hw, (int )pba_ptr, & length); if (ret_val != 0) { descriptor___4.modname = "ixgbe"; descriptor___4.function = "ixgbe_read_pba_string_generic"; descriptor___4.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___4.format = "NVM Read Error\n"; descriptor___4.lineno = 561U; descriptor___4.flags = 0U; tmp___4 = ldv__builtin_expect((long
)descriptor___4.flags & 1L, 0L); if (tmp___4 != 0L) { __dynamic_netdev_dbg(& descriptor___4, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "NVM Read Error\n"); } else { } return (ret_val); } else { }
/* reject an obviously invalid section length (0 or erased-flash 0xFFFF):
 * -31 is the "bad PBA section" error code */
if ((unsigned int )length == 65535U || (unsigned int )length == 0U) { descriptor___5.modname = "ixgbe"; descriptor___5.function = "ixgbe_read_pba_string_generic"; descriptor___5.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___5.format = "NVM PBA number section invalid length\n"; descriptor___5.lineno = 566U; descriptor___5.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___5.flags & 1L, 0L); if (tmp___5 != 0L) { __dynamic_netdev_dbg(& descriptor___5, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "NVM PBA number section invalid length\n"); } else { } return (-31); } else { }
/* each data word yields 2 chars; (length*2 - 1) includes the trailing NUL
 * (the length word itself is excluded below) */
if ((unsigned int )length * 2U - 1U > pba_num_size) { descriptor___6.modname = "ixgbe"; descriptor___6.function = "ixgbe_read_pba_string_generic"; descriptor___6.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___6.format = "PBA string buffer too small\n"; descriptor___6.lineno = 572U; descriptor___6.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___6.flags & 1L, 0L); if (tmp___6 != 0L) { __dynamic_netdev_dbg(& descriptor___6, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "PBA string buffer too small\n"); } else { } return (-25); } else { }
/* skip the length word, then copy length-1 words (big-endian byte order)
 * into the output buffer */
pba_ptr = (u16 )((int )pba_ptr + 1); length = (u16 )((int )length - 1); offset = 0U; goto ldv_55587; ldv_55586: ret_val = (*(hw->eeprom.ops.read))(hw, (int )pba_ptr
+ (int )offset, & data); if (ret_val != 0) { descriptor___7.modname = "ixgbe"; descriptor___7.function = "ixgbe_read_pba_string_generic"; descriptor___7.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___7.format = "NVM Read Error\n"; descriptor___7.lineno = 583U; descriptor___7.flags = 0U; tmp___7 = ldv__builtin_expect((long )descriptor___7.flags & 1L, 0L); if (tmp___7 != 0L) { __dynamic_netdev_dbg(& descriptor___7, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "NVM Read Error\n"); } else { } return (ret_val); } else { } *(pba_num + (unsigned long )((int )offset * 2)) = (unsigned char )((int )data >> 8); *(pba_num + ((unsigned long )((int )offset * 2) + 1UL)) = (unsigned char )data; offset = (u16 )((int )offset + 1); ldv_55587: ; if ((int )offset < (int )length) { goto ldv_55586; } else { } *(pba_num + (unsigned long )((int )offset * 2)) = 0U; return (0); } }
/* ixgbe_get_mac_addr_generic - assemble the 6-byte MAC address from receive
 * address registers 0x5404 (high word, 2 bytes) and 0x5400 (low word,
 * 4 bytes), least-significant byte first */
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw , u8 *mac_addr ) { u32 rar_high ; u32 rar_low ; u16 i ; { rar_high = ixgbe_read_reg(hw, 21508U); rar_low = ixgbe_read_reg(hw, 21504U); i = 0U; goto ldv_55597; ldv_55596: *(mac_addr + (unsigned long )i) = (unsigned char )(rar_low >> (int )i * 8); i = (u16 )((int )i + 1); ldv_55597: ; if ((unsigned int )i <= 3U) { goto ldv_55596; } else { } i = 0U; goto ldv_55600; ldv_55599: *(mac_addr + ((unsigned long )i + 4UL)) = (unsigned char )(rar_high >> (int )i * 8); i = (u16 )((int )i + 1); ldv_55600: ; if ((unsigned int )i <= 1U) { goto ldv_55599; } else { } return (0); } }
/* ixgbe_convert_bus_width - decode the PCIe negotiated link width field
 * (link_status & 0x3F0) into a lane count (x1/x2/x4/x8, 0 = unknown) */
enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status ) { { switch ((int )link_status & 1008) { case 16: ; return (1); case 32: ; return (2); case 64: ; return (4); case 128: ; return (8); default: ; return (0); } } }
/* ixgbe_convert_bus_speed - decode the link speed field (continues on the
 * next source line) */
enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16
link_status ) { {
/* low 4 bits of the PCIe link-status word: 1/2/3 => 2.5/5/8 GT/s */
switch ((int )link_status & 15) { case 1: ; return (2500); case 2: ; return (5000); case 3: ; return (8000); default: ; return (0); } } }
/* ixgbe_get_bus_info_generic - read the PCIe link-status config word at
 * offset 0xB2, record bus type/width/speed, then let the MAC op derive the
 * LAN id */
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw ) { u16 link_status ; { hw->bus.type = 3; link_status = ixgbe_read_pci_cfg_word(hw, 178U); hw->bus.width = ixgbe_convert_bus_width((int )link_status); hw->bus.speed = ixgbe_convert_bus_speed((int )link_status); (*(hw->mac.ops.set_lan_id))(hw); return (0); } }
/* ixgbe_set_lan_id_multi_port_pcie - func = bits 3:2 of the status register
 * (0x8); if bit 30 of the hw->mvals[4] register is set the two LAN functions
 * are swapped, so flip the low bit of func */
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw ) { struct ixgbe_bus_info *bus ; u32 reg ; { bus = & hw->bus; reg = ixgbe_read_reg(hw, 8U); bus->func = (u16 )((reg & 12U) >> 2); bus->lan_id = bus->func; reg = ixgbe_read_reg(hw, *(hw->mvals + 4UL)); if ((reg & 1073741824U) != 0U) { bus->func = (u16 )((unsigned int )bus->func ^ 1U); } else { } return; } }
/* ixgbe_stop_adapter_generic - mark the adapter stopped (flag cleared again
 * by ixgbe_start_hw_generic), disable Rx, mask all interrupts via 0x888 and
 * drain 0x800, then flush every Tx queue (bit 26 of the per-queue TXDCTL at
 * i*64+24616) and disable every Rx queue; continues on the next source line */
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw ) { u32 reg_val ; u16 i ; s32 tmp ; { hw->adapter_stopped = 1; (*(hw->mac.ops.disable_rx))(hw); ixgbe_write_reg(hw, 2184U, 4294967295U); ixgbe_read_reg(hw, 2048U); i = 0U; goto ldv_55632; ldv_55631: ixgbe_write_reg(hw, (u32 )((int )i * 64 + 24616), 67108864U); i = (u16 )((int )i + 1); ldv_55632: ; if ((u32 )i < hw->mac.max_tx_queues) { goto ldv_55631; } else { } i = 0U; goto ldv_55635; ldv_55634: reg_val = ixgbe_read_reg(hw, (u32 )((unsigned int )i <= 63U ? (int )i * 64 + 4136 : ((int )i + -64) * 64 + 53288)); reg_val = reg_val & 4261412863U; reg_val = reg_val | 67108864U; ixgbe_write_reg(hw, (u32 )((unsigned int )i <= 63U ?
(int )i * 64 + 4136 : ((int )i + -64) * 64 + 53288), reg_val); i = (u16 )((int )i + 1); ldv_55635: ; if ((u32 )i < hw->mac.max_rx_queues) { goto ldv_55634; } else { }
/* flush, settle 1-2 ms, then hand off to ixgbe_disable_pcie_master() */
ixgbe_read_reg(hw, 8U); usleep_range(1000UL, 2000UL); tmp = ixgbe_disable_pcie_master(hw); return (tmp); } }
/* ixgbe_led_on_generic - in LEDCTL (0x200), clear the 4-bit mode field of
 * LED `index` and write mode 14 (LED forced on); flush with a read of 0x8 */
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw , u32 index ) { u32 led_reg ; u32 tmp ; { tmp = ixgbe_read_reg(hw, 512U); led_reg = tmp; led_reg = (u32 )(~ (15 << (int )(index * 8U))) & led_reg; led_reg = (u32 )(14 << (int )(index * 8U)) | led_reg; ixgbe_write_reg(hw, 512U, led_reg); ixgbe_read_reg(hw, 8U); return (0); } }
/* ixgbe_led_off_generic - same as led_on but writes mode 15 (LED forced off) */
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw , u32 index ) { u32 led_reg ; u32 tmp ; { tmp = ixgbe_read_reg(hw, 512U); led_reg = tmp; led_reg = (u32 )(~ (15 << (int )(index * 8U))) & led_reg; led_reg = (u32 )(15 << (int )(index * 8U)) | led_reg; ixgbe_write_reg(hw, 512U, led_reg); ixgbe_read_reg(hw, 8U); return (0); } }
/* ixgbe_init_eeprom_params_generic - one-time probe of the EEPROM interface:
 * only runs while eeprom->type is still 0 (uninitialized).  EEC (hw->mvals[0])
 * bit 8 => SPI EEPROM present, size field = bits 14:11 (word_size =
 * 1 << (field + 6)), bit 10 selects 16- vs 8-bit addressing.  Continues on
 * the next source line. */
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw ) { struct ixgbe_eeprom_info *eeprom ; u32 eec ; u16 eeprom_size ; struct _ddebug descriptor ; long tmp ; { eeprom = & hw->eeprom; if ((unsigned int )eeprom->type == 0U) { eeprom->type = 3; eeprom->semaphore_delay = 10U; eeprom->word_page_size = 0U; eec = ixgbe_read_reg(hw, *(hw->mvals)); if ((eec & 256U) != 0U) { eeprom->type = 1; eeprom_size = (unsigned short )((eec & 30720U) >> 11); eeprom->word_size = (u16 )(1 << ((int )eeprom_size + 6)); } else { } if ((eec & 1024U) != 0U) { eeprom->address_bits = 16U; } else { eeprom->address_bits = 8U; } descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_init_eeprom_params_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "Eeprom params: type = %d, size = %d, address bits: %d\n"; descriptor.lineno = 828U; descriptor.flags = 0U;
tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Eeprom params: type = %d, size = %d, address bits: %d\n", (unsigned int )eeprom->type, (int )eeprom->word_size, (int )eeprom->address_bits); } else { } } else { } return (0); } }
/* ixgbe_write_eeprom_buffer_bit_bang_generic - write `words` 16-bit words at
 * `offset` via bit-banged SPI.  -32 for zero words, -1 when the range exceeds
 * word_size; auto-detects the device page size when unknown and more than 128
 * words are requested.  Work is chunked into 512-word bursts. */
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) { s32 status ; u16 i ; u16 count ; { (*(hw->eeprom.ops.init_params))(hw); if ((unsigned int )words == 0U) { return (-32); } else { } if ((int )offset + (int )words > (int )hw->eeprom.word_size) { return (-1); } else { } if ((unsigned int )hw->eeprom.word_page_size == 0U && (unsigned int )words > 128U) { ixgbe_detect_eeprom_page_size_generic(hw, (int )offset); } else { } i = 0U; goto ldv_55666; ldv_55665: count = (u16 )(512 < (int )words - (int )i ? 512 : (int )words - (int )i); status = ixgbe_write_eeprom_buffer_bit_bang(hw, (int )offset + (int )i, (int )count, data + (unsigned long )i); if (status != 0) { goto ldv_55664; } else { } i = (unsigned int )i + 512U; ldv_55666: ; if ((int )i < (int )words) { goto ldv_55665; } else { } ldv_55664: ; return (status); } }
/* ixgbe_write_eeprom_buffer_bit_bang - low-level SPI write: acquire the
 * EEPROM, wait until ready, then per word send WREN (opcode 6), the WRITE
 * opcode (2, with bit 3 set as the A8 address bit for 8-bit-address parts
 * above word 127), the byte address (word index * 2), and the byte-swapped
 * data, honoring the device page size.  Continues on the next source line. */
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) { s32 status ; u16 word ; u16 page_size ; u16 i ; u8 write_opcode ; s32 tmp ; { write_opcode = 2U; status = ixgbe_acquire_eeprom(hw); if (status != 0) { return (status); } else { } tmp = ixgbe_ready_eeprom(hw); if (tmp != 0) { ixgbe_release_eeprom(hw); return (-1); } else { } i = 0U; goto ldv_55681; ldv_55680: ixgbe_standby_eeprom(hw); ixgbe_shift_out_eeprom_bits(hw, 6, 8); ixgbe_standby_eeprom(hw); if ((unsigned int )hw->eeprom.address_bits == 8U && (int )offset + (int )i > 127) { write_opcode = (u8 )((unsigned int )write_opcode | 8U); } else { } ixgbe_shift_out_eeprom_bits(hw, (int )write_opcode, 8); ixgbe_shift_out_eeprom_bits(hw, (int
)((unsigned int )((unsigned short )((int )offset + (int )i)) * 2U), (int )hw->eeprom.address_bits); page_size = hw->eeprom.word_page_size;
/* inner loop: stream byte-swapped words until the page boundary (or the end
 * of the request), then deselect and wait 10-20 ms for the page program */
ldv_55679: word = *(data + (unsigned long )i); word = (u16 )((int )((short )((int )word >> 8)) | (int )((short )((int )word << 8))); ixgbe_shift_out_eeprom_bits(hw, (int )word, 16); if ((unsigned int )page_size == 0U) { goto ldv_55678; } else { } if ((((int )offset + (int )i) & ((int )page_size + -1)) == (int )page_size + -1) { goto ldv_55678; } else { } i = (u16 )((int )i + 1); if ((int )i < (int )words) { goto ldv_55679; } else { } ldv_55678: ixgbe_standby_eeprom(hw); usleep_range(10000UL, 20000UL); i = (u16 )((int )i + 1); ldv_55681: ; if ((int )i < (int )words) { goto ldv_55680; } else { } ixgbe_release_eeprom(hw); return (0); } }
/* ixgbe_write_eeprom_generic - single-word write wrapper; -1 when offset is
 * beyond the EEPROM word size */
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw , u16 offset , u16 data ) { s32 tmp ; { (*(hw->eeprom.ops.init_params))(hw); if ((int )hw->eeprom.word_size <= (int )offset) { return (-1); } else { } tmp = ixgbe_write_eeprom_buffer_bit_bang(hw, (int )offset, 1, & data); return (tmp); } }
/* ixgbe_read_eeprom_buffer_bit_bang_generic - read `words` 16-bit words at
 * `offset` via bit-banged SPI, chunked into 512-word bursts; same argument
 * validation as the write-buffer variant.  Continues on the next line. */
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) { s32 status ; u16 i ; u16 count ; { (*(hw->eeprom.ops.init_params))(hw); if ((unsigned int )words == 0U) { return (-32); } else { } if ((int )offset + (int )words > (int )hw->eeprom.word_size) { return (-1); } else { } i = 0U; goto ldv_55698; ldv_55697: count = (u16 )(512 < (int )words - (int )i ?
512 : (int )words - (int )i); status = ixgbe_read_eeprom_buffer_bit_bang(hw, (int )offset + (int )i, (int )count, data + (unsigned long )i); if (status != 0) { return (status); } else { } i = (unsigned int )i + 512U; ldv_55698: ; if ((int )i < (int )words) { goto ldv_55697; } else { } return (0); } }
/* ixgbe_read_eeprom_buffer_bit_bang - low-level SPI read: acquire the EEPROM,
 * wait until ready, then per word send the READ opcode (3, with bit 3 as the
 * A8 address bit for 8-bit-address parts above word 127), the byte address
 * (word index * 2), and clock in 16 bits, byte-swapping the result */
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) { s32 status ; u16 word_in ; u8 read_opcode ; u16 i ; s32 tmp ; { read_opcode = 3U; status = ixgbe_acquire_eeprom(hw); if (status != 0) { return (status); } else { } tmp = ixgbe_ready_eeprom(hw); if (tmp != 0) { ixgbe_release_eeprom(hw); return (-1); } else { } i = 0U; goto ldv_55711; ldv_55710: ixgbe_standby_eeprom(hw); if ((unsigned int )hw->eeprom.address_bits == 8U && (int )offset + (int )i > 127) { read_opcode = (u8 )((unsigned int )read_opcode | 8U); } else { } ixgbe_shift_out_eeprom_bits(hw, (int )read_opcode, 8); ixgbe_shift_out_eeprom_bits(hw, (int )((unsigned int )((unsigned short )((int )offset + (int )i)) * 2U), (int )hw->eeprom.address_bits); word_in = ixgbe_shift_in_eeprom_bits(hw, 16); *(data + (unsigned long )i) = (u16 )((int )((short )((int )word_in >> 8)) | (int )((short )((int )word_in << 8))); i = (u16 )((int )i + 1); ldv_55711: ; if ((int )i < (int )words) { goto ldv_55710; } else { } ixgbe_release_eeprom(hw); return (0); } }
/* ixgbe_read_eeprom_bit_bang_generic - single-word read wrapper */
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw , u16 offset , u16 *data ) { s32 tmp ; { (*(hw->eeprom.ops.init_params))(hw); if ((int )hw->eeprom.word_size <= (int )offset) { return (-1); } else { } tmp = ixgbe_read_eeprom_buffer_bit_bang(hw, (int )offset, 1, data); return (tmp); } }
/* ixgbe_read_eerd_buffer_generic - read words through the EERD register
 * (0x10014) instead of bit-banging; continues on the next source line */
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) { u32 eerd ; s32 status ; u32 i ; u32 tmp ; struct _ddebug descriptor ; long tmp___0 ; { (*(hw->eeprom.ops.init_params))(hw); if ((unsigned int )words == 0U) { return (-32); } else { } if ((int )hw->eeprom.word_size <= (int )offset) { return
(-1); } else { }
/* per word: command = (address << 2) | start bit; poll for done, data is the
 * upper 16 bits of the readback */
i = 0U; goto ldv_55730; ldv_55729: eerd = (((u32 )offset + i) << 2) | 1U; ixgbe_write_reg(hw, 65556U, eerd); status = ixgbe_poll_eerd_eewr_done(hw, 0U); if (status == 0) { tmp = ixgbe_read_reg(hw, 65556U); *(data + (unsigned long )i) = (u16 )(tmp >> 16); } else { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_read_eerd_buffer_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "Eeprom read timed out\n"; descriptor.lineno = 1129U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Eeprom read timed out\n"); } else { } return (status); } i = i + 1U; ldv_55730: ; if ((u32 )words > i) { goto ldv_55729; } else { } return (0); } }
/* ixgbe_detect_eeprom_page_size_generic - discover the SPI device's write
 * page size: write a 128-word ramp (0..127) with an assumed 128-word page,
 * read back word 0, and derive page_size = 128 - data[0] (the device wraps
 * within one page, so the first word holds the wrap offset).  Continues on
 * the next source line. */
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw , u16 offset ) { u16 data[128U] ; s32 status ; u16 i ; struct _ddebug descriptor ; long tmp ; { i = 0U; goto ldv_55740; ldv_55739: data[(int )i] = i; i = (u16 )((int )i + 1); ldv_55740: ; if ((unsigned int )i <= 127U) { goto ldv_55739; } else { } hw->eeprom.word_page_size = 128U; status = ixgbe_write_eeprom_buffer_bit_bang(hw, (int )offset, 128, (u16 *)(& data)); hw->eeprom.word_page_size = 0U; if (status != 0) { return (status); } else { } status = ixgbe_read_eeprom_buffer_bit_bang(hw, (int )offset, 1, (u16 *)(& data)); if (status != 0) { return (status); } else { } hw->eeprom.word_page_size = 128U - (unsigned int )data[0]; descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_detect_eeprom_page_size_generic"; descriptor.filename =
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "Detected EEPROM page size = %d words.\n"; descriptor.lineno = 1174U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Detected EEPROM page size = %d words.\n", (int )hw->eeprom.word_page_size); } else { } return (0); } }
/* ixgbe_read_eerd_generic - single-word EERD read wrapper */
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw , u16 offset , u16 *data ) { s32 tmp ; { tmp = ixgbe_read_eerd_buffer_generic(hw, (int )offset, 1, data); return (tmp); } }
/* ixgbe_write_eewr_buffer_generic - write words through the EEWR register
 * (0x10018): per word, command = (address << 2) | (data << 16) | start bit;
 * the controller is polled for idle both before and after the write.
 * Continues on the next source line. */
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) { u32 eewr ; s32 status ; u16 i ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; { (*(hw->eeprom.ops.init_params))(hw); if ((unsigned int )words == 0U) { return (-32); } else { } if ((int )hw->eeprom.word_size <= (int )offset) { return (-1); } else { } i = 0U; goto ldv_55762; ldv_55761: eewr = (u32 )(((((int )offset + (int )i) << 2) | ((int )*(data + (unsigned long )i) << 16)) | 1); status = ixgbe_poll_eerd_eewr_done(hw, 1U); if (status != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_write_eewr_buffer_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "Eeprom write EEWR timed out\n"; descriptor.lineno = 1222U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter
*)hw->back)->netdev, "Eeprom write EEWR timed out\n"); } else { } return (status); } else { } ixgbe_write_reg(hw, 65560U, eewr); status = ixgbe_poll_eerd_eewr_done(hw, 1U); if (status != 0) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_write_eewr_buffer_generic"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___0.format = "Eeprom write EEWR timed out\n"; descriptor___0.lineno = 1230U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Eeprom write EEWR timed out\n"); } else { } return (status); } else { } i = (u16 )((int )i + 1); ldv_55762: ; if ((int )i < (int )words) { goto ldv_55761; } else { } return (0); } }
/* ixgbe_write_eewr_generic - single-word EEWR write wrapper */
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw , u16 offset , u16 data ) { s32 tmp ; { tmp = ixgbe_write_eewr_buffer_generic(hw, (int )offset, 1, & data); return (tmp); } }
/* ixgbe_poll_eerd_eewr_done - poll EERD (ee_reg == 0) or EEWR for the done
 * bit (bit 1), up to 100000 iterations with ~5 us delays; -1 on timeout */
static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw , u32 ee_reg ) { u32 i ; u32 reg ; { i = 0U; goto ldv_55776; ldv_55775: ; if (ee_reg == 0U) { reg = ixgbe_read_reg(hw, 65556U); } else { reg = ixgbe_read_reg(hw, 65560U); } if ((reg & 2U) != 0U) { return (0); } else { } __const_udelay(21475UL); i = i + 1U; ldv_55776: ; if (i <= 99999U) { goto ldv_55775; } else { } return (-1); } }
/* ixgbe_acquire_eeprom - take the software/firmware semaphore for the EEPROM
 * (mac op, flag 1), then request the hardware grant: set bit 6 (request) in
 * the EEC register (hw->mvals[0]) and wait up to 1000 polls for bit 7
 * (grant).  Continues on the next source line. */
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw ) { u32 eec ; u32 i ; s32 tmp ; struct _ddebug descriptor ; long tmp___0 ; { tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, 1U); if (tmp != 0) { return (-16); } else { } eec = ixgbe_read_reg(hw, *(hw->mvals)); eec = eec | 64U; ixgbe_write_reg(hw, *(hw->mvals), eec); i = 0U; goto ldv_55785; ldv_55784: eec = ixgbe_read_reg(hw, *(hw->mvals));
if ((eec & 128U) != 0U) { goto ldv_55783; } else { } __const_udelay(21475UL); i = i + 1U; ldv_55785: ; if (i <= 999U) { goto ldv_55784; } else { }
/* grant (bit 7) never arrived: drop the request bit, log, release the
 * semaphore and fail; otherwise clear SK/CS (bits 0-1) to set up the SPI
 * bus, flush via a read of 0x8 and settle ~1 us */
ldv_55783: ; if ((eec & 128U) == 0U) { eec = eec & 4294967231U; ixgbe_write_reg(hw, *(hw->mvals), eec); descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_acquire_eeprom"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "Could not acquire EEPROM grant\n"; descriptor.lineno = 1310U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Could not acquire EEPROM grant\n"); } else { } (*(hw->mac.ops.release_swfw_sync))(hw, 1U); return (-1); } else { } eec = eec & 4294967292U; ixgbe_write_reg(hw, *(hw->mvals), eec); ixgbe_read_reg(hw, 8U); __const_udelay(4295UL); return (0); } }
/* ixgbe_get_eeprom_semaphore - two-stage software semaphore in the SWSM
 * register (hw->mvals[5]): first wait (2000 x 50-100 us) for the SMBI bit
 * (bit 0) to clear, then set and verify the SWESMBI bit (bit 1).  On SMBI
 * timeout the semaphore is force-released once and SMBI is re-checked.
 * Continues on the next source lines. */
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw ) { u32 timeout ; u32 i ; u32 swsm ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; { timeout = 2000U; i = 0U; goto ldv_55796; ldv_55795: swsm = ixgbe_read_reg(hw, *(hw->mvals + 5UL)); if ((swsm & 1U) == 0U) { goto ldv_55794; } else { } usleep_range(50UL, 100UL); i = i + 1U; ldv_55796: ; if (i < timeout) { goto ldv_55795; } else { } ldv_55794: ; if (i == timeout) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_get_eeprom_semaphore"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "Driver
can\'t access the Eeprom - SMBI Semaphore not granted.\n"; descriptor.lineno = 1350U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Driver can\'t access the Eeprom - SMBI Semaphore not granted.\n"); } else { }
/* SMBI timed out: force-release the semaphore once, re-read SWSM, and give
 * up with -1 if another driver instance still holds SMBI */
ixgbe_release_eeprom_semaphore(hw); usleep_range(50UL, 100UL); swsm = ixgbe_read_reg(hw, *(hw->mvals + 5UL)); if ((int )swsm & 1) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_get_eeprom_semaphore"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___0.format = "Software semaphore SMBI between device drivers not granted.\n"; descriptor___0.lineno = 1365U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Software semaphore SMBI between device drivers not granted.\n"); } else { } return (-1); } else { } } else { }
/* stage 2: set SWESMBI (bit 1) and read it back until the write sticks */
i = 0U; goto ldv_55802; ldv_55801: swsm = ixgbe_read_reg(hw, *(hw->mvals + 5UL)); swsm = swsm | 2U; ixgbe_write_reg(hw, *(hw->mvals + 5UL), swsm); swsm = ixgbe_read_reg(hw, *(hw->mvals + 5UL)); if ((swsm & 2U) != 0U) { goto ldv_55800; } else { } usleep_range(50UL, 100UL); i = i + 1U; ldv_55802: ; if (i < timeout) { goto ldv_55801; } else { } ldv_55800: ; if (i >= timeout) { descriptor___1.modname = "ixgbe"; descriptor___1.function = "ixgbe_get_eeprom_semaphore"; descriptor___1.filename =
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___1.format = "SWESMBI Software EEPROM semaphore not granted.\n"; descriptor___1.lineno = 1392U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "SWESMBI Software EEPROM semaphore not granted.\n"); } else { } ixgbe_release_eeprom_semaphore(hw); return (-1); } else { } return (0); } }
/* ixgbe_release_eeprom_semaphore - clear SMBI and SWESMBI (bits 0-1) in the
 * SWSM register (hw->mvals[5]); read of 0x8 flushes the posted write */
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw ) { u32 swsm ; { swsm = ixgbe_read_reg(hw, *(hw->mvals + 5UL)); swsm = swsm & 4294967292U; ixgbe_write_reg(hw, *(hw->mvals + 5UL), swsm); ixgbe_read_reg(hw, 8U); return; } }
/* ixgbe_ready_eeprom - poll the SPI status register (RDSR opcode 5) until
 * the write-in-progress bit (bit 0) clears, up to 5000 us in 5 us steps;
 * -1 and a debug message if it never clears.  Continues on the next line. */
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw ) { u16 i ; u8 spi_stat_reg ; u16 tmp ; struct _ddebug descriptor ; long tmp___0 ; { i = 0U; goto ldv_55815; ldv_55814: ixgbe_shift_out_eeprom_bits(hw, 5, 8); tmp = ixgbe_shift_in_eeprom_bits(hw, 8); spi_stat_reg = (unsigned char )tmp; if (((int )spi_stat_reg & 1) == 0) { goto ldv_55813; } else { } __const_udelay(21475UL); ixgbe_standby_eeprom(hw); i = (unsigned int )i + 5U; ldv_55815: ; if ((unsigned int )i <= 4999U) { goto ldv_55814; } else { } ldv_55813: ; if ((unsigned int )i > 4999U) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_ready_eeprom"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "SPI EEPROM Status error\n"; descriptor.lineno = 1449U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) {
/* tail of ixgbe_ready_eeprom (definition begins on an earlier line) */
__dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "SPI EEPROM Status error\n"); } else { } return (-1); } else { } return (0); } }
/* Pulse the EEPROM chip-select: set then clear bit 1 of the EEC register
 * (*(hw->mvals) is this MAC's EEC offset -- presumably IXGBE_EEC; confirm
 * against the mvals table, which is outside this chunk).  Each edge is
 * followed by a read of register 8 (a posted-write flush) and a short
 * __const_udelay.  Returns nothing. */
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw ) { u32 eec ; { eec = ixgbe_read_reg(hw, *(hw->mvals)); eec = eec | 2U; ixgbe_write_reg(hw, *(hw->mvals), eec); ixgbe_read_reg(hw, 8U); __const_udelay(4295UL); /* 4294967293U == ~2U: drop the chip-select bit again */ eec = eec & 4294967293U; ixgbe_write_reg(hw, *(hw->mvals), eec); ixgbe_read_reg(hw, 8U); __const_udelay(4295UL); return; } }
/* Bit-bang the low `count` bits of `data` out to the SPI EEPROM,
 * most-significant bit first: mask starts at 1 << (count - 1) and shifts
 * right each iteration.  Bit 2 (4U) of EEC is the data-out line
 * (4294967291U == ~4U clears it); the clock is toggled via the
 * raise/lower helpers below.  The ldv_* labels encode the original
 * for-loop after CIL lowering. */
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw , u16 data , u16 count ) { u32 eec ; u32 mask ; u32 i ; { eec = ixgbe_read_reg(hw, *(hw->mvals)); mask = (u32 )(1 << ((int )count + -1)); i = 0U; goto ldv_55831; ldv_55830: ; if (((u32 )data & mask) != 0U) { eec = eec | 4U; } else { eec = eec & 4294967291U; } ixgbe_write_reg(hw, *(hw->mvals), eec); ixgbe_read_reg(hw, 8U); __const_udelay(4295UL); ixgbe_raise_eeprom_clk(hw, & eec); ixgbe_lower_eeprom_clk(hw, & eec); mask = mask >> 1; i = i + 1U; ldv_55831: ; if ((u32 )count > i) { goto ldv_55830; } else { } /* leave the data-out line low when done */ eec = eec & 4294967291U; ixgbe_write_reg(hw, *(hw->mvals), eec); ixgbe_read_reg(hw, 8U); return; } }
/* Bit-bang `count` bits in from the SPI EEPROM, MSB first.  Bit 3 (8U)
 * of EEC is sampled as data-in after each rising clock edge;
 * 4294967283U == ~12U clears both data bits up front, and
 * 4294967291U == ~4U keeps data-out deasserted while reading.
 * Returns the assembled bits in the low end of a u16. */
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw , u16 count ) { u32 eec ; u32 i ; u16 data ; { data = 0U; eec = ixgbe_read_reg(hw, *(hw->mvals)); eec = eec & 4294967283U; i = 0U; goto ldv_55841; ldv_55840: data = (int )data << 1U; ixgbe_raise_eeprom_clk(hw, & eec); eec = ixgbe_read_reg(hw, *(hw->mvals)); eec = eec & 4294967291U; if ((eec & 8U) != 0U) { data = (u16 )((unsigned int )data | 1U); } else { } ixgbe_lower_eeprom_clk(hw, & eec); i = i + 1U; ldv_55841: ; if ((u32 )count > i) { goto ldv_55840; } else { } return (data); } }
/* Drive the EEPROM clock line high: bit 0 (1U) of EEC, then flush and delay. */
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw , u32 *eec ) { { *eec = *eec | 1U; ixgbe_write_reg(hw, *(hw->mvals), *eec); ixgbe_read_reg(hw, 8U); __const_udelay(4295UL); return; } }
/* Drive the EEPROM clock line low (4294967294U == ~1U), then flush and delay. */
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw , u32 *eec
) { { *eec = *eec & 4294967294U; ixgbe_write_reg(hw, *(hw->mvals), *eec); ixgbe_read_reg(hw, 8U); __const_udelay(4295UL); return; } }
/* Release the EEPROM after an access: deassert chip-select/clock
 * (set bit 1, clear bit 0), clear bit 6 (4294967231U == ~64U --
 * presumably the EEC access-request bit; confirm against the datasheet),
 * then drop the software/firmware sync semaphore (mask 1U) and wait out
 * the configured semaphore delay before the next grab. */
static void ixgbe_release_eeprom(struct ixgbe_hw *hw ) { u32 eec ; { eec = ixgbe_read_reg(hw, *(hw->mvals)); eec = eec | 2U; eec = eec & 4294967294U; ixgbe_write_reg(hw, *(hw->mvals), eec); ixgbe_read_reg(hw, 8U); __const_udelay(4295UL); eec = eec & 4294967231U; ixgbe_write_reg(hw, *(hw->mvals), eec); (*(hw->mac.ops.release_swfw_sync))(hw, 1U); usleep_range((unsigned long )(hw->eeprom.semaphore_delay * 1000U), (unsigned long )(hw->eeprom.semaphore_delay * 2000U)); return; } }
/* Sum EEPROM words 0..62 plus every pointed-to region from the pointer
 * table (words 3..14), then return IXGBE_EEPROM_SUM (47802U == 0xBABA)
 * minus that sum.  Definition continues on the following lines. */
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw ) { u16 i ; u16 j ; u16 checksum ; u16 length ; u16 pointer ; u16 word ; struct _ddebug descriptor ; long tmp ; s32 tmp___0 ; struct _ddebug descriptor___0 ; long tmp___1 ; s32 tmp___2 ; struct _ddebug descriptor___1 ; long tmp___3 ; s32 tmp___4 ; struct _ddebug descriptor___2 ; long tmp___5 ; s32 tmp___6 ; { checksum = 0U; length = 0U; pointer = 0U; word = 0U; i = 0U; goto ldv_55868; ldv_55867: tmp___0 = (*(hw->eeprom.ops.read))(hw, (int )i, & word); if (tmp___0 != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_calc_eeprom_checksum_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "EEPROM read failed\n"; descriptor.lineno = 1651U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } goto ldv_55866; } else { } checksum = (int )checksum + (int )word; i = (u16 )((int )i + 1); ldv_55868: ; if ((unsigned int )i <= 62U) { goto ldv_55867; } else { } ldv_55866: i = 3U; goto
ldv_55877; ldv_55876: tmp___2 = (*(hw->eeprom.ops.read))(hw, (int )i, & pointer); if (tmp___2 != 0) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_calc_eeprom_checksum_generic"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___0.format = "EEPROM read failed\n"; descriptor___0.lineno = 1660U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } return (-1); } else { } if ((unsigned int )pointer == 65535U || (unsigned int )pointer == 0U) { goto ldv_55870; } else { } tmp___4 = (*(hw->eeprom.ops.read))(hw, (int )pointer, & length); if (tmp___4 != 0) { descriptor___1.modname = "ixgbe"; descriptor___1.function = "ixgbe_calc_eeprom_checksum_generic"; descriptor___1.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___1.format = "EEPROM read failed\n"; descriptor___1.lineno = 1669U; descriptor___1.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } return (-1); } else { } if ((unsigned int )length == 65535U || (unsigned int )length == 0U) { goto ldv_55870; } else { } j = (unsigned int )pointer + 1U; goto ldv_55874; ldv_55873: tmp___6 = (*(hw->eeprom.ops.read))(hw, (int )j, & word); if (tmp___6 != 0) { descriptor___2.modname = "ixgbe"; 
descriptor___2.function = "ixgbe_calc_eeprom_checksum_generic"; descriptor___2.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___2.format = "EEPROM read failed\n"; descriptor___2.lineno = 1678U; descriptor___2.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___5 != 0L) { __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } return (-1); } else { } checksum = (int )checksum + (int )word; j = (u16 )((int )j + 1); ldv_55874: ; if ((int )j <= (int )pointer + (int )length) { goto ldv_55873; } else { } ldv_55870: i = (u16 )((int )i + 1); ldv_55877: ; if ((unsigned int )i <= 14U) { goto ldv_55876; } else { } checksum = 47802U - (unsigned int )checksum; return ((s32 )checksum); } } s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw , u16 *checksum_val ) { s32 status ; u16 checksum ; u16 read_checksum ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; { read_checksum = 0U; status = (*(hw->eeprom.ops.read))(hw, 0, & checksum); if (status != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_validate_eeprom_checksum_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "EEPROM read failed\n"; descriptor.lineno = 1712U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } 
return (status); } else { } status = (*(hw->eeprom.ops.calc_checksum))(hw); if (status < 0) { return (status); } else { } checksum = (unsigned short )status; status = (*(hw->eeprom.ops.read))(hw, 63, & read_checksum); if (status != 0) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_validate_eeprom_checksum_generic"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___0.format = "EEPROM read failed\n"; descriptor___0.lineno = 1724U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } return (status); } else { } if ((int )read_checksum != (int )checksum) { status = -2; } else { } if ((unsigned long )checksum_val != (unsigned long )((u16 *)0U)) { *checksum_val = checksum; } else { } return (status); } } s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw ) { s32 status ; u16 checksum ; struct _ddebug descriptor ; long tmp ; { status = (*(hw->eeprom.ops.read))(hw, 0, & checksum); if (status != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_update_eeprom_checksum_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "EEPROM read failed\n"; descriptor.lineno = 1757U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM 
read failed\n"); } else { } return (status); } else { } status = (*(hw->eeprom.ops.calc_checksum))(hw); if (status < 0) { return (status); } else { } checksum = (unsigned short )status; status = (*(hw->eeprom.ops.write))(hw, 63, (int )checksum); return (status); } } s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw , u32 index , u8 *addr , u32 vmdq , u32 enable_addr ) { u32 rar_low ; u32 rar_high ; u32 rar_entries ; struct _ddebug descriptor ; long tmp ; { rar_entries = hw->mac.num_rar_entries; if (index >= rar_entries) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_set_rar_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "RAR index %d is out of range.\n"; descriptor.lineno = 1790U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "RAR index %d is out of range.\n", index); } else { } return (-32); } else { } (*(hw->mac.ops.set_vmdq))(hw, index, vmdq); rar_low = (((unsigned int )*addr | ((unsigned int )*(addr + 1UL) << 8)) | ((unsigned int )*(addr + 2UL) << 16)) | ((unsigned int )*(addr + 3UL) << 24); rar_high = ixgbe_read_reg(hw, index <= 15U ? index * 8U + 21508U : index * 8U + 41476U); rar_high = rar_high & 2147418112U; rar_high = ((unsigned int )*(addr + 4UL) | ((unsigned int )*(addr + 5UL) << 8)) | rar_high; if (enable_addr != 0U) { rar_high = rar_high | 2147483648U; } else { } ixgbe_write_reg(hw, index <= 15U ? (index + 2688U) * 8U : (index + 5184U) * 8U, rar_low); ixgbe_write_reg(hw, index <= 15U ? 
index * 8U + 21508U : index * 8U + 41476U, rar_high); return (0); } } s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw , u32 index ) { u32 rar_high ; u32 rar_entries ; struct _ddebug descriptor ; long tmp ; { rar_entries = hw->mac.num_rar_entries; if (index >= rar_entries) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_clear_rar_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "RAR index %d is out of range.\n"; descriptor.lineno = 1837U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "RAR index %d is out of range.\n", index); } else { } return (-32); } else { } rar_high = ixgbe_read_reg(hw, index <= 15U ? index * 8U + 21508U : index * 8U + 41476U); rar_high = rar_high & 2147418112U; ixgbe_write_reg(hw, index <= 15U ? (index + 2688U) * 8U : (index + 5184U) * 8U, 0U); ixgbe_write_reg(hw, index <= 15U ? 
index * 8U + 21508U : index * 8U + 41476U, rar_high); (*(hw->mac.ops.clear_vmdq))(hw, index, 4294967295U); return (0); } } s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw ) { u32 i ; u32 rar_entries ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; bool tmp___2 ; int tmp___3 ; struct _ddebug descriptor___2 ; long tmp___4 ; struct _ddebug descriptor___3 ; long tmp___5 ; { rar_entries = hw->mac.num_rar_entries; tmp___2 = is_valid_ether_addr((u8 const *)(& hw->mac.addr)); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { (*(hw->mac.ops.get_mac_addr))(hw, (u8 *)(& hw->mac.addr)); descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_init_rx_addrs_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = " Keeping Current RAR0 Addr =%pM\n"; descriptor.lineno = 1880U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, " Keeping Current RAR0 Addr =%pM\n", (u8 *)(& hw->mac.addr)); } else { } } else { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_init_rx_addrs_generic"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___0.format = "Overriding MAC Address in RAR[0]\n"; descriptor___0.lineno = 1883U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, 
(struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Overriding MAC Address in RAR[0]\n"); } else { } descriptor___1.modname = "ixgbe"; descriptor___1.function = "ixgbe_init_rx_addrs_generic"; descriptor___1.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___1.format = " New MAC Addr =%pM\n"; descriptor___1.lineno = 1884U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, " New MAC Addr =%pM\n", (u8 *)(& hw->mac.addr)); } else { } (*(hw->mac.ops.set_rar))(hw, 0U, (u8 *)(& hw->mac.addr), 0U, 2147483648U); (*(hw->mac.ops.clear_vmdq))(hw, 0U, 4294967295U); } hw->addr_ctrl.overflow_promisc = 0U; hw->addr_ctrl.rar_used_count = 1U; descriptor___2.modname = "ixgbe"; descriptor___2.function = "ixgbe_init_rx_addrs_generic"; descriptor___2.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___2.format = "Clearing RAR[1-%d]\n"; descriptor___2.lineno = 1896U; descriptor___2.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___4 != 0L) { __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Clearing RAR[1-%d]\n", rar_entries - 1U); } else { } i = 1U; goto ldv_55927; ldv_55926: ixgbe_write_reg(hw, i <= 15U ? (i + 2688U) * 8U : (i + 5184U) * 8U, 0U); ixgbe_write_reg(hw, i <= 15U ? 
i * 8U + 21508U : i * 8U + 41476U, 0U); i = i + 1U; ldv_55927: ; if (i < rar_entries) { goto ldv_55926; } else { } hw->addr_ctrl.mta_in_use = 0U; ixgbe_write_reg(hw, 20624U, (u32 )hw->mac.mc_filter_type); descriptor___3.modname = "ixgbe"; descriptor___3.function = "ixgbe_init_rx_addrs_generic"; descriptor___3.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___3.format = " Clearing MTA\n"; descriptor___3.lineno = 1906U; descriptor___3.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___5 != 0L) { __dynamic_netdev_dbg(& descriptor___3, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, " Clearing MTA\n"); } else { } i = 0U; goto ldv_55931; ldv_55930: ixgbe_write_reg(hw, (i + 5248U) * 4U, 0U); i = i + 1U; ldv_55931: ; if (hw->mac.mcft_size > i) { goto ldv_55930; } else { } if ((unsigned long )hw->mac.ops.init_uta_tables != (unsigned long )((s32 (*)(struct ixgbe_hw * ))0)) { (*(hw->mac.ops.init_uta_tables))(hw); } else { } return (0); } } static s32 ixgbe_mta_vector(struct ixgbe_hw *hw , u8 *mc_addr ) { u32 vector ; struct _ddebug descriptor ; long tmp ; { vector = 0U; switch (hw->mac.mc_filter_type) { case 0: vector = (u32 )(((int )*(mc_addr + 4UL) >> 4) | ((int )*(mc_addr + 5UL) << 4)); goto ldv_55939; case 1: vector = (u32 )(((int )*(mc_addr + 4UL) >> 3) | ((int )*(mc_addr + 5UL) << 5)); goto ldv_55939; case 2: vector = (u32 )(((int )*(mc_addr + 4UL) >> 2) | ((int )*(mc_addr + 5UL) << 6)); goto ldv_55939; case 3: vector = (u32 )((int )*(mc_addr + 4UL) | ((int )*(mc_addr + 5UL) << 8)); goto ldv_55939; default: descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_mta_vector"; descriptor.filename = 
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "MC filter type param set incorrectly\n"; descriptor.lineno = 1946U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "MC filter type param set incorrectly\n"); } else { } goto ldv_55939; } ldv_55939: vector = vector & 4095U; return ((s32 )vector); } } static void ixgbe_set_mta(struct ixgbe_hw *hw , u8 *mc_addr ) { u32 vector ; u32 vector_bit ; u32 vector_reg ; s32 tmp ; struct _ddebug descriptor ; long tmp___0 ; { hw->addr_ctrl.mta_in_use = hw->addr_ctrl.mta_in_use + 1U; tmp = ixgbe_mta_vector(hw, mc_addr); vector = (u32 )tmp; descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_set_mta"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = " bit-vector = 0x%03X\n"; descriptor.lineno = 1971U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, " bit-vector = 0x%03X\n", vector); } else { } vector_reg = (vector >> 5) & 127U; vector_bit = vector & 31U; hw->mac.mta_shadow[vector_reg] = hw->mac.mta_shadow[vector_reg] | (u32 )(1 << (int )vector_bit); return; } } s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw , struct net_device *netdev ) { struct netdev_hw_addr *ha ; u32 i ; struct _ddebug descriptor ; long tmp ; struct list_head const *__mptr ; struct _ddebug descriptor___0 ; 
long tmp___0 ; struct list_head const *__mptr___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; { hw->addr_ctrl.num_mc_addrs = (u32 )netdev->mc.count; hw->addr_ctrl.mta_in_use = 0U; descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_update_mc_addr_list_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = " Clearing MTA\n"; descriptor.lineno = 2011U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, " Clearing MTA\n"); } else { } memset((void *)(& hw->mac.mta_shadow), 0, 512UL); __mptr = (struct list_head const *)netdev->mc.list.next; ha = (struct netdev_hw_addr *)__mptr; goto ldv_55969; ldv_55968: descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_update_mc_addr_list_generic"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___0.format = " Adding the multicast addresses:\n"; descriptor___0.lineno = 2016U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, " Adding the multicast addresses:\n"); } else { } ixgbe_set_mta(hw, (u8 *)(& ha->addr)); __mptr___0 = (struct list_head const *)ha->list.next; ha = (struct netdev_hw_addr *)__mptr___0; ldv_55969: ; if ((unsigned long )(& ha->list) != (unsigned long )(& netdev->mc.list)) { goto ldv_55968; } else { } i = 0U; goto ldv_55972; 
/* tail of ixgbe_update_mc_addr_list_generic: flush the mta_shadow table
 * to the hardware MTA registers (base offset 20992U), then set the MTA
 * enable bit (| 4) in register 20624U if any entry is in use */
ldv_55971: ixgbe_write_reg(hw, (i << 2) + 20992U, hw->mac.mta_shadow[i]); i = i + 1U; ldv_55972: ; if (hw->mac.mcft_size > i) { goto ldv_55971; } else { } if (hw->addr_ctrl.mta_in_use != 0U) { ixgbe_write_reg(hw, 20624U, (u32 )(hw->mac.mc_filter_type | 4)); } else { } descriptor___1.modname = "ixgbe"; descriptor___1.function = "ixgbe_update_mc_addr_list_generic"; descriptor___1.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___1.format = "ixgbe_update_mc_addr_list_generic Complete\n"; descriptor___1.lineno = 2029U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "ixgbe_update_mc_addr_list_generic Complete\n"); } else { } return (0); } }
/* Re-enable multicast filtering: write mc_filter_type with the enable
 * bit (| 4) to register 20624U (presumably MCSTCTRL; confirm against the
 * datasheet), but only if the MTA actually has entries in use.
 * Always returns 0. */
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw ) { struct ixgbe_addr_filter_info *a ; { a = & hw->addr_ctrl; if (a->mta_in_use != 0U) { ixgbe_write_reg(hw, 20624U, (u32 )(hw->mac.mc_filter_type | 4)); } else { } return (0); } }
/* Disable multicast filtering: same register as above, written without
 * the enable bit.  Also a no-op when the MTA is empty.  Always returns 0. */
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw ) { struct ixgbe_addr_filter_info *a ; { a = & hw->addr_ctrl; if (a->mta_in_use != 0U) { ixgbe_write_reg(hw, 20624U, (u32 )hw->mac.mc_filter_type); } else { } return (0); } }
/* Program flow control per hw->fc: validates pause_time and the per-TC
 * high/low water marks, then configures RX/TX pause registers.
 * Definition continues on the following lines; -13 here is the
 * invalid-link-settings error code. */
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw ) { u32 mflcn_reg ; u32 fccfg_reg ; u32 reg ; u32 fcrtl ; u32 fcrth ; int i ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; u32 tmp___1 ; { if ((unsigned int )hw->fc.pause_time == 0U) { return (-13); } else { } i = 0; goto ldv_55995; ldv_55994: ; if (((unsigned int )hw->fc.current_mode & 2U) != 0U && hw->fc.high_water[i] != 0U) { if (hw->fc.low_water[i] == 0U || hw->fc.low_water[i] >= hw->fc.high_water[i]) { descriptor.modname
= "ixgbe"; descriptor.function = "ixgbe_fc_enable_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "Invalid water mark configuration\n"; descriptor.lineno = 2089U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Invalid water mark configuration\n"); } else { } return (-13); } else { } } else { } i = i + 1; ldv_55995: ; if (i <= 7) { goto ldv_55994; } else { } ixgbe_fc_autoneg(hw); mflcn_reg = ixgbe_read_reg(hw, 17044U); mflcn_reg = mflcn_reg & 4294963203U; fccfg_reg = ixgbe_read_reg(hw, 15616U); fccfg_reg = fccfg_reg & 4294967271U; switch ((unsigned int )hw->fc.current_mode) { case 0U: ; goto ldv_55998; case 1U: mflcn_reg = mflcn_reg | 8U; goto ldv_55998; case 2U: fccfg_reg = fccfg_reg | 8U; goto ldv_55998; case 3U: mflcn_reg = mflcn_reg | 8U; fccfg_reg = fccfg_reg | 8U; goto ldv_55998; default: descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_fc_enable_generic"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___0.format = "Flow control param set incorrectly\n"; descriptor___0.lineno = 2146U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flow control param set incorrectly\n"); } else { } return (-4); } ldv_55998: mflcn_reg = mflcn_reg | 2U; ixgbe_write_reg(hw, 17044U, 
mflcn_reg); ixgbe_write_reg(hw, 15616U, fccfg_reg); i = 0; goto ldv_56005; ldv_56004: ; if (((unsigned int )hw->fc.current_mode & 2U) != 0U && hw->fc.high_water[i] != 0U) { fcrtl = (hw->fc.low_water[i] << 10) | 2147483648U; ixgbe_write_reg(hw, (u32 )((i + 3208) * 4), fcrtl); fcrth = (hw->fc.high_water[i] << 10) | 2147483648U; } else { ixgbe_write_reg(hw, (u32 )((i + 3208) * 4), 0U); tmp___1 = ixgbe_read_reg(hw, (u32 )((i + 3840) * 4)); fcrth = tmp___1 - 32U; } ixgbe_write_reg(hw, (u32 )((i + 3224) * 4), fcrth); i = i + 1; ldv_56005: ; if (i <= 7) { goto ldv_56004; } else { } reg = (u32 )((int )hw->fc.pause_time * 65537); i = 0; goto ldv_56008; ldv_56007: ixgbe_write_reg(hw, (u32 )((i + 3200) * 4), reg); i = i + 1; ldv_56008: ; if (i <= 3) { goto ldv_56007; } else { } ixgbe_write_reg(hw, 12960U, (unsigned int )hw->fc.pause_time / 2U); return (0); } } static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw , u32 adv_reg , u32 lp_reg , u32 adv_sym , u32 adv_asm , u32 lp_sym , u32 lp_asm ) { struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; struct _ddebug descriptor___2 ; long tmp___2 ; struct _ddebug descriptor___3 ; long tmp___3 ; { if (adv_reg == 0U || lp_reg == 0U) { return (-27); } else { } if ((adv_reg & adv_sym) != 0U && (lp_reg & lp_sym) != 0U) { if ((unsigned int )hw->fc.requested_mode == 3U) { hw->fc.current_mode = 3; descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_negotiate_fc"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "Flow Control = FULL.\n"; descriptor.lineno = 2215U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct 
ixgbe_adapter *)hw->back)->netdev, "Flow Control = FULL.\n"); } else { } } else { hw->fc.current_mode = 1; descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_negotiate_fc"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___0.format = "Flow Control=RX PAUSE frames only\n"; descriptor___0.lineno = 2218U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flow Control=RX PAUSE frames only\n"); } else { } } } else if ((((adv_reg & adv_sym) == 0U && (adv_reg & adv_asm) != 0U) && (lp_reg & lp_sym) != 0U) && (lp_reg & lp_asm) != 0U) { hw->fc.current_mode = 2; descriptor___1.modname = "ixgbe"; descriptor___1.function = "ixgbe_negotiate_fc"; descriptor___1.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___1.format = "Flow Control = TX PAUSE frames only.\n"; descriptor___1.lineno = 2223U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flow Control = TX PAUSE frames only.\n"); } else { } } else if ((((adv_reg & adv_sym) != 0U && (adv_reg & adv_asm) != 0U) && (lp_reg & lp_sym) == 0U) && (lp_reg & lp_asm) != 0U) { hw->fc.current_mode = 1; descriptor___2.modname = "ixgbe"; descriptor___2.function = "ixgbe_negotiate_fc"; descriptor___2.filename = 
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___2.format = "Flow Control = RX PAUSE frames only.\n"; descriptor___2.lineno = 2227U; descriptor___2.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flow Control = RX PAUSE frames only.\n"); } else { } } else { hw->fc.current_mode = 0; descriptor___3.modname = "ixgbe"; descriptor___3.function = "ixgbe_negotiate_fc"; descriptor___3.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___3.format = "Flow Control = NONE.\n"; descriptor___3.lineno = 2230U; descriptor___3.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_netdev_dbg(& descriptor___3, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flow Control = NONE.\n"); } else { } } return (0); } } static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw ) { u32 pcs_anadv_reg ; u32 pcs_lpab_reg ; u32 linkstat ; s32 ret_val ; { linkstat = ixgbe_read_reg(hw, 16908U); if ((linkstat & 65536U) == 0U || (linkstat & 262144U) != 0U) { return (-27); } else { } pcs_anadv_reg = ixgbe_read_reg(hw, 16920U); pcs_lpab_reg = ixgbe_read_reg(hw, 16924U); ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, pcs_lpab_reg, 128U, 256U, 128U, 256U); return (ret_val); } } static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw ) { u32 links2 ; u32 anlp1_reg ; u32 autoc_reg ; u32 links ; s32 ret_val ; { links = ixgbe_read_reg(hw, 17060U); if ((int )links >= 0) { return 
(-27); } else { } if ((unsigned int )hw->mac.type == 2U) { links2 = ixgbe_read_reg(hw, 17188U); if ((links2 & 64U) == 0U) { return (-27); } else { } } else { } autoc_reg = ixgbe_read_reg(hw, 17056U); anlp1_reg = ixgbe_read_reg(hw, 17072U); ret_val = ixgbe_negotiate_fc(hw, autoc_reg, anlp1_reg, 268435456U, 536870912U, 1024U, 2048U); return (ret_val); } } static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw ) { u16 technology_ability_reg ; u16 lp_technology_ability_reg ; s32 tmp ; { technology_ability_reg = 0U; lp_technology_ability_reg = 0U; (*(hw->phy.ops.read_reg))(hw, 16U, 7U, & technology_ability_reg); (*(hw->phy.ops.read_reg))(hw, 19U, 7U, & lp_technology_ability_reg); tmp = ixgbe_negotiate_fc(hw, (unsigned int )technology_ability_reg, (unsigned int )lp_technology_ability_reg, 1024U, 2048U, 1024U, 2048U); return (tmp); } } void ixgbe_fc_autoneg(struct ixgbe_hw *hw ) { s32 ret_val ; ixgbe_link_speed speed ; bool link_up ; bool tmp ; { ret_val = -27; if ((int )hw->fc.disable_fc_autoneg) { goto out; } else { } (*(hw->mac.ops.check_link))(hw, & speed, & link_up, 0); if (! 
link_up) { goto out; } else { } switch ((unsigned int )hw->phy.media_type) { case 1U: ; if (speed == 32U) { ret_val = ixgbe_fc_autoneg_fiber(hw); } else { } goto ldv_56053; case 5U: ret_val = ixgbe_fc_autoneg_backplane(hw); goto ldv_56053; case 4U: tmp = ixgbe_device_supports_autoneg_fc(hw); if ((int )tmp) { ret_val = ixgbe_fc_autoneg_copper(hw); } else { } goto ldv_56053; default: ; goto ldv_56053; } ldv_56053: ; out: ; if (ret_val == 0) { hw->fc.fc_was_autonegged = 1; } else { hw->fc.fc_was_autonegged = 0; hw->fc.current_mode = hw->fc.requested_mode; } return; } } static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw ) { s16 devctl2 ; u32 pollcnt ; u16 tmp ; { tmp = ixgbe_read_pci_cfg_word(hw, 200U); devctl2 = (s16 )tmp; devctl2 = (int )devctl2 & 15; switch ((int )devctl2) { case 6: pollcnt = 1300U; goto ldv_56063; case 9: pollcnt = 5200U; goto ldv_56063; case 10: pollcnt = 20000U; goto ldv_56063; case 13: pollcnt = 80000U; goto ldv_56063; case 14: pollcnt = 34000U; goto ldv_56063; case 1: ; case 2: ; case 5: ; case 0: ; default: pollcnt = 800U; goto ldv_56063; } ldv_56063: ; return ((pollcnt * 11U) / 10U); } } static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw ) { u32 i ; u32 poll ; u16 value ; u32 tmp ; bool tmp___0 ; u32 tmp___1 ; struct _ddebug descriptor ; long tmp___2 ; bool tmp___3 ; struct _ddebug descriptor___0 ; long tmp___4 ; { ixgbe_write_reg(hw, 0U, 4U); tmp = ixgbe_read_reg(hw, 8U); if ((tmp & 524288U) == 0U) { return (0); } else { tmp___0 = ixgbe_removed((void *)hw->hw_addr); if ((int )tmp___0) { return (0); } else { } } i = 0U; goto ldv_56080; ldv_56079: __const_udelay(429500UL); tmp___1 = ixgbe_read_reg(hw, 8U); if ((tmp___1 & 524288U) == 0U) { return (0); } else { } i = i + 1U; ldv_56080: ; if (i <= 799U) { goto ldv_56079; } else { } descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_disable_pcie_master"; descriptor.filename = 
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "GIO Master Disable bit didn\'t clear - requesting resets\n"; descriptor.lineno = 2476U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "GIO Master Disable bit didn\'t clear - requesting resets\n"); } else { } hw->mac.flags = (u8 )((unsigned int )hw->mac.flags | 1U); poll = ixgbe_pcie_timeout_poll(hw); i = 0U; goto ldv_56085; ldv_56084: __const_udelay(429500UL); value = ixgbe_read_pci_cfg_word(hw, 170U); tmp___3 = ixgbe_removed((void *)hw->hw_addr); if ((int )tmp___3) { return (0); } else { } if (((int )value & 32) == 0) { return (0); } else { } i = i + 1U; ldv_56085: ; if (i < poll) { goto ldv_56084; } else { } descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_disable_pcie_master"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___0.format = "PCIe transaction pending bit also did not clear.\n"; descriptor___0.lineno = 2493U; descriptor___0.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___4 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "PCIe transaction pending bit also did not clear.\n"); } else { } return (-12); } } s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw , u32 mask ) { u32 gssr ; u32 swmask ; u32 fwmask ; u32 timeout ; u32 i ; s32 tmp ; { gssr = 0U; swmask = mask; fwmask = mask << 5; timeout = 200U; 
i = 0U; goto ldv_56098; ldv_56097: tmp = ixgbe_get_eeprom_semaphore(hw); if (tmp != 0) { return (-16); } else { } gssr = ixgbe_read_reg(hw, 65888U); if (((fwmask | swmask) & gssr) == 0U) { gssr = gssr | swmask; ixgbe_write_reg(hw, 65888U, gssr); ixgbe_release_eeprom_semaphore(hw); return (0); } else { ixgbe_release_eeprom_semaphore(hw); usleep_range(5000UL, 10000UL); } i = i + 1U; ldv_56098: ; if (i < timeout) { goto ldv_56097; } else { } if (((fwmask | swmask) & gssr) != 0U) { ixgbe_release_swfw_sync(hw, (fwmask | swmask) & gssr); } else { } usleep_range(5000UL, 10000UL); return (-16); } } void ixgbe_release_swfw_sync(struct ixgbe_hw *hw , u32 mask ) { u32 gssr ; u32 swmask ; { swmask = mask; ixgbe_get_eeprom_semaphore(hw); gssr = ixgbe_read_reg(hw, 65888U); gssr = ~ swmask & gssr; ixgbe_write_reg(hw, 65888U, gssr); ixgbe_release_eeprom_semaphore(hw); return; } } s32 prot_autoc_read_generic(struct ixgbe_hw *hw , bool *locked , u32 *reg_val ) { { *locked = 0; *reg_val = ixgbe_read_reg(hw, 17056U); return (0); } } s32 prot_autoc_write_generic(struct ixgbe_hw *hw , u32 reg_val , bool locked ) { { ixgbe_write_reg(hw, 17056U, reg_val); return (0); } } s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw ) { int i ; int secrxreg ; u32 tmp ; u32 tmp___0 ; struct _ddebug descriptor ; long tmp___1 ; { tmp = ixgbe_read_reg(hw, 36096U); secrxreg = (int )tmp; secrxreg = secrxreg | 2; ixgbe_write_reg(hw, 36096U, (u32 )secrxreg); i = 0; goto ldv_56123; ldv_56122: tmp___0 = ixgbe_read_reg(hw, 36100U); secrxreg = (int )tmp___0; if (secrxreg & 1) { goto ldv_56121; } else { __const_udelay(4295000UL); } i = i + 1; ldv_56123: ; if (i <= 39) { goto ldv_56122; } else { } ldv_56121: ; if (i > 39) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_disable_rx_buff_generic"; descriptor.filename = 
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "Rx unit being enabled before security path fully disabled. Continuing with init.\n"; descriptor.lineno = 2620U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Rx unit being enabled before security path fully disabled. Continuing with init.\n"); } else { } } else { } return (0); } } s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw ) { int secrxreg ; u32 tmp ; { tmp = ixgbe_read_reg(hw, 36096U); secrxreg = (int )tmp; secrxreg = secrxreg & -3; ixgbe_write_reg(hw, 36096U, (u32 )secrxreg); ixgbe_read_reg(hw, 8U); return (0); } } s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw , u32 regval ) { { if ((int )regval & 1) { (*(hw->mac.ops.enable_rx))(hw); } else { (*(hw->mac.ops.disable_rx))(hw); } return (0); } } s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw , u32 index ) { ixgbe_link_speed speed ; bool link_up ; u32 autoc_reg ; u32 tmp ; u32 led_reg ; u32 tmp___0 ; bool locked ; s32 ret_val ; { speed = 0U; link_up = 0; tmp = ixgbe_read_reg(hw, 17056U); autoc_reg = tmp; tmp___0 = ixgbe_read_reg(hw, 512U); led_reg = tmp___0; locked = 0; (*(hw->mac.ops.check_link))(hw, & speed, & link_up, 0); if (! 
link_up) { ret_val = (*(hw->mac.ops.prot_autoc_read))(hw, & locked, & autoc_reg); if (ret_val != 0) { return (ret_val); } else { } autoc_reg = autoc_reg | 4096U; autoc_reg = autoc_reg | 1U; ret_val = (*(hw->mac.ops.prot_autoc_write))(hw, autoc_reg, (int )locked); if (ret_val != 0) { return (ret_val); } else { } ixgbe_read_reg(hw, 8U); usleep_range(10000UL, 20000UL); } else { } led_reg = (u32 )(~ (15 << (int )(index * 8U))) & led_reg; led_reg = (u32 )(128 << (int )(index * 8U)) | led_reg; ixgbe_write_reg(hw, 512U, led_reg); ixgbe_read_reg(hw, 8U); return (0); } } s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw , u32 index ) { u32 autoc_reg ; u32 led_reg ; u32 tmp ; bool locked ; s32 ret_val ; { autoc_reg = 0U; tmp = ixgbe_read_reg(hw, 512U); led_reg = tmp; locked = 0; ret_val = (*(hw->mac.ops.prot_autoc_read))(hw, & locked, & autoc_reg); if (ret_val != 0) { return (ret_val); } else { } autoc_reg = autoc_reg & 4294967294U; autoc_reg = autoc_reg | 4096U; ret_val = (*(hw->mac.ops.prot_autoc_write))(hw, autoc_reg, (int )locked); if (ret_val != 0) { return (ret_val); } else { } led_reg = (u32 )(~ (15 << (int )(index * 8U))) & led_reg; led_reg = (u32 )(~ (128 << (int )(index * 8U))) & led_reg; led_reg = (u32 )(4 << (int )(index * 8U)) | led_reg; ixgbe_write_reg(hw, 512U, led_reg); ixgbe_read_reg(hw, 8U); return (0); } } static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw , u16 *san_mac_offset ) { s32 ret_val ; { ret_val = (*(hw->eeprom.ops.read))(hw, 40, san_mac_offset); if (ret_val != 0) { netdev_err((struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "eeprom read at offset %d failed\n", 40); } else { } return (ret_val); } } s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw , u8 *san_mac_addr ) { u16 san_mac_data ; u16 san_mac_offset ; u8 i ; s32 ret_val ; { ret_val = ixgbe_get_san_mac_addr_offset(hw, & san_mac_offset); if ((ret_val != 0 || (unsigned int )san_mac_offset == 0U) || (unsigned int )san_mac_offset == 65535U) { goto 
san_mac_addr_clr; } else { } (*(hw->mac.ops.set_lan_id))(hw); if ((unsigned int )hw->bus.func != 0U) { san_mac_offset = (unsigned int )san_mac_offset + 3U; } else { san_mac_offset = san_mac_offset; } i = 0U; goto ldv_56167; ldv_56166: ret_val = (*(hw->eeprom.ops.read))(hw, (int )san_mac_offset, & san_mac_data); if (ret_val != 0) { netdev_err((struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "eeprom read at offset %d failed\n", (int )san_mac_offset); goto san_mac_addr_clr; } else { } *(san_mac_addr + (unsigned long )((int )i * 2)) = (unsigned char )san_mac_data; *(san_mac_addr + ((unsigned long )((int )i * 2) + 1UL)) = (unsigned char )((int )san_mac_data >> 8); san_mac_offset = (u16 )((int )san_mac_offset + 1); i = (u8 )((int )i + 1); ldv_56167: ; if ((unsigned int )i <= 2U) { goto ldv_56166; } else { } return (0); san_mac_addr_clr: i = 0U; goto ldv_56170; ldv_56169: *(san_mac_addr + (unsigned long )i) = 255U; i = (u8 )((int )i + 1); ldv_56170: ; if ((unsigned int )i <= 5U) { goto ldv_56169; } else { } return (ret_val); } } u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw ) { u16 msix_count ; u16 max_msix_count ; u16 pcie_offset ; bool tmp ; { switch ((unsigned int )hw->mac.type) { case 1U: pcie_offset = 98U; max_msix_count = 19U; goto ldv_56179; case 2U: ; case 3U: ; case 4U: ; case 5U: pcie_offset = 114U; max_msix_count = 64U; goto ldv_56179; default: ; return (1U); } ldv_56179: msix_count = ixgbe_read_pci_cfg_word(hw, (u32 )pcie_offset); tmp = ixgbe_removed((void *)hw->hw_addr); if ((int )tmp) { msix_count = 0U; } else { } msix_count = (unsigned int )msix_count & 2047U; msix_count = (u16 )((int )msix_count + 1); if ((int )msix_count > (int )max_msix_count) { msix_count = max_msix_count; } else { } return (msix_count); } } s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw , u32 rar , u32 vmdq ) { u32 mpsar_lo ; u32 mpsar_hi ; u32 rar_entries ; struct _ddebug descriptor ; long tmp ; bool tmp___0 ; { rar_entries = 
hw->mac.num_rar_entries; if (rar >= rar_entries) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_clear_vmdq_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "RAR index %d is out of range.\n"; descriptor.lineno = 2874U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "RAR index %d is out of range.\n", rar); } else { } return (-32); } else { } mpsar_lo = ixgbe_read_reg(hw, (rar + 5312U) * 8U); mpsar_hi = ixgbe_read_reg(hw, rar * 8U + 42500U); tmp___0 = ixgbe_removed((void *)hw->hw_addr); if ((int )tmp___0) { return (0); } else { } if (mpsar_lo == 0U && mpsar_hi == 0U) { return (0); } else { } if (vmdq == 4294967295U) { if (mpsar_lo != 0U) { ixgbe_write_reg(hw, (rar + 5312U) * 8U, 0U); mpsar_lo = 0U; } else { } if (mpsar_hi != 0U) { ixgbe_write_reg(hw, rar * 8U + 42500U, 0U); mpsar_hi = 0U; } else { } } else if (vmdq <= 31U) { mpsar_lo = (u32 )(~ (1 << (int )vmdq)) & mpsar_lo; ixgbe_write_reg(hw, (rar + 5312U) * 8U, mpsar_lo); } else { mpsar_hi = (u32 )(~ (1 << (int )(vmdq - 32U))) & mpsar_hi; ixgbe_write_reg(hw, rar * 8U + 42500U, mpsar_hi); } if ((mpsar_lo == 0U && mpsar_hi == 0U) && rar != 0U) { (*(hw->mac.ops.clear_rar))(hw, rar); } else { } return (0); } } s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw , u32 rar , u32 vmdq ) { u32 mpsar ; u32 rar_entries ; struct _ddebug descriptor ; long tmp ; { rar_entries = hw->mac.num_rar_entries; if (rar >= rar_entries) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_set_vmdq_generic"; descriptor.filename = 
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "RAR index %d is out of range.\n"; descriptor.lineno = 2923U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "RAR index %d is out of range.\n", rar); } else { } return (-32); } else { } if (vmdq <= 31U) { mpsar = ixgbe_read_reg(hw, (rar + 5312U) * 8U); mpsar = (u32 )(1 << (int )vmdq) | mpsar; ixgbe_write_reg(hw, (rar + 5312U) * 8U, mpsar); } else { mpsar = ixgbe_read_reg(hw, rar * 8U + 42500U); mpsar = (u32 )(1 << (int )(vmdq - 32U)) | mpsar; ixgbe_write_reg(hw, rar * 8U + 42500U, mpsar); } return (0); } } s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw , u32 vmdq ) { u32 rar ; { rar = (u32 )hw->mac.san_mac_rar_index; if (vmdq <= 31U) { ixgbe_write_reg(hw, (rar + 5312U) * 8U, (u32 )(1 << (int )vmdq)); ixgbe_write_reg(hw, rar * 8U + 42500U, 0U); } else { ixgbe_write_reg(hw, (rar + 5312U) * 8U, 0U); ixgbe_write_reg(hw, rar * 8U + 42500U, (u32 )(1 << (int )(vmdq - 32U))); } return (0); } } s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw ) { int i ; { i = 0; goto ldv_56214; ldv_56213: ixgbe_write_reg(hw, (u32 )((i + 15616) * 4), 0U); i = i + 1; ldv_56214: ; if (i <= 127) { goto ldv_56213; } else { } return (0); } } static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw , u32 vlan ) { u32 bits ; u32 first_empty_slot ; s32 regindex ; struct _ddebug descriptor ; long tmp ; { bits = 0U; first_empty_slot = 0U; if (vlan == 0U) { return (0); } else { } regindex = 1; goto ldv_56225; ldv_56224: bits = ixgbe_read_reg(hw, (u32 )((regindex + 15424) * 4)); if (bits == 0U && first_empty_slot == 0U) { first_empty_slot = (u32 )regindex; } else if ((bits & 4095U) == vlan) { 
goto ldv_56223; } else { } regindex = regindex + 1; ldv_56225: ; if (regindex <= 63) { goto ldv_56224; } else { } ldv_56223: ; if (regindex > 63) { if (first_empty_slot != 0U) { regindex = (s32 )first_empty_slot; } else { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_find_vlvf_slot"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "No space in VLVF.\n"; descriptor.lineno = 3017U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "No space in VLVF.\n"); } else { } regindex = -25; } } else { } return (regindex); } } s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw , u32 vlan , u32 vind , bool vlan_on ) { s32 regindex ; u32 bitindex ; u32 vfta ; u32 bits ; u32 vt ; u32 targetbit ; bool vfta_changed ; s32 vlvf_index ; u32 tmp ; u32 tmp___0 ; { vfta_changed = 0; if (vlan > 4095U) { return (-5); } else { } regindex = (s32 )(vlan >> 5) & 127; bitindex = vlan & 31U; targetbit = (u32 )(1 << (int )bitindex); vfta = ixgbe_read_reg(hw, (u32 )((regindex + 10240) * 4)); if ((int )vlan_on) { if ((vfta & targetbit) == 0U) { vfta = vfta | targetbit; vfta_changed = 1; } else { } } else if ((vfta & targetbit) != 0U) { vfta = ~ targetbit & vfta; vfta_changed = 1; } else { } vt = ixgbe_read_reg(hw, 20912U); if ((int )vt & 1) { vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); if (vlvf_index < 0) { return (vlvf_index); } else { } if ((int )vlan_on) { if (vind <= 31U) { bits = ixgbe_read_reg(hw, (u32 )((vlvf_index + 7744) * 8)); bits = (u32 )(1 << (int )vind) | bits; ixgbe_write_reg(hw, (u32 )((vlvf_index + 7744) * 8), bits); } else { bits = ixgbe_read_reg(hw, (u32 )(vlvf_index * 8 + 61956)); bits = (u32 )(1 
<< (int )(vind - 32U)) | bits; ixgbe_write_reg(hw, (u32 )(vlvf_index * 8 + 61956), bits); } } else if (vind <= 31U) { bits = ixgbe_read_reg(hw, (u32 )((vlvf_index + 7744) * 8)); bits = (u32 )(~ (1 << (int )vind)) & bits; ixgbe_write_reg(hw, (u32 )((vlvf_index + 7744) * 8), bits); tmp = ixgbe_read_reg(hw, (u32 )(vlvf_index * 8 + 61956)); bits = tmp | bits; } else { bits = ixgbe_read_reg(hw, (u32 )(vlvf_index * 8 + 61956)); bits = (u32 )(~ (1 << (int )(vind - 32U))) & bits; ixgbe_write_reg(hw, (u32 )(vlvf_index * 8 + 61956), bits); tmp___0 = ixgbe_read_reg(hw, (u32 )((vlvf_index + 7744) * 8)); bits = tmp___0 | bits; } if (bits != 0U) { ixgbe_write_reg(hw, (u32 )((vlvf_index + 15424) * 4), vlan | 2147483648U); if (! vlan_on) { vfta_changed = 0; } else { } } else { ixgbe_write_reg(hw, (u32 )((vlvf_index + 15424) * 4), 0U); } } else { } if ((int )vfta_changed) { ixgbe_write_reg(hw, (u32 )((regindex + 10240) * 4), vfta); } else { } return (0); } } s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw ) { u32 offset ; { offset = 0U; goto ldv_56247; ldv_56246: ixgbe_write_reg(hw, (offset + 10240U) * 4U, 0U); offset = offset + 1U; ldv_56247: ; if (hw->mac.vft_size > offset) { goto ldv_56246; } else { } offset = 0U; goto ldv_56250; ldv_56249: ixgbe_write_reg(hw, (offset + 15424U) * 4U, 0U); ixgbe_write_reg(hw, (offset + 7744U) * 8U, 0U); ixgbe_write_reg(hw, offset * 8U + 61956U, 0U); offset = offset + 1U; ldv_56250: ; if (offset <= 63U) { goto ldv_56249; } else { } return (0); } } s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw , ixgbe_link_speed *speed , bool *link_up , bool link_up_wait_to_complete ) { u32 links_reg ; u32 links_orig ; u32 i ; struct _ddebug descriptor ; long tmp ; { links_orig = ixgbe_read_reg(hw, 17060U); links_reg = ixgbe_read_reg(hw, 17060U); if (links_orig != links_reg) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_check_mac_link_generic"; descriptor.filename = 
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "LINKS changed from %08X to %08X\n"; descriptor.lineno = 3212U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "LINKS changed from %08X to %08X\n", links_orig, links_reg); } else { } } else { } if ((int )link_up_wait_to_complete) { i = 0U; goto ldv_56265; ldv_56264: ; if ((links_reg & 1073741824U) != 0U) { *link_up = 1; goto ldv_56263; } else { *link_up = 0; } msleep(100U); links_reg = ixgbe_read_reg(hw, 17060U); i = i + 1U; ldv_56265: ; if (i <= 89U) { goto ldv_56264; } else { } ldv_56263: ; } else if ((links_reg & 1073741824U) != 0U) { *link_up = 1; } else { *link_up = 0; } switch (links_reg & 805306368U) { case 805306368U: ; if ((unsigned int )hw->mac.type > 3U && (links_reg & 134217728U) != 0U) { *speed = 1024U; } else { *speed = 128U; } goto ldv_56267; case 536870912U: *speed = 32U; goto ldv_56267; case 268435456U: ; if ((unsigned int )hw->mac.type > 3U && (links_reg & 134217728U) != 0U) { *speed = 2048U; } else { *speed = 8U; } goto ldv_56267; default: *speed = 0U; } ldv_56267: ; return (0); } } s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw , u16 *wwnn_prefix , u16 *wwpn_prefix ) { u16 offset ; u16 caps ; u16 alt_san_mac_blk_offset ; s32 tmp ; s32 tmp___0 ; s32 tmp___1 ; s32 tmp___2 ; { *wwnn_prefix = 65535U; *wwpn_prefix = 65535U; offset = 39U; tmp = (*(hw->eeprom.ops.read))(hw, (int )offset, & alt_san_mac_blk_offset); if (tmp != 0) { goto wwn_prefix_err; } else { } if ((unsigned int )alt_san_mac_blk_offset == 0U || (unsigned int )alt_san_mac_blk_offset == 65535U) { return (0); } else { } offset = alt_san_mac_blk_offset; tmp___0 = 
(*(hw->eeprom.ops.read))(hw, (int )offset, & caps); if (tmp___0 != 0) { goto wwn_prefix_err; } else { } if (((int )caps & 1) == 0) { return (0); } else { } offset = (unsigned int )alt_san_mac_blk_offset + 7U; tmp___1 = (*(hw->eeprom.ops.read))(hw, (int )offset, wwnn_prefix); if (tmp___1 != 0) { netdev_err((struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "eeprom read at offset %d failed\n", (int )offset); } else { } offset = (unsigned int )alt_san_mac_blk_offset + 8U; tmp___2 = (*(hw->eeprom.ops.read))(hw, (int )offset, wwpn_prefix); if (tmp___2 != 0) { goto wwn_prefix_err; } else { } return (0); wwn_prefix_err: netdev_err((struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "eeprom read at offset %d failed\n", (int )offset); return (0); } } void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw , bool enable , int pf ) { int j ; int pf_target_reg ; int pf_target_shift ; u32 pfvfspoof ; { pf_target_reg = pf >> 3; pf_target_shift = pf % 8; pfvfspoof = 0U; if ((unsigned int )hw->mac.type == 1U) { return; } else { } if ((int )enable) { pfvfspoof = 255U; } else { } j = 0; goto ldv_56290; ldv_56289: ixgbe_write_reg(hw, (u32 )((j + 8320) * 4), pfvfspoof); j = j + 1; ldv_56290: ; if (j < pf_target_reg) { goto ldv_56289; } else { } pfvfspoof = (u32 )((1 << pf_target_shift) + -1) & pfvfspoof; ixgbe_write_reg(hw, (u32 )((j + 8320) * 4), pfvfspoof); j = j + 1; goto ldv_56293; ldv_56292: ixgbe_write_reg(hw, (u32 )((j + 8320) * 4), 0U); j = j + 1; ldv_56293: ; if (j <= 7) { goto ldv_56292; } else { } return; } } void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw , bool enable , int vf ) { int vf_target_reg ; int vf_target_shift ; u32 pfvfspoof ; { vf_target_reg = vf >> 3; vf_target_shift = vf % 8 + 8; if ((unsigned int )hw->mac.type == 1U) { return; } else { } pfvfspoof = ixgbe_read_reg(hw, (u32 )((vf_target_reg + 8320) * 4)); if ((int )enable) { pfvfspoof = (u32 )(1 << vf_target_shift) | pfvfspoof; } else { pfvfspoof = (u32 )(~ (1 << 
vf_target_shift)) & pfvfspoof; } ixgbe_write_reg(hw, (u32 )((vf_target_reg + 8320) * 4), pfvfspoof); return; } } s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw , u16 *device_caps ) { { (*(hw->eeprom.ops.read))(hw, 44, device_caps); return (0); } } void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw , int num_pb , u32 headroom , int strategy ) { u32 pbsize ; int i ; u32 rxpktsize ; u32 txpktsize ; u32 txpbthresh ; { pbsize = hw->mac.rx_pb_size; i = 0; pbsize = pbsize - headroom; if (num_pb == 0) { num_pb = 1; } else { } switch (strategy) { case 1: rxpktsize = (pbsize * 10U) / (u32 )(num_pb * 8); pbsize = pbsize - (u32 )(num_pb / 2) * rxpktsize; rxpktsize = rxpktsize << 10; goto ldv_56320; ldv_56319: ixgbe_write_reg(hw, (u32 )((i + 3840) * 4), rxpktsize); i = i + 1; ldv_56320: ; if (num_pb / 2 > i) { goto ldv_56319; } else { } case 0: rxpktsize = pbsize / (u32 )(num_pb - i) << 10; goto ldv_56324; ldv_56323: ixgbe_write_reg(hw, (u32 )((i + 3840) * 4), rxpktsize); i = i + 1; ldv_56324: ; if (i < num_pb) { goto ldv_56323; } else { } goto ldv_56326; default: ; goto ldv_56326; } ldv_56326: txpktsize = (u32 )(163840 / num_pb); txpbthresh = txpktsize / 1024U - 10U; i = 0; goto ldv_56329; ldv_56328: ixgbe_write_reg(hw, (u32 )((i + 13056) * 4), txpktsize); ixgbe_write_reg(hw, (u32 )((i + 4692) * 4), txpbthresh); i = i + 1; ldv_56329: ; if (i < num_pb) { goto ldv_56328; } else { } goto ldv_56332; ldv_56331: ixgbe_write_reg(hw, (u32 )((i + 3840) * 4), 0U); ixgbe_write_reg(hw, (u32 )((i + 13056) * 4), 0U); ixgbe_write_reg(hw, (u32 )((i + 4692) * 4), 0U); i = i + 1; ldv_56332: ; if (i <= 7) { goto ldv_56331; } else { } return; } } static u8 ixgbe_calculate_checksum(u8 *buffer , u32 length ) { u32 i ; u8 sum ; { sum = 0U; if ((unsigned long )buffer == (unsigned long )((u8 *)0U)) { return (0U); } else { } i = 0U; goto ldv_56341; ldv_56340: sum = (int )*(buffer + (unsigned long )i) + (int )sum; i = i + 1U; ldv_56341: ; if (i < length) { goto ldv_56340; } else { } return (- ((int 
)sum)); } } s32 ixgbe_host_interface_command(struct ixgbe_hw *hw , u32 *buffer , u32 length , u32 timeout , bool return_data ) { u32 hicr ; u32 i ; u32 bi ; u32 fwsts ; u32 hdr_size ; u16 buf_len ; u16 dword_len ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; struct _ddebug descriptor___2 ; long tmp___2 ; u32 tmp___3 ; struct _ddebug descriptor___3 ; long tmp___4 ; { hdr_size = 4U; if (length == 0U || length > 1792U) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_host_interface_command"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor.format = "Buffer length failure buffersize-%d.\n"; descriptor.lineno = 3505U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Buffer length failure buffersize-%d.\n", length); } else { } return (-33); } else { } fwsts = ixgbe_read_reg(hw, 89868U); ixgbe_write_reg(hw, 89868U, fwsts | 512U); hicr = ixgbe_read_reg(hw, 89856U); if ((hicr & 1U) == 0U) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_host_interface_command"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___0.format = "IXGBE_HOST_EN bit disabled.\n"; descriptor___0.lineno = 3516U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct 
ixgbe_adapter *)hw->back)->netdev, "IXGBE_HOST_EN bit disabled.\n"); } else { } return (-33); } else { } if ((length & 3U) != 0U) { descriptor___1.modname = "ixgbe"; descriptor___1.function = "ixgbe_host_interface_command"; descriptor___1.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___1.format = "Buffer length failure, not aligned to dword"; descriptor___1.lineno = 3522U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Buffer length failure, not aligned to dword"); } else { } return (-32); } else { } dword_len = (u16 )(length >> 2); i = 0U; goto ldv_56362; ldv_56361: ixgbe_write_reg(hw, (i << 2) + 88064U, *(buffer + (unsigned long )i)); i = i + 1U; ldv_56362: ; if ((u32 )dword_len > i) { goto ldv_56361; } else { } ixgbe_write_reg(hw, 89856U, hicr | 2U); i = 0U; goto ldv_56366; ldv_56365: hicr = ixgbe_read_reg(hw, 89856U); if ((hicr & 2U) == 0U) { goto ldv_56364; } else { } usleep_range(1000UL, 2000UL); i = i + 1U; ldv_56366: ; if (i < timeout) { goto ldv_56365; } else { } ldv_56364: ; if (timeout != 0U && i == timeout) { goto _L; } else { tmp___3 = ixgbe_read_reg(hw, 89856U); if ((tmp___3 & 4U) == 0U) { _L: /* CIL Label */ descriptor___2.modname = "ixgbe"; descriptor___2.function = "ixgbe_host_interface_command"; descriptor___2.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___2.format = "Command has failed with no status valid.\n"; descriptor___2.lineno = 3549U; descriptor___2.flags = 
0U; tmp___2 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Command has failed with no status valid.\n"); } else { } return (-33); } else { } } if (! return_data) { return (0); } else { } dword_len = (u16 )(hdr_size >> 2); bi = 0U; goto ldv_56369; ldv_56368: *(buffer + (unsigned long )bi) = ixgbe_read_reg(hw, (bi << 2) + 88064U); bi = bi + 1U; ldv_56369: ; if ((u32 )dword_len > bi) { goto ldv_56368; } else { } buf_len = (u16 )((struct ixgbe_hic_hdr *)buffer)->buf_len; if ((unsigned int )buf_len == 0U) { return (0); } else { } if ((u32 )buf_len + hdr_size > length) { descriptor___3.modname = "ixgbe"; descriptor___3.function = "ixgbe_host_interface_command"; descriptor___3.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c"; descriptor___3.format = "Buffer not large enough for reply message.\n"; descriptor___3.lineno = 3571U; descriptor___3.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___4 != 0L) { __dynamic_netdev_dbg(& descriptor___3, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Buffer not large enough for reply message.\n"); } else { } return (-33); } else { } dword_len = (u16 )(((int )buf_len + 3) >> 2); goto ldv_56373; ldv_56372: *(buffer + (unsigned long )bi) = ixgbe_read_reg(hw, (bi << 2) + 88064U); bi = bi + 1U; ldv_56373: ; if ((u32 )dword_len >= bi) { goto ldv_56372; } else { } return (0); } } s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw , u8 maj , u8 min , u8 build , u8 sub ) { struct ixgbe_hic_drv_info fw_cmd ; int i ; s32 ret_val ; s32 tmp ; { tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, 1024U); if (tmp != 0) { return (-16); } else { } fw_cmd.hdr.cmd 
/* Continuation of ixgbe_set_fw_drv_ver_generic(): populate the firmware
 * "driver info" host-interface command, checksum it, and submit it with a
 * retry loop.  (CIL-flattened output: loops appear as goto/label chains.) */
= 221U; /* hdr.cmd = 0xDD -- presumably FW_CEM_CMD_DRIVER_INFO; TODO confirm against ixgbe_type.h */
fw_cmd.hdr.buf_len = 5U;
fw_cmd.hdr.cmd_or_resp.cmd_resv = 0U;
fw_cmd.port_num = (unsigned char )hw->bus.func;
fw_cmd.ver_maj = maj;
fw_cmd.ver_min = min;
fw_cmd.ver_build = build;
fw_cmd.ver_sub = sub;
/* The checksum is computed over the command with the checksum byte itself
 * zeroed first. */
fw_cmd.hdr.checksum = 0U;
fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)(& fw_cmd), (u32 )((int )fw_cmd.hdr.buf_len + 4));
fw_cmd.pad = 0U;
fw_cmd.pad2 = 0U;
/* Retry loop: up to 4 attempts (i = 0..3). */
i = 0;
goto ldv_56388;
ldv_56387:
ret_val = ixgbe_host_interface_command(hw, (u32 *)(& fw_cmd), 12U, 500U, 1);
if (ret_val != 0) {
goto ldv_56385;
} else {
}
/* ret_status == 1 means the firmware accepted the command; anything else
 * maps to the -33 error code used throughout this file. */
if ((unsigned int )fw_cmd.hdr.cmd_or_resp.ret_status == 1U) {
ret_val = 0;
} else {
ret_val = -33;
}
goto ldv_56386;
ldv_56385:
i = i + 1;
ldv_56388: ;
if (i <= 3) {
goto ldv_56387;
} else {
}
ldv_56386:
/* Release the SW/FW semaphore (mask 0x400) acquired at function entry. */
(*(hw->mac.ops.release_swfw_sync))(hw, 1024U);
return (ret_val);
}
}
/* Quiesce pending Tx before a reset: set a bit in HLREG0, poll PCI config
 * space until pending transactions drain (or the budget from
 * ixgbe_pcie_timeout_poll() is exhausted), pulse a GCR_EXT bit, then
 * restore both registers to their saved values. */
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw ) {
u32 gcr_ext ;
u32 hlreg0 ;
u32 i ;
u32 poll ;
u16 value ;
bool tmp ;
{
/* Nothing to do unless mac.flags bit 0 is set. */
if (((int )hw->mac.flags & 1) == 0) {
return;
} else {
}
/* Set bit 15 (0x8000) in HLREG0 (offset 0x4240); the upstream driver uses
 * this as the MAC loopback enable -- confirm. */
hlreg0 = ixgbe_read_reg(hw, 16960U);
ixgbe_write_reg(hw, 16960U, hlreg0 | 32768U);
ixgbe_read_reg(hw, 8U); /* read of offset 8 flushes the posted write -- presumably; confirm */
usleep_range(3000UL, 6000UL);
poll = ixgbe_pcie_timeout_poll(hw);
/* Poll the PCI config word at 0xAA until bit 5 clears (presumably PCIe
 * Device Status "transactions pending"), the adapter is surprise-removed,
 * or we run out of poll iterations. */
i = 0U;
goto ldv_56399;
ldv_56398:
usleep_range(100UL, 200UL);
value = ixgbe_read_pci_cfg_word(hw, 170U);
tmp = ixgbe_removed((void *)hw->hw_addr);
if ((int )tmp) {
goto ldv_56397;
} else {
}
if (((int )value & 32) == 0) {
goto ldv_56397;
} else {
}
i = i + 1U;
ldv_56399: ;
if (i < poll) {
goto ldv_56398;
} else {
}
ldv_56397:
/* Pulse bit 30 in GCR_EXT (offset 0x11050), delay (~20 us via
 * __const_udelay), then restore GCR_EXT and HLREG0. */
gcr_ext = ixgbe_read_reg(hw, 69712U);
ixgbe_write_reg(hw, 69712U, gcr_ext | 1073741824U);
ixgbe_read_reg(hw, 8U);
__const_udelay(85900UL);
ixgbe_write_reg(hw, 69712U, gcr_ext);
ixgbe_write_reg(hw, 16960U, hlreg0);
return;
}
}
/* Per-sensor EMC device register addresses, indexed by the 2-bit sensor
 * index decoded from each EEPROM ETS sensor word. */
static u8 const ixgbe_emc_temp_data[4U] = { 0U, 1U, 35U, 42U};
static u8 const ixgbe_emc_therm_limit[4U] = { 32U, 25U, 26U, 48U};
/* Fetch the ETS (thermal sensor) block from EEPROM: word 38 holds a pointer
 * to the block, and the block's first word is the configuration (*ets_cfg).
 * Returns 0x7FFFFFFF when no valid ETS data is programmed. */
static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw , u16 *ets_cfg , u16 *ets_offset ) {
s32 status ;
{
status = (*(hw->eeprom.ops.read))(hw, 38,
ets_offset);
if (status != 0) {
return (status);
} else {
}
/* Pointer values 0x0000 and 0xFFFF mean "no ETS section present". */
if ((unsigned int )*ets_offset == 0U || (unsigned int )*ets_offset == 65535U) {
return (2147483647);
} else {
}
status = (*(hw->eeprom.ops.read))(hw, (int )*ets_offset, ets_cfg);
if (status != 0) {
return (status);
} else {
}
/* Bits 3..5 (mask 0x38) of the config word must be zero for a layout this
 * code understands. */
if (((int )*ets_cfg & 56) != 0) {
return (2147483647);
} else {
}
return (0);
}
}
/* Read the current temperature of each EEPROM-described thermal sensor over
 * I2C (via phy.ops.read_i2c_byte) into hw->mac.thermal_sensor_data.
 * Returns 0x7FFFFFFF when STATUS bit 2 is set or no ETS data exists. */
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw ) {
s32 status ;
u16 ets_offset ;
u16 ets_cfg ;
u16 ets_sensor ;
u8 num_sensors ;
u8 i ;
struct ixgbe_thermal_sensor_data *data ;
u32 tmp ;
u8 sensor_index ;
u8 sensor_location ;
{
data = & hw->mac.thermal_sensor_data;
/* STATUS register (offset 8) bit 2 set -> bail out (presumably LAN_ID_1,
 * i.e. thermal sensors are serviced by function 0 only; confirm). */
tmp = ixgbe_read_reg(hw, 8U);
if ((tmp & 4U) != 0U) {
return (2147483647);
} else {
}
status = ixgbe_get_ets_data(hw, & ets_cfg, & ets_offset);
if (status != 0) {
return (status);
} else {
}
/* Low 3 bits of the config word = sensor count, capped at 3. */
num_sensors = (unsigned int )((u8 )ets_cfg) & 7U;
if ((unsigned int )num_sensors > 3U) {
num_sensors = 3U;
} else {
}
i = 0U;
goto ldv_56421;
ldv_56420:
/* Sensor words follow the config word, at EEPROM offset ets_offset+1+i. */
status = (*(hw->eeprom.ops.read))(hw, (int )((unsigned int )((int )((u16 )i) + (int )ets_offset) + 1U), & ets_sensor);
if (status != 0) {
return (status);
} else {
}
/* Sensor word layout: bits 8..9 = sensor index, bits 10..13 = location. */
sensor_index = (u8 )(((int )ets_sensor & 768) >> 8);
sensor_location = (u8 )(((int )ets_sensor & 15360) >> 10);
/* Location 0 means the sensor is not connected: skip the I2C read. */
if ((unsigned int )sensor_location != 0U) {
status = (*(hw->phy.ops.read_i2c_byte))(hw, (int )ixgbe_emc_temp_data[(int )sensor_index], 248, & data->sensor[(int )i].temp);
if (status != 0) {
return (status);
} else {
}
} else {
}
i = (u8 )((int )i + 1);
ldv_56421: ;
if ((int )i < (int )num_sensors) {
goto ldv_56420;
} else {
}
return (0);
}
}
/* Program each sensor's thermal alarm limit into the EMC device over I2C and
 * record location / caution / max-operating thresholds in
 * hw->mac.thermal_sensor_data (zeroed first). */
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw ) {
s32 status ;
u16 ets_offset ;
u16 ets_cfg ;
u16 ets_sensor ;
u8 low_thresh_delta ;
u8 num_sensors ;
u8 therm_limit ;
u8 i ;
struct ixgbe_thermal_sensor_data *data ;
u32 tmp ;
u8 sensor_index ;
u8 sensor_location ;
s32 tmp___0 ;
{
data = & hw->mac.thermal_sensor_data;
memset((void *)data, 0, 12UL);
tmp =
ixgbe_read_reg(hw, 8U);
/* STATUS (offset 8) bit 2 set -> not this function's job (presumably
 * LAN_ID_1; confirm against ixgbe_type.h). */
if ((tmp & 4U) != 0U) {
return (2147483647);
} else {
}
status = ixgbe_get_ets_data(hw, & ets_cfg, & ets_offset);
if (status != 0) {
return (status);
} else {
}
/* Config word: bits 6..10 (mask 0x7C0) = low-threshold delta,
 * low 3 bits = sensor count (capped at 3). */
low_thresh_delta = (u8 )(((int )ets_cfg & 1984) >> 6);
num_sensors = (unsigned int )((u8 )ets_cfg) & 7U;
if ((unsigned int )num_sensors > 3U) {
num_sensors = 3U;
} else {
}
i = 0U;
goto ldv_56439;
ldv_56438:
tmp___0 = (*(hw->eeprom.ops.read))(hw, (int )((unsigned int )((int )((u16 )i) + (int )ets_offset) + 1U), & ets_sensor);
if (tmp___0 != 0) {
/* An EEPROM read failure is logged but non-fatal: continue with the
 * next sensor word. */
netdev_err((struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "eeprom read at offset %d failed\n", ((int )ets_offset + 1) + (int )i);
goto ldv_56437;
} else {
}
/* Sensor word: bits 8..9 = index, bits 10..13 = location, low byte =
 * thermal limit. */
sensor_index = (u8 )(((int )ets_sensor & 768) >> 8);
sensor_location = (u8 )(((int )ets_sensor & 15360) >> 10);
therm_limit = (u8 )ets_sensor;
/* Always program the EMC limit register; cache per-sensor data only when
 * the sensor reports a non-zero location. */
(*(hw->phy.ops.write_i2c_byte))(hw, (int )ixgbe_emc_therm_limit[(int )sensor_index], 248, (int )therm_limit);
if ((unsigned int )sensor_location == 0U) {
goto ldv_56437;
} else {
}
data->sensor[(int )i].location = sensor_location;
data->sensor[(int )i].caution_thresh = therm_limit;
data->sensor[(int )i].max_op_thresh = (int )therm_limit - (int )low_thresh_delta;
ldv_56437:
i = (u8 )((int )i + 1);
ldv_56439: ;
if ((int )i < (int )num_sensors) {
goto ldv_56438;
} else {
}
return (0);
}
}
/* Disable the Rx data path: clear bit 0 of RXCTRL (offset 0x3000).  On
 * mac.type != 1 parts, first clear bit 0 of PFDTXGSWC (offset 0x8220) and
 * remember in mac.set_lben whether it was set, so the enable path can
 * restore it. */
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw ) {
u32 rxctrl ;
u32 pfdtxgswc ;
{
rxctrl = ixgbe_read_reg(hw, 12288U);
if ((int )rxctrl & 1) {
if ((unsigned int )hw->mac.type != 1U) {
pfdtxgswc = ixgbe_read_reg(hw, 33312U);
if ((int )pfdtxgswc & 1) {
pfdtxgswc = pfdtxgswc & 4294967294U;
ixgbe_write_reg(hw, 33312U, pfdtxgswc);
hw->mac.set_lben = 1;
} else {
hw->mac.set_lben = 0;
}
} else {
}
rxctrl = rxctrl & 4294967294U;
ixgbe_write_reg(hw, 12288U, rxctrl);
} else {
}
return;
}
}
/* Re-enable the Rx data path (set RXCTRL bit 0), then restore PFDTXGSWC
 * bit 0 if ixgbe_disable_rx_generic() had cleared it (mac.set_lben). */
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw ) {
u32 rxctrl ;
u32 pfdtxgswc ;
{
rxctrl = ixgbe_read_reg(hw, 12288U);
ixgbe_write_reg(hw, 12288U, rxctrl | 1U);
/* Tail of ixgbe_enable_rx_generic(): restore bit 0 of PFDTXGSWC (offset
 * 0x8220) -- LBEN, per the set_lben flag name -- if the disable path had
 * cleared it. */
if ((unsigned int )hw->mac.type != 1U) {
if ((int )hw->mac.set_lben) {
pfdtxgswc = ixgbe_read_reg(hw, 33312U);
pfdtxgswc = pfdtxgswc | 1U;
ixgbe_write_reg(hw, 33312U, pfdtxgswc);
hw->mac.set_lben = 0;
} else {
}
} else {
}
return;
}
}
/* ------------------------------------------------------------------------
 * LDV verification-harness wrappers.  Each forwards to the real kernel
 * workqueue API and then notifies the verifier's work-item model
 * (activate_work_9 / call_and_disable_all_9) so outstanding work can be
 * tracked during model checking.
 * ------------------------------------------------------------------------ */
bool ldv_queue_work_on_104(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) {
ldv_func_ret_type___2 ldv_func_res ;
bool tmp ;
{
tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
ldv_func_res = tmp;
activate_work_9(ldv_func_arg3, 2);
return (ldv_func_res);
}
}
bool ldv_queue_delayed_work_on_105(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) {
ldv_func_ret_type___3 ldv_func_res ;
bool tmp ;
{
tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4);
ldv_func_res = tmp;
activate_work_9(& ldv_func_arg3->work, 2);
return (ldv_func_res);
}
}
bool ldv_queue_work_on_106(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) {
ldv_func_ret_type___4 ldv_func_res ;
bool tmp ;
{
tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
ldv_func_res = tmp;
activate_work_9(ldv_func_arg3, 2);
return (ldv_func_res);
}
}
/* Flushing the queue lets the model run and retire all pending work. */
void ldv_flush_workqueue_107(struct workqueue_struct *ldv_func_arg1 ) {
{
flush_workqueue(ldv_func_arg1);
call_and_disable_all_9(2);
return;
}
}
bool ldv_queue_delayed_work_on_108(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) {
ldv_func_ret_type___5 ldv_func_res ;
bool tmp ;
{
tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4);
ldv_func_res = tmp;
activate_work_9(& ldv_func_arg3->work, 2);
return (ldv_func_res);
}
}
/* Allocation stub: validate gfp flags for the verifier, then return a
 * nondeterministic pointer instead of a real allocation. */
void *ldv_kmem_cache_alloc_114(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) {
void *tmp ;
{
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return (tmp);
}
}
int
/* LDV skb/allocation stubs: each checks the gfp flags against the verifier's
 * allocation-context model and returns a nondeterministic value in place of
 * the real kernel call. */
ldv_pskb_expand_head_120(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) {
void *tmp ;
{
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((int )((long )tmp));
}
}
struct sk_buff *ldv_skb_clone_122(struct sk_buff *ldv_func_arg1 , gfp_t flags ) {
void *tmp ;
{
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((struct sk_buff *)tmp);
}
}
struct sk_buff *ldv_skb_copy_124(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) {
void *tmp ;
{
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((struct sk_buff *)tmp);
}
}
struct sk_buff *ldv___netdev_alloc_skb_125(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) {
void *tmp ;
{
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((struct sk_buff *)tmp);
}
}
struct sk_buff *ldv___netdev_alloc_skb_126(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) {
void *tmp ;
{
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((struct sk_buff *)tmp);
}
}
struct sk_buff *ldv___netdev_alloc_skb_127(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) {
void *tmp ;
{
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((struct sk_buff *)tmp);
}
}
int ldv_pskb_expand_head_128(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) {
void *tmp ;
{
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((int )((long )tmp));
}
}
int ldv_pskb_expand_head_129(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) {
void *tmp ;
{
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((int )((long )tmp));
}
}
struct sk_buff *ldv_skb_clone_130(struct sk_buff *ldv_func_arg1 , gfp_t flags ) {
void *tmp ;
{
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((struct sk_buff *)tmp);
}
}
void *ldv_kmem_cache_alloc_131(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) {
void *tmp ;
{
ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static __u32 __arch_swab32(__u32 val ) { { __asm__ ("bswapl %0": "=r" (val): "0" (val)); return (val); } } __inline static __u32 __fswab32(__u32 val ) { __u32 tmp ; { tmp = __arch_swab32(val); return (tmp); } } extern void __might_sleep(char const * , int , int ) ; extern int sprintf(char * , char const * , ...) ; __inline static void INIT_HLIST_NODE(struct hlist_node *h ) { { h->next = (struct hlist_node *)0; h->pprev = (struct hlist_node **)0; return; } } __inline static void hlist_add_head(struct hlist_node *n , struct hlist_head *h ) { struct hlist_node *first ; { first = h->first; n->next = first; if ((unsigned long )first != (unsigned long )((struct hlist_node *)0)) { first->pprev = & n->next; } else { } h->first = n; n->pprev = & h->first; return; } } __inline static void hlist_add_behind(struct hlist_node *n , struct hlist_node *prev ) { { n->next = prev->next; prev->next = n; n->pprev = & prev->next; if ((unsigned long )n->next != (unsigned long )((struct hlist_node *)0)) { (n->next)->pprev = & n->next; } else { } return; } } extern int memcmp(void const * , void const * , size_t ) ; __inline static void spin_lock(spinlock_t *lock ) ; __inline static void spin_unlock(spinlock_t *lock ) ; bool ldv_queue_work_on_151(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_153(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_152(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_155(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_154(struct workqueue_struct 
*ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_161(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_178(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; __inline static void *kzalloc(size_t size , gfp_t flags ) ; __inline static void *kzalloc(size_t size , gfp_t flags ) ; int ldv_irq_6(int state , int line , void *data ) ; void activate_suitable_irq_6(int line , void *data ) ; void choose_interrupt_5(void) ; void disable_suitable_irq_5(int line , void *data ) ; int ldv_irq_5(int state , int line , void *data ) ; int reg_check_6(irqreturn_t (*handler)(int , void * ) ) ; void choose_interrupt_6(void) ; void disable_suitable_irq_6(int line , void *data ) ; void activate_suitable_irq_5(int line , void *data ) ; int ldv_irq_7(int state , int line , void *data ) ; void choose_interrupt_7(void) ; int reg_check_5(irqreturn_t (*handler)(int , void * ) ) ; __inline static int ldv_request_irq_43(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; __inline static int ldv_request_irq_44(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; __inline static int ldv_request_irq_46(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; void ldv_free_irq_183(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ; __inline static bool device_can_wakeup(struct device *dev ) { { return ((int )dev->power.can_wakeup != 0); } } extern void debug_dma_sync_single_for_cpu(struct device * , dma_addr_t , size_t , int ) ; extern void debug_dma_sync_single_for_device(struct device * , dma_addr_t , size_t , int ) ; __inline static void dma_sync_single_for_cpu(struct device *dev , dma_addr_t addr , size_t size , enum dma_data_direction dir ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int 
)dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (108), "i" (12UL)); ldv_28561: ; goto ldv_28561; } else { } if ((unsigned long )ops->sync_single_for_cpu != (unsigned long )((void (*)(struct device * , dma_addr_t , size_t , enum dma_data_direction ))0)) { (*(ops->sync_single_for_cpu))(dev, addr, size, dir); } else { } debug_dma_sync_single_for_cpu(dev, addr, size, (int )dir); return; } } __inline static void dma_sync_single_for_device(struct device *dev , dma_addr_t addr , size_t size , enum dma_data_direction dir ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (120), "i" (12UL)); ldv_28569: ; goto ldv_28569; } else { } if ((unsigned long )ops->sync_single_for_device != (unsigned long )((void (*)(struct device * , dma_addr_t , size_t , enum dma_data_direction ))0)) { (*(ops->sync_single_for_device))(dev, addr, size, dir); } else { } debug_dma_sync_single_for_device(dev, addr, size, (int )dir); return; } } extern unsigned long msleep_interruptible(unsigned int ) ; extern void kfree_skb(struct sk_buff * ) ; __inline static struct sk_buff *alloc_skb(unsigned int size , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_169(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_177(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_171(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_167(struct 
sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_175(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_176(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; extern unsigned char *skb_put(struct sk_buff * , unsigned int ) ; struct sk_buff *ldv___netdev_alloc_skb_172(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_173(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_174(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; __inline static void ethtool_cmd_speed_set(struct ethtool_cmd *ep , __u32 speed ) { { ep->speed = (unsigned short )speed; ep->speed_hi = (unsigned short )(speed >> 16); return; } } __inline static __u32 ethtool_cmd_speed(struct ethtool_cmd const *ep ) { { return ((__u32 )(((int )ep->speed_hi << 16) | (int )ep->speed)); } } __inline static __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie ) { { return (ring_cookie & 4294967295ULL); } } __inline static __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie ) { { return ((ring_cookie & 1095216660480ULL) >> 32); } } extern u32 ethtool_op_get_link(struct net_device * ) ; extern int ethtool_op_get_ts_info(struct net_device * , struct ethtool_ts_info * ) ; extern int dev_open(struct net_device * ) ; extern int dev_close(struct net_device * ) ; extern struct rtnl_link_stats64 *dev_get_stats(struct net_device * , struct rtnl_link_stats64 * ) ; void *ldv_vmalloc_179(unsigned long ldv_func_arg1 ) ; __inline static void *kmap(struct page *page ) { void *tmp ; { __might_sleep("include/linux/highmem.h", 58, 0); tmp = lowmem_page_address((struct page const *)page); return (tmp); } } __inline static void kunmap(struct page *page ) { { return; } } extern int ptp_clock_index(struct ptp_clock * ) ; 
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw , union ixgbe_atr_input *input , u16 soft_id ) ; void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input , union ixgbe_atr_input *input_mask ) ; static struct ixgbe_stats const ixgbe_gstrings_stats[57U] = { {{'r', 'x', '_', 'p', 'a', 'c', 'k', 'e', 't', 's', '\000'}, 0, 8, 0}, {{'t', 'x', '_', 'p', 'a', 'c', 'k', 'e', 't', 's', '\000'}, 0, 8, 8}, {{'r', 'x', '_', 'b', 'y', 't', 'e', 's', '\000'}, 0, 8, 16}, {{'t', 'x', '_', 'b', 'y', 't', 'e', 's', '\000'}, 0, 8, 24}, {{'r', 'x', '_', 'p', 'k', 't', 's', '_', 'n', 'i', 'c', '\000'}, 1, 8, 39032}, {{'t', 'x', '_', 'p', 'k', 't', 's', '_', 'n', 'i', 'c', '\000'}, 1, 8, 39056}, {{'r', 'x', '_', 'b', 'y', 't', 'e', 's', '_', 'n', 'i', 'c', '\000'}, 1, 8, 39064}, {{'t', 'x', '_', 'b', 'y', 't', 'e', 's', '_', 'n', 'i', 'c', '\000'}, 1, 8, 39072}, {{'l', 's', 'c', '_', 'i', 'n', 't', '\000'}, 1, 8, 1096}, {{'t', 'x', '_', 'b', 'u', 's', 'y', '\000'}, 1, 8, 40336}, {{'n', 'o', 'n', '_', 'e', 'o', 'p', '_', 'd', 'e', 's', 'c', 's', '\000'}, 1, 8, 1664}, {{'r', 'x', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 0, 8, 32}, {{'t', 'x', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 0, 8, 40}, {{'r', 'x', '_', 'd', 'r', 'o', 'p', 'p', 'e', 'd', '\000'}, 0, 8, 48}, {{'t', 'x', '_', 'd', 'r', 'o', 'p', 'p', 'e', 'd', '\000'}, 0, 8, 56}, {{'m', 'u', 'l', 't', 'i', 'c', 'a', 's', 't', '\000'}, 0, 8, 64}, {{'b', 'r', 'o', 'a', 'd', 'c', 'a', 's', 't', '\000'}, 1, 8, 39040}, {{'r', 'x', '_', 'n', 'o', '_', 'b', 'u', 'f', 'f', 'e', 'r', '_', 'c', 'o', 'u', 'n', 't', '\000'}, 1, 8, 39080}, {{'c', 'o', 'l', 'l', 'i', 's', 'i', 'o', 'n', 's', '\000'}, 0, 8, 72}, {{'r', 'x', '_', 'o', 'v', 'e', 'r', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 0, 8, 88}, {{'r', 'x', '_', 'c', 'r', 'c', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 0, 8, 96}, {{'r', 'x', '_', 'f', 'r', 'a', 'm', 'e', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 0, 8, 104}, {{'h', 'w', '_', 'r', 's', 'c', 
'_', 'a', 'g', 'g', 'r', 'e', 'g', 'a', 't', 'e', 'd', '\000'}, 1, 8, 1648}, {{'h', 'w', '_', 'r', 's', 'c', '_', 'f', 'l', 'u', 's', 'h', 'e', 'd', '\000'}, 1, 8, 1656}, {{'f', 'd', 'i', 'r', '_', 'm', 'a', 't', 'c', 'h', '\000'}, 1, 8, 40224}, {{'f', 'd', 'i', 'r', '_', 'm', 'i', 's', 's', '\000'}, 1, 8, 40232}, {{'f', 'd', 'i', 'r', '_', 'o', 'v', 'e', 'r', 'f', 'l', 'o', 'w', '\000'}, 1, 8, 40584}, {{'r', 'x', '_', 'f', 'i', 'f', 'o', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 0, 8, 112}, {{'r', 'x', '_', 'm', 'i', 's', 's', 'e', 'd', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 0, 8, 120}, {{'t', 'x', '_', 'a', 'b', 'o', 'r', 't', 'e', 'd', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 0, 8, 128}, {{'t', 'x', '_', 'c', 'a', 'r', 'r', 'i', 'e', 'r', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 0, 8, 136}, {{'t', 'x', '_', 'f', 'i', 'f', 'o', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 0, 8, 144}, {{'t', 'x', '_', 'h', 'e', 'a', 'r', 't', 'b', 'e', 'a', 't', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 0, 8, 152}, {{'t', 'x', '_', 't', 'i', 'm', 'e', 'o', 'u', 't', '_', 'c', 'o', 'u', 'n', 't', '\000'}, 1, 4, 1104}, {{'t', 'x', '_', 'r', 'e', 's', 't', 'a', 'r', 't', '_', 'q', 'u', 'e', 'u', 'e', '\000'}, 1, 8, 1088}, {{'r', 'x', '_', 'l', 'o', 'n', 'g', '_', 'l', 'e', 'n', 'g', 't', 'h', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 1, 8, 39160}, {{'r', 'x', '_', 's', 'h', 'o', 'r', 't', '_', 'l', 'e', 'n', 'g', 't', 'h', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 1, 8, 39144}, {{'t', 'x', '_', 'f', 'l', 'o', 'w', '_', 'c', 'o', 'n', 't', 'r', 'o', 'l', '_', 'x', 'o', 'n', '\000'}, 1, 8, 38696}, {{'r', 'x', '_', 'f', 'l', 'o', 'w', '_', 'c', 'o', 'n', 't', 'r', 'o', 'l', '_', 'x', 'o', 'n', '\000'}, 1, 8, 38704}, {{'t', 'x', '_', 'f', 'l', 'o', 'w', '_', 'c', 'o', 'n', 't', 'r', 'o', 'l', '_', 'x', 'o', 'f', 'f', '\000'}, 1, 8, 38712}, {{'r', 'x', '_', 'f', 'l', 'o', 'w', '_', 'c', 'o', 'n', 't', 'r', 'o', 'l', '_', 'x', 'o', 'f', 'f', '\000'}, 1, 8, 38720}, {{'r', 
'x', '_', 'c', 's', 'u', 'm', '_', 'o', 'f', 'f', 'l', 'o', 'a', 'd', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 1, 8, 1632}, {{'a', 'l', 'l', 'o', 'c', '_', 'r', 'x', '_', 'p', 'a', 'g', 'e', '_', 'f', 'a', 'i', 'l', 'e', 'd', '\000'}, 1, 4, 1672}, {{'a', 'l', 'l', 'o', 'c', '_', 'r', 'x', '_', 'b', 'u', 'f', 'f', '_', 'f', 'a', 'i', 'l', 'e', 'd', '\000'}, 1, 4, 1676}, {{'r', 'x', '_', 'n', 'o', '_', 'd', 'm', 'a', '_', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', 's', '\000'}, 1, 8, 1640}, {{'o', 's', '2', 'b', 'm', 'c', '_', 'r', 'x', '_', 'b', 'y', '_', 'b', 'm', 'c', '\000'}, 1, 8, 40320}, {{'o', 's', '2', 'b', 'm', 'c', '_', 't', 'x', '_', 'b', 'y', '_', 'b', 'm', 'c', '\000'}, 1, 8, 40304}, {{'o', 's', '2', 'b', 'm', 'c', '_', 't', 'x', '_', 'b', 'y', '_', 'h', 'o', 's', 't', '\000'}, 1, 8, 40328}, {{'o', 's', '2', 'b', 'm', 'c', '_', 'r', 'x', '_', 'b', 'y', '_', 'h', 'o', 's', 't', '\000'}, 1, 8, 40312}, {{'f', 'c', 'o', 'e', '_', 'b', 'a', 'd', '_', 'f', 'c', 'c', 'r', 'c', '\000'}, 1, 8, 40240}, {{'r', 'x', '_', 'f', 'c', 'o', 'e', '_', 'd', 'r', 'o', 'p', 'p', 'e', 'd', '\000'}, 1, 8, 40248}, {{'r', 'x', '_', 'f', 'c', 'o', 'e', '_', 'p', 'a', 'c', 'k', 'e', 't', 's', '\000'}, 1, 8, 40256}, {{'r', 'x', '_', 'f', 'c', 'o', 'e', '_', 'd', 'w', 'o', 'r', 'd', 's', '\000'}, 1, 8, 40272}, {{'f', 'c', 'o', 'e', '_', 'n', 'o', 'd', 'd', 'p', '\000'}, 1, 8, 40288}, {{'f', 'c', 'o', 'e', '_', 'n', 'o', 'd', 'd', 'p', '_', 'e', 'x', 't', '_', 'b', 'u', 'f', 'f', '\000'}, 1, 8, 40296}, {{'t', 'x', '_', 'f', 'c', 'o', 'e', '_', 'p', 'a', 'c', 'k', 'e', 't', 's', '\000'}, 1, 8, 40264}, {{'t', 'x', '_', 'f', 'c', 'o', 'e', '_', 'd', 'w', 'o', 'r', 'd', 's', '\000'}, 1, 8, 40280}}; static char const ixgbe_gstrings_test[5U][32U] = { { 'R', 'e', 'g', 'i', 's', 't', 'e', 'r', ' ', 't', 'e', 's', 't', ' ', ' ', '(', 'o', 'f', 'f', 'l', 'i', 'n', 'e', ')', '\000'}, { 'E', 'e', 'p', 'r', 'o', 'm', ' ', 't', 'e', 's', 't', ' ', ' ', ' ', ' ', '(', 'o', 'f', 'f', 'l', 'i', 'n', 
'e', ')', '\000'}, { 'I', 'n', 't', 'e', 'r', 'r', 'u', 'p', 't', ' ', 't', 'e', 's', 't', ' ', '(', 'o', 'f', 'f', 'l', 'i', 'n', 'e', ')', '\000'}, { 'L', 'o', 'o', 'p', 'b', 'a', 'c', 'k', ' ', 't', 'e', 's', 't', ' ', ' ', '(', 'o', 'f', 'f', 'l', 'i', 'n', 'e', ')', '\000'}, { 'L', 'i', 'n', 'k', ' ', 't', 'e', 's', 't', ' ', ' ', ' ', '(', 'o', 'n', '/', 'o', 'f', 'f', 'l', 'i', 'n', 'e', ')', '\000'}}; static int ixgbe_get_settings(struct net_device *netdev , struct ethtool_cmd *ecmd ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; ixgbe_link_speed supported_link ; u32 link_speed ; bool autoneg ; bool link_up ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; link_speed = 0U; autoneg = 0; (*(hw->mac.ops.get_link_capabilities))(hw, & supported_link, & autoneg); if ((supported_link & 128U) != 0U) { ecmd->supported = ecmd->supported | 4096U; } else { } if ((supported_link & 32U) != 0U) { ecmd->supported = ecmd->supported | 32U; } else { } if ((supported_link & 8U) != 0U) { ecmd->supported = ecmd->supported | 8U; } else { } if (hw->phy.autoneg_advertised != 0U) { if ((hw->phy.autoneg_advertised & 8U) != 0U) { ecmd->advertising = ecmd->advertising | 8U; } else { } if ((hw->phy.autoneg_advertised & 128U) != 0U) { ecmd->advertising = ecmd->advertising | 4096U; } else { } if ((hw->phy.autoneg_advertised & 32U) != 0U) { ecmd->advertising = ecmd->advertising | 32U; } else { } } else { if ((supported_link & 128U) != 0U) { ecmd->advertising = ecmd->advertising | 4096U; } else { } if ((supported_link & 32U) != 0U) { ecmd->advertising = ecmd->advertising | 32U; } else { } if ((supported_link & 8U) != 0U) { ecmd->advertising = ecmd->advertising | 8U; } else { } if ((int )hw->phy.multispeed_fiber && ! 
autoneg) { if ((supported_link & 128U) != 0U) { ecmd->advertising = 4096U; } else { } } else { } } if ((int )autoneg) { ecmd->supported = ecmd->supported | 64U; ecmd->advertising = ecmd->advertising | 64U; ecmd->autoneg = 1U; } else { ecmd->autoneg = 0U; } ecmd->transceiver = 1U; switch ((unsigned int )adapter->hw.phy.type) { case 2U: ; case 3U: ; case 6U: ; case 7U: ecmd->supported = ecmd->supported | 128U; ecmd->advertising = ecmd->advertising | 128U; ecmd->port = 0U; goto ldv_55761; case 8U: ecmd->supported = ecmd->supported | 1024U; ecmd->advertising = ecmd->advertising | 1024U; ecmd->port = 3U; goto ldv_55761; case 10U: ; case 11U: ; case 12U: ; case 15U: ; case 14U: ; case 18U: ; case 17U: ; switch ((unsigned int )adapter->hw.phy.sfp_type) { case 0U: ; case 3U: ; case 4U: ecmd->supported = ecmd->supported | 1024U; ecmd->advertising = ecmd->advertising | 1024U; ecmd->port = 5U; goto ldv_55773; case 1U: ; case 2U: ; case 5U: ; case 6U: ; case 11U: ; case 12U: ; case 13U: ; case 14U: ecmd->supported = ecmd->supported | 1024U; ecmd->advertising = ecmd->advertising | 1024U; ecmd->port = 3U; goto ldv_55773; case 65534U: ecmd->supported = ecmd->supported | 1024U; ecmd->advertising = ecmd->advertising | 1024U; ecmd->port = 239U; goto ldv_55773; case 9U: ; case 10U: ecmd->supported = ecmd->supported | 128U; ecmd->advertising = ecmd->advertising | 128U; ecmd->port = 0U; goto ldv_55773; case 65535U: ; default: ecmd->supported = ecmd->supported | 1024U; ecmd->advertising = ecmd->advertising | 1024U; ecmd->port = 255U; goto ldv_55773; } ldv_55773: ; goto ldv_55761; case 9U: ecmd->supported = ecmd->supported | 1024U; ecmd->advertising = ecmd->advertising | 1024U; ecmd->port = 239U; goto ldv_55761; case 0U: ; case 24U: ; case 23U: ; default: ecmd->supported = ecmd->supported | 1024U; ecmd->advertising = ecmd->advertising | 1024U; ecmd->port = 255U; goto ldv_55761; } ldv_55761: (*(hw->mac.ops.check_link))(hw, & link_speed, & link_up, 0); if ((int )link_up) { switch 
(link_speed) { case 128U: ethtool_cmd_speed_set(ecmd, 10000U); goto ldv_55793; case 32U: ethtool_cmd_speed_set(ecmd, 1000U); goto ldv_55793; case 8U: ethtool_cmd_speed_set(ecmd, 100U); goto ldv_55793; default: ; goto ldv_55793; } ldv_55793: ecmd->duplex = 1U; } else { ethtool_cmd_speed_set(ecmd, 4294967295U); ecmd->duplex = 255U; } return (0); } } static int ixgbe_set_settings(struct net_device *netdev , struct ethtool_cmd *ecmd ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; u32 advertised ; u32 old ; s32 err ; int tmp___0 ; u32 speed ; __u32 tmp___1 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; err = 0; if ((unsigned int )hw->phy.media_type == 4U || (int )hw->phy.multispeed_fiber) { if ((ecmd->advertising & ~ ecmd->supported) != 0U) { return (-22); } else { } if ((unsigned int )ecmd->autoneg == 0U && (int )hw->phy.multispeed_fiber) { if (ecmd->advertising == 4128U) { return (-22); } else { } } else { } old = hw->phy.autoneg_advertised; advertised = 0U; if ((ecmd->advertising & 4096U) != 0U) { advertised = advertised | 128U; } else { } if ((ecmd->advertising & 32U) != 0U) { advertised = advertised | 32U; } else { } if ((ecmd->advertising & 8U) != 0U) { advertised = advertised | 8U; } else { } if (old == advertised) { return (err); } else { } goto ldv_55807; ldv_55806: usleep_range(1000UL, 2000UL); ldv_55807: tmp___0 = test_and_set_bit(7L, (unsigned long volatile *)(& adapter->state)); if (tmp___0 != 0) { goto ldv_55806; } else { } hw->mac.autotry_restart = 1; err = (*(hw->mac.ops.setup_link))(hw, advertised, 1); if (err != 0) { if (((int )adapter->msg_enable & 2) != 0) { netdev_info((struct net_device const *)adapter->netdev, "setup link failed with code %d\n", err); } else { } (*(hw->mac.ops.setup_link))(hw, old, 1); } else { } clear_bit(7L, (unsigned long volatile *)(& adapter->state)); } else { tmp___1 = ethtool_cmd_speed((struct ethtool_cmd const *)ecmd); speed = 
tmp___1; if (((unsigned int )ecmd->autoneg == 1U || ecmd->advertising != 4096U) || (u32 )ecmd->duplex + speed != 10001U) { return (-22); } else { } } return (err); } } static void ixgbe_get_pauseparam(struct net_device *netdev , struct ethtool_pauseparam *pause ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; bool tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; tmp___0 = ixgbe_device_supports_autoneg_fc(hw); if ((int )tmp___0 && ! hw->fc.disable_fc_autoneg) { pause->autoneg = 1U; } else { pause->autoneg = 0U; } if ((unsigned int )hw->fc.current_mode == 1U) { pause->rx_pause = 1U; } else if ((unsigned int )hw->fc.current_mode == 2U) { pause->tx_pause = 1U; } else if ((unsigned int )hw->fc.current_mode == 3U) { pause->rx_pause = 1U; pause->tx_pause = 1U; } else { } return; } } static int ixgbe_set_pauseparam(struct net_device *netdev , struct ethtool_pauseparam *pause ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; struct ixgbe_fc_info fc ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; fc = hw->fc; if ((unsigned int )hw->mac.type == 1U && (adapter->flags & 4096U) != 0U) { return (-22); } else { } if (pause->autoneg == 1U) { tmp___0 = ixgbe_device_supports_autoneg_fc(hw); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (-22); } else { } } else { } fc.disable_fc_autoneg = pause->autoneg != 1U; if ((pause->rx_pause != 0U && pause->tx_pause != 0U) || pause->autoneg != 0U) { fc.requested_mode = 3; } else if (pause->rx_pause != 0U && pause->tx_pause == 0U) { fc.requested_mode = 1; } else if (pause->rx_pause == 0U && pause->tx_pause != 0U) { fc.requested_mode = 2; } else { fc.requested_mode = 0; } tmp___3 = memcmp((void const *)(& fc), (void const *)(& hw->fc), 80UL); if (tmp___3 != 0) { hw->fc = fc; 
tmp___2 = netif_running((struct net_device const *)netdev); if ((int )tmp___2) { ixgbe_reinit_locked(adapter); } else { ixgbe_reset(adapter); } } else { } return (0); } } static u32 ixgbe_get_msglevel(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; return ((u32 )adapter->msg_enable); } } static void ixgbe_set_msglevel(struct net_device *netdev , u32 data ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; adapter->msg_enable = (u16 )data; return; } } static int ixgbe_get_regs_len(struct net_device *netdev ) { { return (4556); } } static void ixgbe_get_regs(struct net_device *netdev , struct ethtool_regs *regs , void *p ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; u32 *regs_buff ; u8 i ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; regs_buff = (u32 *)p; memset(p, 0, 4556UL); regs->version = (((unsigned int )hw->mac.type << 24) | (unsigned int )((int )hw->revision_id << 16)) | (unsigned int )hw->device_id; *regs_buff = ixgbe_read_reg(hw, 0U); *(regs_buff + 1UL) = ixgbe_read_reg(hw, 8U); *(regs_buff + 2UL) = ixgbe_read_reg(hw, 24U); *(regs_buff + 3UL) = ixgbe_read_reg(hw, 32U); *(regs_buff + 4UL) = ixgbe_read_reg(hw, 40U); *(regs_buff + 5UL) = ixgbe_read_reg(hw, 512U); *(regs_buff + 6UL) = ixgbe_read_reg(hw, 72U); *(regs_buff + 7UL) = ixgbe_read_reg(hw, 76U); *(regs_buff + 8UL) = ixgbe_read_reg(hw, *(hw->mvals)); *(regs_buff + 9UL) = ixgbe_read_reg(hw, 65556U); *(regs_buff + 10UL) = ixgbe_read_reg(hw, *(hw->mvals + 1UL)); *(regs_buff + 11UL) = ixgbe_read_reg(hw, 65808U); *(regs_buff + 12UL) = ixgbe_read_reg(hw, 65812U); *(regs_buff + 13UL) = ixgbe_read_reg(hw, 65816U); *(regs_buff + 14UL) = ixgbe_read_reg(hw, 65820U); *(regs_buff + 15UL) = ixgbe_read_reg(hw, 65824U); 
/* ixgbe_get_regs (continued): bulk MMIO register dump into regs_buff.
 * CIL has constant-folded every IXGBE_* register macro into a raw decimal
 * byte offset, so the numbers below are absolute register addresses. */
*(regs_buff + 16UL) = ixgbe_read_reg(hw, 65852U); *(regs_buff + 17UL) = ixgbe_read_reg(hw, *(hw->mvals + 2UL));
/* NOTE(review): slots 18 and 19 both read offset 2056 (0x808). Presumably
 * the EICS shadow register is dumped in place of EICR so that producing the
 * dump does not clear pending interrupt causes -- confirm against the
 * upstream ixgbe_get_regs and the 82599 datasheet. */
*(regs_buff + 18UL) = ixgbe_read_reg(hw, 2056U); *(regs_buff + 19UL) = ixgbe_read_reg(hw, 2056U); *(regs_buff + 20UL) = ixgbe_read_reg(hw, 2176U); *(regs_buff + 21UL) = ixgbe_read_reg(hw, 2184U); *(regs_buff + 22UL) = ixgbe_read_reg(hw, 2064U); *(regs_buff + 23UL) = ixgbe_read_reg(hw, 2192U); *(regs_buff + 24UL) = ixgbe_read_reg(hw, 2080U); *(regs_buff + 25UL) = ixgbe_read_reg(hw, 2304U);
/* NOTE(review): slot 26 reads offset 0 again (same register as slot 0) --
 * this mirrors the upstream dump layout, but verify it is intentional. */
*(regs_buff + 26UL) = ixgbe_read_reg(hw, 0U); *(regs_buff + 27UL) = ixgbe_read_reg(hw, 8192U); *(regs_buff + 28UL) = ixgbe_read_reg(hw, 69736U); *(regs_buff + 29UL) = ixgbe_read_reg(hw, 2200U); *(regs_buff + 30UL) = ixgbe_read_reg(hw, 12296U); *(regs_buff + 31UL) = ixgbe_read_reg(hw, 12800U); *(regs_buff + 32UL) = ixgbe_read_reg(hw, 12804U); *(regs_buff + 33UL) = ixgbe_read_reg(hw, 12808U); *(regs_buff + 34UL) = ixgbe_read_reg(hw, 12812U);
/* Per-queue registers for 8 queues (i = 0..7) filling slots 35..50. The
 * offset layout differs per MAC family: mac.type 1 uses an 8-byte stride
 * base, types 2..5 a 4-byte stride base; unknown types leave the slots at
 * the zero written by the memset at function entry. CIL lowered the
 * for-loop into the ldv_5585x goto/label pairs. */
i = 0U; goto ldv_55852; ldv_55851: ; switch ((unsigned int )hw->mac.type) { case 1U: *(regs_buff + (unsigned long )((int )i + 35)) = ixgbe_read_reg(hw, (u32 )(((int )i + 1604) * 8)); *(regs_buff + (unsigned long )((int )i + 43)) = ixgbe_read_reg(hw, (u32 )(((int )i + 1612) * 8)); goto ldv_55845; case 2U: ; case 3U: ; case 4U: ; case 5U: *(regs_buff + (unsigned long )((int )i + 35)) = ixgbe_read_reg(hw, (u32 )(((int )i + 3208) * 4)); *(regs_buff + (unsigned long )((int )i + 43)) = ixgbe_read_reg(hw, (u32 )(((int )i + 3224) * 4)); goto ldv_55845; default: ; goto ldv_55845; } ldv_55845: i = (u8 )((int )i + 1); ldv_55852: ; if ((unsigned int )i <= 7U) { goto ldv_55851; } else { }
*(regs_buff + 51UL) = ixgbe_read_reg(hw, 12960U); *(regs_buff + 52UL) = ixgbe_read_reg(hw, 52736U);
/* 64 per-ring registers into slots 53..116: rings 0..63 use one offset
 * range, higher ring indices another (ternary split at i <= 63U). The
 * closing half of this ternary continues on the next original line. */
i = 0U; goto ldv_55855; ldv_55854: *(regs_buff + (unsigned long )((int )i + 53)) = ixgbe_read_reg(hw, (u32 )((unsigned int )i <= 63U ?
((int )i + 64) * 64 : ((int )i + 768) * 64)); i = (u8 )((int )i + 1); ldv_55855: ; if ((unsigned int )i <= 63U) { goto ldv_55854; } else { } i = 0U; goto ldv_55858; ldv_55857: *(regs_buff + (unsigned long )((int )i + 117)) = ixgbe_read_reg(hw, (u32 )((unsigned int )i <= 63U ? (int )i * 64 + 4100 : ((int )i + -64) * 64 + 53252)); i = (u8 )((int )i + 1); ldv_55858: ; if ((unsigned int )i <= 63U) { goto ldv_55857; } else { } i = 0U; goto ldv_55861; ldv_55860: *(regs_buff + (unsigned long )((int )i + 181)) = ixgbe_read_reg(hw, (u32 )((unsigned int )i <= 63U ? (int )i * 64 + 4104 : ((int )i + -64) * 64 + 53256)); i = (u8 )((int )i + 1); ldv_55861: ; if ((unsigned int )i <= 63U) { goto ldv_55860; } else { } i = 0U; goto ldv_55864; ldv_55863: *(regs_buff + (unsigned long )((int )i + 245)) = ixgbe_read_reg(hw, (u32 )((unsigned int )i <= 63U ? (int )i * 64 + 4112 : ((int )i + -64) * 64 + 53264)); i = (u8 )((int )i + 1); ldv_55864: ; if ((unsigned int )i <= 63U) { goto ldv_55863; } else { } i = 0U; goto ldv_55867; ldv_55866: *(regs_buff + (unsigned long )((int )i + 309)) = ixgbe_read_reg(hw, (u32 )((unsigned int )i <= 63U ? (int )i * 64 + 4120 : ((int )i + -64) * 64 + 53272)); i = (u8 )((int )i + 1); ldv_55867: ; if ((unsigned int )i <= 63U) { goto ldv_55866; } else { } i = 0U; goto ldv_55870; ldv_55869: *(regs_buff + (unsigned long )((int )i + 373)) = ixgbe_read_reg(hw, (u32 )((unsigned int )i <= 63U ? (int )i * 64 + 4136 : ((int )i + -64) * 64 + 53288)); i = (u8 )((int )i + 1); ldv_55870: ; if ((unsigned int )i <= 63U) { goto ldv_55869; } else { } i = 0U; goto ldv_55873; ldv_55872: *(regs_buff + (unsigned long )((int )i + 437)) = ixgbe_read_reg(hw, (u32 )((unsigned int )i <= 15U ? ((int )i + 2112) * 4 : ((unsigned int )i <= 63U ? 
(int )i * 64 + 4116 : ((int )i + -64) * 64 + 53268))); i = (u8 )((int )i + 1); ldv_55873: ; if ((unsigned int )i <= 15U) { goto ldv_55872; } else { } i = 0U; goto ldv_55876; ldv_55875: *(regs_buff + (unsigned long )((int )i + 453)) = ixgbe_read_reg(hw, (u32 )((unsigned int )i <= 15U ? ((int )i + 2176) * 4 : ((unsigned int )i <= 63U ? (int )i * 64 + 4108 : ((int )i + -64) * 64 + 53260))); i = (u8 )((int )i + 1); ldv_55876: ; if ((unsigned int )i <= 15U) { goto ldv_55875; } else { } *(regs_buff + 469UL) = ixgbe_read_reg(hw, 12032U); i = 0U; goto ldv_55879; ldv_55878: *(regs_buff + (unsigned long )((int )i + 470)) = ixgbe_read_reg(hw, (u32 )(((int )i + 3840) * 4)); i = (u8 )((int )i + 1); ldv_55879: ; if ((unsigned int )i <= 7U) { goto ldv_55878; } else { } *(regs_buff + 478UL) = ixgbe_read_reg(hw, 12288U); *(regs_buff + 479UL) = ixgbe_read_reg(hw, 15620U); *(regs_buff + 480UL) = ixgbe_read_reg(hw, 20480U); *(regs_buff + 481UL) = ixgbe_read_reg(hw, 20488U); i = 0U; goto ldv_55882; ldv_55881: *(regs_buff + (unsigned long )((int )i + 482)) = ixgbe_read_reg(hw, (u32 )((unsigned int )i <= 15U ? ((int )i + 2688) * 8 : ((int )i + 5184) * 8)); i = (u8 )((int )i + 1); ldv_55882: ; if ((unsigned int )i <= 15U) { goto ldv_55881; } else { } i = 0U; goto ldv_55885; ldv_55884: *(regs_buff + (unsigned long )((int )i + 498)) = ixgbe_read_reg(hw, (u32 )((unsigned int )i <= 15U ? 
(int )i * 8 + 21508 : (int )i * 8 + 41476)); i = (u8 )((int )i + 1); ldv_55885: ; if ((unsigned int )i <= 15U) { goto ldv_55884; } else { } *(regs_buff + 514UL) = ixgbe_read_reg(hw, 21632U); *(regs_buff + 515UL) = ixgbe_read_reg(hw, 20608U); *(regs_buff + 516UL) = ixgbe_read_reg(hw, 20616U); *(regs_buff + 517UL) = ixgbe_read_reg(hw, 20624U); *(regs_buff + 518UL) = ixgbe_read_reg(hw, 22552U); *(regs_buff + 519UL) = ixgbe_read_reg(hw, 22556U); i = 0U; goto ldv_55888; ldv_55887: *(regs_buff + (unsigned long )((int )i + 520)) = ixgbe_read_reg(hw, (u32 )(((int )i + 5792) * 4)); i = (u8 )((int )i + 1); ldv_55888: ; if ((unsigned int )i <= 7U) { goto ldv_55887; } else { } i = 0U; goto ldv_55891; ldv_55890: *(regs_buff + (unsigned long )((int )i + 528)) = ixgbe_read_reg(hw, (u32 )(((int )i + 5800) * 4)); i = (u8 )((int )i + 1); ldv_55891: ; if ((unsigned int )i <= 7U) { goto ldv_55890; } else { } *(regs_buff + 536UL) = ixgbe_read_reg(hw, 23232U); i = 0U; goto ldv_55894; ldv_55893: *(regs_buff + (unsigned long )((int )i + 537)) = ixgbe_read_reg(hw, (u32 )(((int )i + 384) * 64)); i = (u8 )((int )i + 1); ldv_55894: ; if ((unsigned int )i <= 31U) { goto ldv_55893; } else { } i = 0U; goto ldv_55897; ldv_55896: *(regs_buff + (unsigned long )((int )i + 569)) = ixgbe_read_reg(hw, (u32 )((int )i * 64 + 24580)); i = (u8 )((int )i + 1); ldv_55897: ; if ((unsigned int )i <= 31U) { goto ldv_55896; } else { } i = 0U; goto ldv_55900; ldv_55899: *(regs_buff + (unsigned long )((int )i + 601)) = ixgbe_read_reg(hw, (u32 )((int )i * 64 + 24584)); i = (u8 )((int )i + 1); ldv_55900: ; if ((unsigned int )i <= 31U) { goto ldv_55899; } else { } i = 0U; goto ldv_55903; ldv_55902: *(regs_buff + (unsigned long )((int )i + 633)) = ixgbe_read_reg(hw, (u32 )((int )i * 64 + 24592)); i = (u8 )((int )i + 1); ldv_55903: ; if ((unsigned int )i <= 31U) { goto ldv_55902; } else { } i = 0U; goto ldv_55906; ldv_55905: *(regs_buff + (unsigned long )((int )i + 665)) = ixgbe_read_reg(hw, (u32 )((int )i * 64 + 
24600)); i = (u8 )((int )i + 1); ldv_55906: ; if ((unsigned int )i <= 31U) { goto ldv_55905; } else { } i = 0U; goto ldv_55909; ldv_55908: *(regs_buff + (unsigned long )((int )i + 697)) = ixgbe_read_reg(hw, (u32 )((int )i * 64 + 24616)); i = (u8 )((int )i + 1); ldv_55909: ; if ((unsigned int )i <= 31U) { goto ldv_55908; } else { } i = 0U; goto ldv_55912; ldv_55911: *(regs_buff + (unsigned long )((int )i + 729)) = ixgbe_read_reg(hw, (u32 )((int )i * 64 + 24632)); i = (u8 )((int )i + 1); ldv_55912: ; if ((unsigned int )i <= 31U) { goto ldv_55911; } else { } i = 0U; goto ldv_55915; ldv_55914: *(regs_buff + (unsigned long )((int )i + 761)) = ixgbe_read_reg(hw, (u32 )((int )i * 64 + 24636)); i = (u8 )((int )i + 1); ldv_55915: ; if ((unsigned int )i <= 31U) { goto ldv_55914; } else { } *(regs_buff + 793UL) = ixgbe_read_reg(hw, 32256U); i = 0U; goto ldv_55918; ldv_55917: *(regs_buff + (unsigned long )((int )i + 794)) = ixgbe_read_reg(hw, (u32 )(((int )i + 7296) * 4)); i = (u8 )((int )i + 1); ldv_55918: ; if ((unsigned int )i <= 15U) { goto ldv_55917; } else { } *(regs_buff + 810UL) = ixgbe_read_reg(hw, 51968U); i = 0U; goto ldv_55921; ldv_55920: *(regs_buff + (unsigned long )((int )i + 811)) = ixgbe_read_reg(hw, (u32 )(((int )i + 13056) * 4)); i = (u8 )((int )i + 1); ldv_55921: ; if ((unsigned int )i <= 7U) { goto ldv_55920; } else { } *(regs_buff + 819UL) = ixgbe_read_reg(hw, 52496U); *(regs_buff + 820UL) = ixgbe_read_reg(hw, 22528U); *(regs_buff + 821UL) = ixgbe_read_reg(hw, 22536U); *(regs_buff + 822UL) = ixgbe_read_reg(hw, 22544U); *(regs_buff + 823UL) = ixgbe_read_reg(hw, 22584U); *(regs_buff + 824UL) = ixgbe_read_reg(hw, 22592U); *(regs_buff + 825UL) = ixgbe_read_reg(hw, 22656U); *(regs_buff + 826UL) = ixgbe_read_reg(hw, 22784U); *(regs_buff + 827UL) = ixgbe_read_reg(hw, 23040U); *(regs_buff + 828UL) = ixgbe_read_reg(hw, 36864U); *(regs_buff + 829UL) = ixgbe_read_reg(hw, 15616U); *(regs_buff + 831UL) = ixgbe_read_reg(hw, 52480U); switch ((unsigned int )hw->mac.type) 
{ case 1U: *(regs_buff + 830UL) = ixgbe_read_reg(hw, 32576U); *(regs_buff + 832UL) = ixgbe_read_reg(hw, 20640U); i = 0U; goto ldv_55925; ldv_55924: *(regs_buff + (unsigned long )((int )i + 833)) = ixgbe_read_reg(hw, (u32 )(((int )i + 3848) * 4)); i = (u8 )((int )i + 1); ldv_55925: ; if ((unsigned int )i <= 7U) { goto ldv_55924; } else { } i = 0U; goto ldv_55928; ldv_55927: *(regs_buff + (unsigned long )((int )i + 841)) = ixgbe_read_reg(hw, (u32 )(((int )i + 3856) * 4)); i = (u8 )((int )i + 1); ldv_55928: ; if ((unsigned int )i <= 7U) { goto ldv_55927; } else { } i = 0U; goto ldv_55931; ldv_55930: *(regs_buff + (unsigned long )((int )i + 849)) = ixgbe_read_reg(hw, (u32 )((int )i * 64 + 24620)); i = (u8 )((int )i + 1); ldv_55931: ; if ((unsigned int )i <= 7U) { goto ldv_55930; } else { } i = 0U; goto ldv_55934; ldv_55933: *(regs_buff + (unsigned long )((int )i + 857)) = ixgbe_read_reg(hw, (u32 )((int )i * 64 + 25132)); i = (u8 )((int )i + 1); ldv_55934: ; if ((unsigned int )i <= 7U) { goto ldv_55933; } else { } goto ldv_55936; case 2U: ; case 3U: ; case 4U: ; case 5U: *(regs_buff + 830UL) = ixgbe_read_reg(hw, 18688U); *(regs_buff + 832UL) = ixgbe_read_reg(hw, 9264U); i = 0U; goto ldv_55942; ldv_55941: *(regs_buff + (unsigned long )((int )i + 833)) = ixgbe_read_reg(hw, (u32 )(((int )i + 2128) * 4)); i = (u8 )((int )i + 1); ldv_55942: ; if ((unsigned int )i <= 7U) { goto ldv_55941; } else { } i = 0U; goto ldv_55945; ldv_55944: *(regs_buff + (unsigned long )((int )i + 841)) = ixgbe_read_reg(hw, (u32 )(((int )i + 2136) * 4)); i = (u8 )((int )i + 1); ldv_55945: ; if ((unsigned int )i <= 7U) { goto ldv_55944; } else { } i = 0U; goto ldv_55948; ldv_55947: *(regs_buff + (unsigned long )((int )i + 849)) = ixgbe_read_reg(hw, (u32 )(((int )i + 4676) * 4)); i = (u8 )((int )i + 1); ldv_55948: ; if ((unsigned int )i <= 7U) { goto ldv_55947; } else { } i = 0U; goto ldv_55951; ldv_55950: *(regs_buff + (unsigned long )((int )i + 857)) = ixgbe_read_reg(hw, (u32 )(((int )i + 4684) * 
4)); i = (u8 )((int )i + 1); ldv_55951: ; if ((unsigned int )i <= 7U) { goto ldv_55950; } else { } goto ldv_55936; default: ; goto ldv_55936; } ldv_55936: i = 0U; goto ldv_55955; ldv_55954: *(regs_buff + (unsigned long )((int )i + 865)) = ixgbe_read_reg(hw, (u32 )(((int )i + 13128) * 4)); i = (u8 )((int )i + 1); ldv_55955: ; if ((unsigned int )i <= 7U) { goto ldv_55954; } else { } i = 0U; goto ldv_55958; ldv_55957: *(regs_buff + (unsigned long )((int )i + 873)) = ixgbe_read_reg(hw, (u32 )(((int )i + 13136) * 4)); i = (u8 )((int )i + 1); ldv_55958: ; if ((unsigned int )i <= 7U) { goto ldv_55957; } else { } *(regs_buff + 881UL) = (u32 )adapter->stats.crcerrs; *(regs_buff + 882UL) = (u32 )adapter->stats.illerrc; *(regs_buff + 883UL) = (u32 )adapter->stats.errbc; *(regs_buff + 884UL) = (u32 )adapter->stats.mspdc; i = 0U; goto ldv_55961; ldv_55960: *(regs_buff + (unsigned long )((int )i + 885)) = (u32 )adapter->stats.mpc[(int )i]; i = (u8 )((int )i + 1); ldv_55961: ; if ((unsigned int )i <= 7U) { goto ldv_55960; } else { } *(regs_buff + 893UL) = (u32 )adapter->stats.mlfc; *(regs_buff + 894UL) = (u32 )adapter->stats.mrfc; *(regs_buff + 895UL) = (u32 )adapter->stats.rlec; *(regs_buff + 896UL) = (u32 )adapter->stats.lxontxc; *(regs_buff + 897UL) = (u32 )adapter->stats.lxonrxc; *(regs_buff + 898UL) = (u32 )adapter->stats.lxofftxc; *(regs_buff + 899UL) = (u32 )adapter->stats.lxoffrxc; i = 0U; goto ldv_55964; ldv_55963: *(regs_buff + (unsigned long )((int )i + 900)) = (u32 )adapter->stats.pxontxc[(int )i]; i = (u8 )((int )i + 1); ldv_55964: ; if ((unsigned int )i <= 7U) { goto ldv_55963; } else { } i = 0U; goto ldv_55967; ldv_55966: *(regs_buff + (unsigned long )((int )i + 908)) = (u32 )adapter->stats.pxonrxc[(int )i]; i = (u8 )((int )i + 1); ldv_55967: ; if ((unsigned int )i <= 7U) { goto ldv_55966; } else { } i = 0U; goto ldv_55970; ldv_55969: *(regs_buff + (unsigned long )((int )i + 916)) = (u32 )adapter->stats.pxofftxc[(int )i]; i = (u8 )((int )i + 1); ldv_55970: ; if 
((unsigned int )i <= 7U) { goto ldv_55969; } else { } i = 0U; goto ldv_55973; ldv_55972: *(regs_buff + (unsigned long )((int )i + 924)) = (u32 )adapter->stats.pxoffrxc[(int )i]; i = (u8 )((int )i + 1); ldv_55973: ; if ((unsigned int )i <= 7U) { goto ldv_55972; } else { } *(regs_buff + 932UL) = (u32 )adapter->stats.prc64; *(regs_buff + 933UL) = (u32 )adapter->stats.prc127; *(regs_buff + 934UL) = (u32 )adapter->stats.prc255; *(regs_buff + 935UL) = (u32 )adapter->stats.prc511; *(regs_buff + 936UL) = (u32 )adapter->stats.prc1023; *(regs_buff + 937UL) = (u32 )adapter->stats.prc1522; *(regs_buff + 938UL) = (u32 )adapter->stats.gprc; *(regs_buff + 939UL) = (u32 )adapter->stats.bprc; *(regs_buff + 940UL) = (u32 )adapter->stats.mprc; *(regs_buff + 941UL) = (u32 )adapter->stats.gptc; *(regs_buff + 942UL) = (u32 )adapter->stats.gorc; *(regs_buff + 944UL) = (u32 )adapter->stats.gotc; i = 0U; goto ldv_55976; ldv_55975: *(regs_buff + (unsigned long )((int )i + 946)) = (u32 )adapter->stats.rnbc[(int )i]; i = (u8 )((int )i + 1); ldv_55976: ; if ((unsigned int )i <= 7U) { goto ldv_55975; } else { } *(regs_buff + 954UL) = (u32 )adapter->stats.ruc; *(regs_buff + 955UL) = (u32 )adapter->stats.rfc; *(regs_buff + 956UL) = (u32 )adapter->stats.roc; *(regs_buff + 957UL) = (u32 )adapter->stats.rjc; *(regs_buff + 958UL) = (u32 )adapter->stats.mngprc; *(regs_buff + 959UL) = (u32 )adapter->stats.mngpdc; *(regs_buff + 960UL) = (u32 )adapter->stats.mngptc; *(regs_buff + 961UL) = (u32 )adapter->stats.tor; *(regs_buff + 963UL) = (u32 )adapter->stats.tpr; *(regs_buff + 964UL) = (u32 )adapter->stats.tpt; *(regs_buff + 965UL) = (u32 )adapter->stats.ptc64; *(regs_buff + 966UL) = (u32 )adapter->stats.ptc127; *(regs_buff + 967UL) = (u32 )adapter->stats.ptc255; *(regs_buff + 968UL) = (u32 )adapter->stats.ptc511; *(regs_buff + 969UL) = (u32 )adapter->stats.ptc1023; *(regs_buff + 970UL) = (u32 )adapter->stats.ptc1522; *(regs_buff + 971UL) = (u32 )adapter->stats.mptc; *(regs_buff + 972UL) = (u32 
)adapter->stats.bptc; *(regs_buff + 973UL) = (u32 )adapter->stats.xec; i = 0U; goto ldv_55979; ldv_55978: *(regs_buff + (unsigned long )((int )i + 974)) = (u32 )adapter->stats.qprc[(int )i]; i = (u8 )((int )i + 1); ldv_55979: ; if ((unsigned int )i <= 15U) { goto ldv_55978; } else { } i = 0U; goto ldv_55982; ldv_55981: *(regs_buff + (unsigned long )((int )i + 990)) = (u32 )adapter->stats.qptc[(int )i]; i = (u8 )((int )i + 1); ldv_55982: ; if ((unsigned int )i <= 15U) { goto ldv_55981; } else { } i = 0U; goto ldv_55985; ldv_55984: *(regs_buff + (unsigned long )((int )i + 1006)) = (u32 )adapter->stats.qbrc[(int )i]; i = (u8 )((int )i + 1); ldv_55985: ; if ((unsigned int )i <= 15U) { goto ldv_55984; } else { } i = 0U; goto ldv_55988; ldv_55987: *(regs_buff + (unsigned long )((int )i + 1022)) = (u32 )adapter->stats.qbtc[(int )i]; i = (u8 )((int )i + 1); ldv_55988: ; if ((unsigned int )i <= 15U) { goto ldv_55987; } else { } *(regs_buff + 1038UL) = ixgbe_read_reg(hw, 16896U); *(regs_buff + 1039UL) = ixgbe_read_reg(hw, 16904U); *(regs_buff + 1040UL) = ixgbe_read_reg(hw, 16908U); *(regs_buff + 1041UL) = ixgbe_read_reg(hw, 16912U); *(regs_buff + 1042UL) = ixgbe_read_reg(hw, 16916U); *(regs_buff + 1043UL) = ixgbe_read_reg(hw, 16920U); *(regs_buff + 1044UL) = ixgbe_read_reg(hw, 16924U); *(regs_buff + 1045UL) = ixgbe_read_reg(hw, 16928U); *(regs_buff + 1046UL) = ixgbe_read_reg(hw, 16932U); *(regs_buff + 1047UL) = ixgbe_read_reg(hw, 16960U); *(regs_buff + 1048UL) = ixgbe_read_reg(hw, 16964U); *(regs_buff + 1049UL) = ixgbe_read_reg(hw, 16968U); *(regs_buff + 1050UL) = ixgbe_read_reg(hw, 16972U); *(regs_buff + 1051UL) = ixgbe_read_reg(hw, 16976U); *(regs_buff + 1052UL) = ixgbe_read_reg(hw, 16980U); *(regs_buff + 1053UL) = ixgbe_read_reg(hw, 16984U); *(regs_buff + 1054UL) = ixgbe_read_reg(hw, 16988U); *(regs_buff + 1055UL) = ixgbe_read_reg(hw, 16992U); *(regs_buff + 1056UL) = ixgbe_read_reg(hw, 16996U); *(regs_buff + 1057UL) = ixgbe_read_reg(hw, 17000U); *(regs_buff + 1058UL) = 
ixgbe_read_reg(hw, 17004U); *(regs_buff + 1059UL) = ixgbe_read_reg(hw, 17032U); *(regs_buff + 1060UL) = ixgbe_read_reg(hw, 17036U); *(regs_buff + 1061UL) = ixgbe_read_reg(hw, 17040U); *(regs_buff + 1062UL) = ixgbe_read_reg(hw, 17048U); *(regs_buff + 1063UL) = ixgbe_read_reg(hw, 17052U); *(regs_buff + 1064UL) = ixgbe_read_reg(hw, 17056U); *(regs_buff + 1065UL) = ixgbe_read_reg(hw, 17060U); *(regs_buff + 1066UL) = ixgbe_read_reg(hw, 17064U); *(regs_buff + 1067UL) = ixgbe_read_reg(hw, 17068U); *(regs_buff + 1068UL) = ixgbe_read_reg(hw, 17072U); *(regs_buff + 1069UL) = ixgbe_read_reg(hw, 17076U); *(regs_buff + 1070UL) = ixgbe_read_reg(hw, 18432U); *(regs_buff + 1071UL) = ixgbe_read_reg(hw, 11296U); i = 0U; goto ldv_55991; ldv_55990: *(regs_buff + (unsigned long )((int )i + 1072)) = ixgbe_read_reg(hw, (u32 )(((int )i + 2816) * 4)); i = (u8 )((int )i + 1); ldv_55991: ; if ((unsigned int )i <= 7U) { goto ldv_55990; } else { } *(regs_buff + 1080UL) = ixgbe_read_reg(hw, 12040U); i = 0U; goto ldv_55994; ldv_55993: *(regs_buff + (unsigned long )((int )i + 1081)) = ixgbe_read_reg(hw, (u32 )(((int )i + 3012) * 4)); i = (u8 )((int )i + 1); ldv_55994: ; if ((unsigned int )i <= 3U) { goto ldv_55993; } else { } *(regs_buff + 1085UL) = ixgbe_read_reg(hw, 12064U); *(regs_buff + 1086UL) = ixgbe_read_reg(hw, 31776U); i = 0U; goto ldv_55997; ldv_55996: *(regs_buff + (unsigned long )((int )i + 1087)) = ixgbe_read_reg(hw, (u32 )(((int )i + 7936) * 4)); i = (u8 )((int )i + 1); ldv_55997: ; if ((unsigned int )i <= 7U) { goto ldv_55996; } else { } *(regs_buff + 1095UL) = ixgbe_read_reg(hw, 32520U); i = 0U; goto ldv_56000; ldv_55999: *(regs_buff + (unsigned long )((int )i + 1096)) = ixgbe_read_reg(hw, (u32 )(((int )i + 8132) * 4)); i = (u8 )((int )i + 1); ldv_56000: ; if ((unsigned int )i <= 3U) { goto ldv_55999; } else { } *(regs_buff + 1100UL) = ixgbe_read_reg(hw, 32544U); *(regs_buff + 1101UL) = ixgbe_read_reg(hw, 50688U); *(regs_buff + 1102UL) = ixgbe_read_reg(hw, 50704U); *(regs_buff + 
1103UL) = ixgbe_read_reg(hw, 50708U); *(regs_buff + 1104UL) = ixgbe_read_reg(hw, 50712U); *(regs_buff + 1105UL) = ixgbe_read_reg(hw, 50716U); *(regs_buff + 1106UL) = ixgbe_read_reg(hw, 13824U); *(regs_buff + 1107UL) = ixgbe_read_reg(hw, 13840U); *(regs_buff + 1108UL) = ixgbe_read_reg(hw, 13844U); *(regs_buff + 1109UL) = ixgbe_read_reg(hw, 13848U); *(regs_buff + 1110UL) = ixgbe_read_reg(hw, 13852U); i = 0U; goto ldv_56003; ldv_56002: *(regs_buff + (unsigned long )((int )i + 1111)) = ixgbe_read_reg(hw, (u32 )(((int )i + 17444) * 4)); i = (u8 )((int )i + 1); ldv_56003: ; if ((unsigned int )i <= 7U) { goto ldv_56002; } else { } *(regs_buff + 1119UL) = ixgbe_read_reg(hw, 20644U); *(regs_buff + 1120UL) = ixgbe_read_reg(hw, 17080U); *(regs_buff + 1121UL) = ixgbe_read_reg(hw, 17088U); *(regs_buff + 1122UL) = ixgbe_read_reg(hw, 17092U); *(regs_buff + 1123UL) = ixgbe_read_reg(hw, 17096U); *(regs_buff + 1124UL) = ixgbe_read_reg(hw, 17100U); *(regs_buff + 1125UL) = ixgbe_read_reg(hw, 69740U); *(regs_buff + 1126UL) = ixgbe_read_reg(hw, 49920U); *(regs_buff + 1127UL) = ixgbe_read_reg(hw, 13056U); *(regs_buff + 1128UL) = ixgbe_read_reg(hw, 17044U); *(regs_buff + 1129UL) = ixgbe_read_reg(hw, 12320U); *(regs_buff + 1130UL) = ixgbe_read_reg(hw, 51200U); i = 0U; goto ldv_56006; ldv_56005: *(regs_buff + (unsigned long )((int )i + 1131)) = ixgbe_read_reg(hw, (u32 )(((int )i + 8376) * 4)); i = (u8 )((int )i + 1); ldv_56006: ; if ((unsigned int )i <= 3U) { goto ldv_56005; } else { } *(regs_buff + 1135UL) = ixgbe_read_reg(hw, 18816U); *(regs_buff + 1136UL) = ixgbe_read_reg(hw, 18828U); *(regs_buff + 1137UL) = ixgbe_read_reg(hw, 35584U); *(regs_buff + 1138UL) = ixgbe_read_reg(hw, 19088U); return; } } static int ixgbe_get_eeprom_len(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; return ((int )adapter->hw.eeprom.word_size * 2); } } static int ixgbe_get_eeprom(struct 
net_device *netdev , struct ethtool_eeprom *eeprom , u8 *bytes ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; u16 *eeprom_buff ; int first_word ; int last_word ; int eeprom_len ; int ret_val ; u16 i ; void *tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; ret_val = 0; if (eeprom->len == 0U) { return (-22); } else { } eeprom->magic = (__u32 )((int )hw->vendor_id | ((int )hw->device_id << 16)); first_word = (int )(eeprom->offset >> 1); last_word = (int )(((eeprom->offset + eeprom->len) - 1U) >> 1); eeprom_len = (last_word - first_word) + 1; tmp___0 = kzalloc((unsigned long )eeprom_len * 2UL, 208U); eeprom_buff = (u16 *)tmp___0; if ((unsigned long )eeprom_buff == (unsigned long )((u16 *)0U)) { return (-12); } else { } ret_val = (*(hw->eeprom.ops.read_buffer))(hw, (int )((u16 )first_word), (int )((u16 )eeprom_len), eeprom_buff); i = 0U; goto ldv_56026; ldv_56025: i = (u16 )((int )i + 1); ldv_56026: ; if ((int )i < eeprom_len) { goto ldv_56025; } else { } memcpy((void *)bytes, (void const *)eeprom_buff + ((unsigned long )eeprom->offset & 1UL), (size_t )eeprom->len); kfree((void const *)eeprom_buff); return (ret_val); } } static int ixgbe_set_eeprom(struct net_device *netdev , struct ethtool_eeprom *eeprom , u8 *bytes ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; u16 *eeprom_buff ; void *ptr ; int max_len ; int first_word ; int last_word ; int ret_val ; u16 i ; void *tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; ret_val = 0; if (eeprom->len == 0U) { return (-22); } else { } if (eeprom->magic != (__u32 )((int )hw->vendor_id | ((int )hw->device_id << 16))) { return (-22); } else { } max_len = (int )hw->eeprom.word_size * 2; first_word = (int )(eeprom->offset >> 1); last_word = (int )(((eeprom->offset + eeprom->len) - 1U) >> 1); tmp___0 = kzalloc((size_t )max_len, 208U); 
eeprom_buff = (u16 *)tmp___0; if ((unsigned long )eeprom_buff == (unsigned long )((u16 *)0U)) { return (-12); } else { } ptr = (void *)eeprom_buff; if ((int )eeprom->offset & 1) { ret_val = (*(hw->eeprom.ops.read))(hw, (int )((u16 )first_word), eeprom_buff); if (ret_val != 0) { goto err; } else { } ptr = ptr + 1; } else { } if ((int )(eeprom->offset + eeprom->len) & 1) { ret_val = (*(hw->eeprom.ops.read))(hw, (int )((u16 )last_word), eeprom_buff + (unsigned long )(last_word - first_word)); if (ret_val != 0) { goto err; } else { } } else { } i = 0U; goto ldv_56044; ldv_56043: i = (u16 )((int )i + 1); ldv_56044: ; if ((int )i < (last_word - first_word) + 1) { goto ldv_56043; } else { } memcpy(ptr, (void const *)bytes, (size_t )eeprom->len); i = 0U; goto ldv_56047; ldv_56046: i = (u16 )((int )i + 1); ldv_56047: ; if ((int )i < (last_word - first_word) + 1) { goto ldv_56046; } else { } ret_val = (*(hw->eeprom.ops.write_buffer))(hw, (int )((u16 )first_word), (int )((unsigned int )((int )((u16 )last_word) - (int )((u16 )first_word)) + 1U), eeprom_buff); if (ret_val == 0) { (*(hw->eeprom.ops.update_checksum))(hw); } else { } err: kfree((void const *)eeprom_buff); return (ret_val); } } static void ixgbe_get_drvinfo(struct net_device *netdev , struct ethtool_drvinfo *drvinfo ) { struct ixgbe_adapter *adapter ; void *tmp ; u32 nvm_track_id ; char const *tmp___0 ; int tmp___1 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; strlcpy((char *)(& drvinfo->driver), (char const *)(& ixgbe_driver_name), 32UL); strlcpy((char *)(& drvinfo->version), (char const *)(& ixgbe_driver_version), 32UL); nvm_track_id = (u32 )(((int )adapter->eeprom_verh << 16) | (int )adapter->eeprom_verl); snprintf((char *)(& drvinfo->fw_version), 32UL, "0x%08x", nvm_track_id); tmp___0 = pci_name((struct pci_dev const *)adapter->pdev); strlcpy((char *)(& drvinfo->bus_info), tmp___0, 32UL); drvinfo->n_stats = netdev->num_tx_queues * 10U + 89U; 
drvinfo->testinfo_len = 5U; tmp___1 = ixgbe_get_regs_len(netdev); drvinfo->regdump_len = (__u32 )tmp___1; return; } } static void ixgbe_get_ringparam(struct net_device *netdev , struct ethtool_ringparam *ring ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_ring *tx_ring ; struct ixgbe_ring *rx_ring ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; tx_ring = adapter->tx_ring[0]; rx_ring = adapter->rx_ring[0]; ring->rx_max_pending = 4096U; ring->tx_max_pending = 4096U; ring->rx_pending = (__u32 )rx_ring->count; ring->tx_pending = (__u32 )tx_ring->count; return; } } static int ixgbe_set_ringparam(struct net_device *netdev , struct ethtool_ringparam *ring ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_ring *temp_ring ; int i ; int err ; u32 new_rx_count ; u32 new_tx_count ; u32 __min1 ; u32 __max1 ; u32 __max2 ; u32 __min2 ; u32 __min1___0 ; u32 __max1___0 ; u32 __max2___0 ; u32 __min2___0 ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; int __max1___1 ; int __max2___1 ; void *tmp___3 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; err = 0; if (ring->rx_mini_pending != 0U || ring->rx_jumbo_pending != 0U) { return (-22); } else { } __max1 = ring->tx_pending; __max2 = 64U; __min1 = __max1 > __max2 ? __max1 : __max2; __min2 = 4096U; new_tx_count = __min1 < __min2 ? __min1 : __min2; new_tx_count = (new_tx_count + 7U) & 4294967288U; __max1___0 = ring->rx_pending; __max2___0 = 64U; __min1___0 = __max1___0 > __max2___0 ? __max1___0 : __max2___0; __min2___0 = 4096U; new_rx_count = __min1___0 < __min2___0 ? 
/* ixgbe_set_ringparam (continued): tail of the RX ring-count clamp; the
 * min/max ternary halves begin on the previous original line. */
__min1___0 : __min2___0; new_rx_count = (new_rx_count + 7U) & 4294967288U; /* round up to a multiple of 8 (mask ~7), matching the TX rounding above */
if (adapter->tx_ring_count == new_tx_count && adapter->rx_ring_count == new_rx_count) { return (0); } else { } /* nothing to do: both counts already match */
/* Spin until state bit 1 is acquired, sleeping 1-2 ms per attempt; CIL
 * lowered the while-loop into the ldv_56086/ldv_56087 gotos. (Bit 1 is
 * presumably __IXGBE_RESETTING -- confirm against ixgbe.h.) */
goto ldv_56087; ldv_56086: usleep_range(1000UL, 2000UL); ldv_56087: tmp___0 = test_and_set_bit(1L, (unsigned long volatile *)(& adapter->state)); if (tmp___0 != 0) { goto ldv_56086; } else { }
/* If the interface is down, just record the new counts on every TX/RX ring
 * and on the adapter, then jump to clear_reset; no resources need to be
 * reallocated now. */
tmp___1 = netif_running((struct net_device const *)adapter->netdev); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { i = 0; goto ldv_56090; ldv_56089: (adapter->tx_ring[i])->count = (u16 )new_tx_count; i = i + 1; ldv_56090: ; if (adapter->num_tx_queues > i) { goto ldv_56089; } else { } i = 0; goto ldv_56093; ldv_56092: (adapter->rx_ring[i])->count = (u16 )new_rx_count; i = i + 1; ldv_56093: ; if (adapter->num_rx_queues > i) { goto ldv_56092; } else { } adapter->tx_ring_count = new_tx_count; adapter->rx_ring_count = new_rx_count; goto clear_reset; } else { }
/* i = max(num_tx_queues, num_rx_queues): size, in rings, of the scratch
 * array used to stage the resized rings. */
__max1___1 = adapter->num_tx_queues; __max2___1 = adapter->num_rx_queues; i = __max1___1 > __max2___1 ?
__max1___1 : __max2___1;
/* Allocate the scratch ring array (the generator hard-codes 4096 bytes per
 * struct ixgbe_ring); -12 is -ENOMEM. */
tmp___3 = ldv_vmalloc_179((unsigned long )i * 4096UL); temp_ring = (struct ixgbe_ring *)tmp___3; if ((unsigned long )temp_ring == (unsigned long )((struct ixgbe_ring *)0)) { err = -12; goto clear_reset; } else { }
ixgbe_down(adapter); /* stop the interface before swapping ring resources */
/* TX resize: clone each ring into temp, set the new count and allocate
 * resources; on failure free the temp rings built so far (walking i back
 * down) and bail to err_setup. On success free the old resources and copy
 * the prepared temp rings back, then commit tx_ring_count. */
if (adapter->tx_ring_count != new_tx_count) { i = 0; goto ldv_56104; ldv_56103: memcpy((void *)temp_ring + (unsigned long )i, (void const *)adapter->tx_ring[i], 4096UL); (temp_ring + (unsigned long )i)->count = (u16 )new_tx_count; err = ixgbe_setup_tx_resources(temp_ring + (unsigned long )i); if (err != 0) { goto ldv_56100; ldv_56099: i = i - 1; ixgbe_free_tx_resources(temp_ring + (unsigned long )i); ldv_56100: ; if (i != 0) { goto ldv_56099; } else { } goto err_setup; } else { } i = i + 1; ldv_56104: ; if (adapter->num_tx_queues > i) { goto ldv_56103; } else { } i = 0; goto ldv_56107; ldv_56106: ixgbe_free_tx_resources(adapter->tx_ring[i]); memcpy((void *)adapter->tx_ring[i], (void const *)temp_ring + (unsigned long )i, 4096UL); i = i + 1; ldv_56107: ; if (adapter->num_tx_queues > i) { goto ldv_56106; } else { } adapter->tx_ring_count = new_tx_count; } else { }
/* RX resize: same staged clone/alloc/swap pattern as the TX block above. */
if (adapter->rx_ring_count != new_rx_count) { i = 0; goto ldv_56113; ldv_56112: memcpy((void *)temp_ring + (unsigned long )i, (void const *)adapter->rx_ring[i], 4096UL); (temp_ring + (unsigned long )i)->count = (u16 )new_rx_count; err = ixgbe_setup_rx_resources(temp_ring + (unsigned long )i); if (err != 0) { goto ldv_56110; ldv_56109: i = i - 1; ixgbe_free_rx_resources(temp_ring + (unsigned long )i); ldv_56110: ; if (i != 0) { goto ldv_56109; } else { } goto err_setup; } else { } i = i + 1; ldv_56113: ; if (adapter->num_rx_queues > i) { goto ldv_56112; } else { } i = 0; goto ldv_56116; ldv_56115: ixgbe_free_rx_resources(adapter->rx_ring[i]); memcpy((void *)adapter->rx_ring[i], (void const *)temp_ring + (unsigned long )i, 4096UL); i = i + 1; ldv_56116: ; if (adapter->num_rx_queues > i) { goto ldv_56115; } else { } adapter->rx_ring_count = new_rx_count;
} else { } err_setup: ixgbe_up(adapter); vfree((void const *)temp_ring); clear_reset: clear_bit(1L, (unsigned long volatile *)(& adapter->state)); return (err); } } static int ixgbe_get_sset_count(struct net_device *netdev , int sset ) { { switch (sset) { case 0: ; return (5); case 1: ; return ((int )(netdev->num_tx_queues * 10U + 89U)); default: ; return (-95); } } } static void ixgbe_get_ethtool_stats(struct net_device *netdev , struct ethtool_stats *stats , u64 *data ) { struct ixgbe_adapter *adapter ; void *tmp ; struct rtnl_link_stats64 temp ; struct rtnl_link_stats64 const *net_stats ; unsigned int start ; struct ixgbe_ring *ring ; int i ; int j ; char *p ; struct rtnl_link_stats64 *tmp___0 ; bool tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; p = (char *)0; ixgbe_update_stats(adapter); tmp___0 = dev_get_stats(netdev, & temp); net_stats = (struct rtnl_link_stats64 const *)tmp___0; i = 0; goto ldv_56148; ldv_56147: ; switch (ixgbe_gstrings_stats[i].type) { case 0: p = (char *)net_stats + (unsigned long )ixgbe_gstrings_stats[i].stat_offset; goto ldv_56143; case 1: p = (char *)adapter + (unsigned long )ixgbe_gstrings_stats[i].stat_offset; goto ldv_56143; default: *(data + (unsigned long )i) = 0ULL; goto ldv_56146; } ldv_56143: *(data + (unsigned long )i) = ixgbe_gstrings_stats[i].sizeof_stat == 8 ? 
/* ixgbe_get_ethtool_stats (continued): tail of the per-stat copy -- an
 * 8-byte stat is copied as-is, a 4-byte stat widened to u64 (the ternary
 * and its sizeof_stat test begin on the previous original line). */
*((u64 *)p) : (u64 )*((u32 *)p); ldv_56146: i = i + 1; ldv_56148: ; if ((unsigned int )i <= 56U) { goto ldv_56147; } else { }
/* Per-TX-ring stats: five u64 slots per ring. A NULL ring emits five
 * zeros. packets/bytes are read under the u64_stats seqcount
 * (fetch_begin/fetch_retry loop, lowered to the ldv_56151 goto); the
 * busy-poll counters (yields/misses/cleaned) are read outside that loop. */
j = 0; goto ldv_56154; ldv_56153: ring = adapter->tx_ring[j]; if ((unsigned long )ring == (unsigned long )((struct ixgbe_ring *)0)) { *(data + (unsigned long )i) = 0ULL; *(data + ((unsigned long )i + 1UL)) = 0ULL; i = i + 2; *(data + (unsigned long )i) = 0ULL; *(data + ((unsigned long )i + 1UL)) = 0ULL; *(data + ((unsigned long )i + 2UL)) = 0ULL; i = i + 3; goto ldv_56150; } else { } ldv_56151: start = u64_stats_fetch_begin_irq((struct u64_stats_sync const *)(& ring->syncp)); *(data + (unsigned long )i) = ring->stats.packets; *(data + ((unsigned long )i + 1UL)) = ring->stats.bytes; tmp___1 = u64_stats_fetch_retry_irq((struct u64_stats_sync const *)(& ring->syncp), start); if ((int )tmp___1) { goto ldv_56151; } else { } i = i + 2; *(data + (unsigned long )i) = ring->stats.yields; *(data + ((unsigned long )i + 1UL)) = ring->stats.misses; *(data + ((unsigned long )i + 2UL)) = ring->stats.cleaned; i = i + 3; ldv_56150: j = j + 1; ldv_56154: ; if ((unsigned int )j < netdev->num_tx_queues) { goto ldv_56153; } else { }
/* Per-RX-ring stats: identical shape to the TX block above; the trailing
 * assignment is completed on the next original line. */
j = 0; goto ldv_56160; ldv_56159: ring = adapter->rx_ring[j]; if ((unsigned long )ring == (unsigned long )((struct ixgbe_ring *)0)) { *(data + (unsigned long )i) = 0ULL; *(data + ((unsigned long )i + 1UL)) = 0ULL; i = i + 2; *(data + (unsigned long )i) = 0ULL; *(data + ((unsigned long )i + 1UL)) = 0ULL; *(data + ((unsigned long )i + 2UL)) = 0ULL; i = i + 3; goto ldv_56156; } else { } ldv_56157: start = u64_stats_fetch_begin_irq((struct u64_stats_sync const *)(& ring->syncp)); *(data + (unsigned long )i) = ring->stats.packets; *(data + ((unsigned long )i + 1UL)) = ring->stats.bytes; tmp___2 = u64_stats_fetch_retry_irq((struct u64_stats_sync const *)(& ring->syncp), start); if ((int )tmp___2) { goto ldv_56157; } else { } i = i + 2; *(data + (unsigned long )i) = ring->stats.yields; *(data + ((unsigned long )i + 1UL)) =
ring->stats.misses; *(data + ((unsigned long )i + 2UL)) = ring->stats.cleaned; i = i + 3; ldv_56156: j = j + 1; ldv_56160: ; if ((unsigned int )j < netdev->num_tx_queues) { goto ldv_56159; } else { } /* 8 traffic classes of PXON/PXOFF TX then RX flow-control counters */ j = 0; goto ldv_56163; ldv_56162: tmp___3 = i; i = i + 1; *(data + (unsigned long )tmp___3) = adapter->stats.pxontxc[j]; tmp___4 = i; i = i + 1; *(data + (unsigned long )tmp___4) = adapter->stats.pxofftxc[j]; j = j + 1; ldv_56163: ; if (j <= 7) { goto ldv_56162; } else { } j = 0; goto ldv_56166; ldv_56165: tmp___5 = i; i = i + 1; *(data + (unsigned long )tmp___5) = adapter->stats.pxonrxc[j]; tmp___6 = i; i = i + 1; *(data + (unsigned long )tmp___6) = adapter->stats.pxoffrxc[j]; j = j + 1; ldv_56166: ; if (j <= 7) { goto ldv_56165; } else { } return; } }
/* ethtool .get_strings: emits the 32-byte-per-entry name table; ordering must mirror
   ixgbe_get_ethtool_stats exactly (fixed stats, per-queue entries, per-TC PXON/PXOFF). */
static void ixgbe_get_strings(struct net_device *netdev , u32 stringset , u8 *data ) { char *p ; int i ; { p = (char *)data; switch (stringset) { case 0U: i = 0; goto ldv_56177; ldv_56176: memcpy((void *)data, (void const *)(& ixgbe_gstrings_test) + (unsigned long )i, 32UL); data = data + 32UL; i = i + 1; ldv_56177: ; if ((unsigned int )i <= 4U) { goto ldv_56176; } else { } goto ldv_56179; case 1U: i = 0; goto ldv_56184; ldv_56183: memcpy((void *)p, (void const *)(& ixgbe_gstrings_stats[i].stat_string), 32UL); p = p + 32UL; i = i + 1; ldv_56184: ; if ((unsigned int )i <= 56U) { goto ldv_56183; } else { } i = 0; goto ldv_56187; ldv_56186: sprintf(p, "tx_queue_%u_packets", i); p = p + 32UL; sprintf(p, "tx_queue_%u_bytes", i); p = p + 32UL; sprintf(p, "tx_queue_%u_bp_napi_yield", i); p = p + 32UL; sprintf(p, "tx_queue_%u_bp_misses", i); p = p + 32UL; sprintf(p, "tx_queue_%u_bp_cleaned", i); p = p + 32UL; i = i + 1; ldv_56187: ; if ((unsigned int )i < netdev->num_tx_queues) { goto ldv_56186; } else { } i = 0; goto ldv_56190; ldv_56189: sprintf(p, "rx_queue_%u_packets", i); p = p + 32UL; sprintf(p, "rx_queue_%u_bytes", i); p = p + 32UL; sprintf(p, "rx_queue_%u_bp_poll_yield", i); p = p + 32UL; sprintf(p,
"rx_queue_%u_bp_misses", i); p = p + 32UL; sprintf(p, "rx_queue_%u_bp_cleaned", i); p = p + 32UL; i = i + 1; ldv_56190: ; if ((unsigned int )i < netdev->num_tx_queues) { goto ldv_56189; } else { } i = 0; goto ldv_56193; ldv_56192: sprintf(p, "tx_pb_%u_pxon", i); p = p + 32UL; sprintf(p, "tx_pb_%u_pxoff", i); p = p + 32UL; i = i + 1; ldv_56193: ; if (i <= 7) { goto ldv_56192; } else { } i = 0; goto ldv_56196; ldv_56195: sprintf(p, "rx_pb_%u_pxon", i); p = p + 32UL; sprintf(p, "rx_pb_%u_pxoff", i); p = p + 32UL; i = i + 1; ldv_56196: ; if (i <= 7) { goto ldv_56195; } else { } goto ldv_56179; } ldv_56179: ; return; } }
/* Self-test "link" step: report 1 (fail) in *data if the adapter was surprise-removed,
   otherwise query mac.ops.check_link and report 0 on link-up, 1 on link-down. */
static int ixgbe_link_test(struct ixgbe_adapter *adapter , u64 *data ) { struct ixgbe_hw *hw ; bool link_up ; u32 link_speed ; bool tmp ; { hw = & adapter->hw; link_speed = 0U; tmp = ixgbe_removed((void *)hw->hw_addr); if ((int )tmp) { *data = 1ULL; return (1); } else { } *data = 0ULL; (*(hw->mac.ops.check_link))(hw, & link_speed, & link_up, 1); if ((int )link_up) { return ((int )*data); } else { *data = 1ULL; } return ((int )*data); } }
/* Register-test table for the 82599-class MACs: entries are {reg, array_len, test_type,
   mask, write}, terminated by an all-zero entry. */
static struct ixgbe_reg_test const reg_test_82599[20U] = { {12832U, 1U, 1U, 2148007920U, 2148007920U}, {12896U, 1U, 1U, 2148007920U, 2148007920U}, {12296U, 1U, 1U, 4294967295U, 4294967295U}, {20616U, 1U, 1U, 0U, 0U}, {4096U, 4U, 1U, 4294967168U, 4294967168U}, {4100U, 4U, 1U, 4294967295U, 4294967295U}, {4104U, 4U, 1U, 1048448U, 1048575U}, {4136U, 4U, 3U, 0U, 33554432U}, {4120U, 4U, 1U, 65535U, 65535U}, {4136U, 4U, 3U, 0U, 0U}, {12896U, 1U, 1U, 2148007920U, 2148007920U}, {12800U, 1U, 1U, 4294967295U, 4294967295U}, {24576U, 4U, 1U, 4294967168U, 4294967295U}, {24580U, 4U, 1U, 4294967295U, 4294967295U}, {24584U, 4U, 1U, 1048448U, 1048448U}, {12288U, 1U, 2U, 1U, 1U}, {21504U, 16U, 5U, 4294967295U, 4294967295U}, {21504U, 16U, 6U, 2147614719U, 2148335615U}, {20992U, 128U, 4U, 4294967295U, 4294967295U}, {0U, (unsigned char)0, (unsigned char)0, 0U, 0U}};
/* Register-test table for the 82598 MAC (same entry format as above). */
static struct ixgbe_reg_test const reg_test_82598[22U] = { {12832U, 1U, 1U,
2148007920U, 2148007920U}, {12896U, 1U, 1U, 2148007920U, 2148007920U}, {12296U, 1U, 1U, 4294967295U, 4294967295U}, {20616U, 1U, 1U, 0U, 0U}, {4096U, 4U, 1U, 4294967168U, 4294967295U}, {4100U, 4U, 1U, 4294967295U, 4294967295U}, {4104U, 4U, 1U, 1048448U, 1048575U}, {4136U, 4U, 3U, 0U, 33554432U}, {4120U, 4U, 1U, 65535U, 65535U}, {4136U, 4U, 3U, 0U, 0U}, {12896U, 1U, 1U, 2148007920U, 2148007920U}, {12800U, 1U, 1U, 4294967295U, 4294967295U}, {51968U, 1U, 1U, 255U, 255U}, {24576U, 4U, 1U, 4294967168U, 4294967295U}, {24580U, 4U, 1U, 4294967295U, 4294967295U}, {24584U, 4U, 1U, 1048448U, 1048575U}, {12288U, 1U, 2U, 3U, 3U}, {32256U, 1U, 2U, 5U, 5U}, {21504U, 16U, 5U, 4294967295U, 4294967295U}, {21504U, 16U, 6U, 2148335615U, 2148335615U}, {20992U, 128U, 4U, 4294967295U, 4294967295U}, {0U, (unsigned char)0, (unsigned char)0, 0U, 0U}};
/* Writes four bit patterns (0x5A5A5A5A, 0xA5A5A5A5, 0, ~0) masked by 'write' to 'reg',
   reads back and compares under 'mask', restoring the original value each iteration.
   On mismatch stores the failing register offset in *data and returns true (= failed). */
static bool reg_pattern_test(struct ixgbe_adapter *adapter , u64 *data , int reg , u32 mask , u32 write ) { u32 pat ; u32 val ; u32 before ; u32 test_pattern[4U] ; bool tmp ; { test_pattern[0] = 1515870810U; test_pattern[1] = 2779096485U; test_pattern[2] = 0U; test_pattern[3] = 4294967295U; tmp = ixgbe_removed((void *)adapter->hw.hw_addr); if ((int )tmp) { *data = 1ULL; return (1); } else { } pat = 0U; goto ldv_56227; ldv_56226: before = ixgbe_read_reg(& adapter->hw, (u32 )reg); ixgbe_write_reg(& adapter->hw, (u32 )reg, test_pattern[pat] & write); val = ixgbe_read_reg(& adapter->hw, (u32 )reg); if (((test_pattern[pat] & write) & mask) != val) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", reg, val, (test_pattern[pat] & write) & mask); } else { } *data = (u64 )reg; ixgbe_write_reg(& adapter->hw, (u32 )reg, before); return (1); } else { } ixgbe_write_reg(& adapter->hw, (u32 )reg, before); pat = pat + 1U; ldv_56227: ; if (pat <= 3U) { goto ldv_56226; } else { } return (0); } }
/* Single write-then-verify variant of the register test; restores the register afterwards. */
static bool reg_set_and_check(struct ixgbe_adapter *adapter ,
u64 *data , int reg , u32 mask , u32 write ) { u32 val ; u32 before ; bool tmp ; { tmp = ixgbe_removed((void *)adapter->hw.hw_addr); if ((int )tmp) { *data = 1ULL; return (1); } else { } before = ixgbe_read_reg(& adapter->hw, (u32 )reg); ixgbe_write_reg(& adapter->hw, (u32 )reg, write & mask); val = ixgbe_read_reg(& adapter->hw, (u32 )reg); if (((write ^ val) & mask) != 0U) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg, val & mask, write & mask); } else { } *data = (u64 )reg; ixgbe_write_reg(& adapter->hw, (u32 )reg, before); return (1); } else { } ixgbe_write_reg(& adapter->hw, (u32 )reg, before); return (0); } }
/* Self-test "register" step: first toggles the STATUS register (offset 8) under a
   MAC-specific mask and verifies the toggle took effect, then walks the per-MAC test
   table applying pattern / set-and-check / plain-write tests at strided offsets
   (x64, x4, x8, x8+4 depending on test_type; see the loop on the next source line). */
static int ixgbe_reg_test(struct ixgbe_adapter *adapter , u64 *data ) { struct ixgbe_reg_test const *test ; u32 value ; u32 before ; u32 after ; u32 i ; u32 toggle ; bool tmp ; u32 tmp___0 ; u32 tmp___1 ; bool b ; { tmp = ixgbe_removed((void *)adapter->hw.hw_addr); if ((int )tmp) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "Adapter removed - register test blocked\n"); } else { } *data = 1ULL; return (1); } else { } switch ((unsigned int )adapter->hw.mac.type) { case 1U: toggle = 2147480575U; test = (struct ixgbe_reg_test const *)(& reg_test_82598); goto ldv_56249; case 2U: ; case 3U: ; case 4U: ; case 5U: toggle = 2147480335U; test = (struct ixgbe_reg_test const *)(& reg_test_82599); goto ldv_56249; default: *data = 1ULL; return (1); } ldv_56249: before = ixgbe_read_reg(& adapter->hw, 8U); tmp___0 = ixgbe_read_reg(& adapter->hw, 8U); value = tmp___0 & toggle; ixgbe_write_reg(& adapter->hw, 8U, toggle); tmp___1 = ixgbe_read_reg(& adapter->hw, 8U); after = tmp___1 & toggle; if (value != after) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "failed STATUS register test got: 0x%08X expected: 0x%08X\n", after, value); } else { } *data = 1ULL; return
(1); } else { } ixgbe_write_reg(& adapter->hw, 8U, before); goto ldv_56267; ldv_56266: i = 0U; goto ldv_56264; ldv_56263: b = 0; switch ((int )test->test_type) { case 1: b = reg_pattern_test(adapter, data, (int )((u32 )test->reg + i * 64U), test->mask, test->write); goto ldv_56257; case 2: b = reg_set_and_check(adapter, data, (int )((u32 )test->reg + i * 64U), test->mask, test->write); goto ldv_56257; case 3: ixgbe_write_reg(& adapter->hw, (u32 )test->reg + i * 64U, test->write); goto ldv_56257; case 4: b = reg_pattern_test(adapter, data, (int )((u32 )test->reg + i * 4U), test->mask, test->write); goto ldv_56257; case 5: b = reg_pattern_test(adapter, data, (int )((u32 )test->reg + i * 8U), test->mask, test->write); goto ldv_56257; case 6: b = reg_pattern_test(adapter, data, (int )(((u32 )test->reg + i * 8U) + 4U), test->mask, test->write); goto ldv_56257; } ldv_56257: ; if ((int )b) { return (1); } else { } i = i + 1U; ldv_56264: ; if ((u32 )test->array_len > i) { goto ldv_56263; } else { } test = test + 1; ldv_56267: ; if ((unsigned int )((unsigned short )test->reg) != 0U) { goto ldv_56266; } else { } *data = 0ULL; return (0); } }
/* Self-test "eeprom" step: delegates to eeprom.ops.validate_checksum; *data is 1 on
   checksum failure, 0 on success, and the same value is returned. */
static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter , u64 *data ) { struct ixgbe_hw *hw ; s32 tmp ; { hw = & adapter->hw; tmp = (*(hw->eeprom.ops.validate_checksum))(hw, (u16 *)0U); if (tmp != 0) { *data = 1ULL; } else { *data = 0ULL; } return ((int )*data); } }
/* IRQ handler used only during the interrupt self-test: ORs the current EICR value
   (offset 2048) into adapter->test_icr so the test loop can inspect which causes fired. */
static irqreturn_t ixgbe_test_intr(int irq , void *data ) { struct net_device *netdev ; struct ixgbe_adapter *adapter ; void *tmp ; u32 tmp___0 ; { netdev = (struct net_device *)data; tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; tmp___0 = ixgbe_read_reg(& adapter->hw, 2048U); adapter->test_icr = adapter->test_icr | tmp___0; return (1); } }
/* Self-test "interrupt" step: skipped entirely under MSI-X; otherwise hooks the test
   handler on the legacy/MSI IRQ (the ldv_request_irq_* wrappers are LDV stand-ins for
   request_irq), then masks/unmasks each of the first 10 interrupt causes and records a
   failure code in *data for unexpected (3), missing (4) or spurious (5) assertions. */
static int ixgbe_intr_test(struct ixgbe_adapter *adapter , u64 *data ) { struct net_device *netdev ; u32 mask ; u32 i ; u32 shared_int ; u32 irq ; int tmp ; int tmp___0 ; int tmp___1 ; {
netdev = adapter->netdev; i = 0U; shared_int = 1U; irq = (adapter->pdev)->irq; *data = 0ULL; if ((unsigned long )adapter->msix_entries != (unsigned long )((struct msix_entry *)0)) { return (0); } else if ((adapter->flags & 2U) != 0U) { shared_int = 0U; tmp = ldv_request_irq_43(irq, & ixgbe_test_intr, 0UL, (char const *)(& netdev->name), (void *)netdev); if (tmp != 0) { *data = 1ULL; return (-1); } else { } } else { tmp___1 = ldv_request_irq_44(irq, & ixgbe_test_intr, 256UL, (char const *)(& netdev->name), (void *)netdev); if (tmp___1 == 0) { shared_int = 0U; } else { tmp___0 = ldv_request_irq_46(irq, & ixgbe_test_intr, 128UL, (char const *)(& netdev->name), (void *)netdev); if (tmp___0 != 0) { *data = 1ULL; return (-1); } else { } } } if (((int )adapter->msg_enable & 8192) != 0) { netdev_info((struct net_device const *)adapter->netdev, "testing %s interrupt\n", shared_int != 0U ? (char *)"shared" : (char *)"unshared"); } else { } /* disable all causes (EIMC = ~0, offset 2184), flush with a STATUS read, let it settle */ ixgbe_write_reg(& adapter->hw, 2184U, 4294967295U); ixgbe_read_reg(& adapter->hw, 8U); usleep_range(10000UL, 20000UL); goto ldv_56291; ldv_56290: mask = (u32 )(1 << (int )i); if (shared_int == 0U) { adapter->test_icr = 0U; ixgbe_write_reg(& adapter->hw, 2184U, ~ mask & 32767U); ixgbe_write_reg(& adapter->hw, 2056U, ~ mask & 32767U); ixgbe_read_reg(& adapter->hw, 8U); usleep_range(10000UL, 20000UL); if ((adapter->test_icr & mask) != 0U) { *data = 3ULL; goto ldv_56289; } else { } } else { } adapter->test_icr = 0U; ixgbe_write_reg(& adapter->hw, 2176U, mask); ixgbe_write_reg(& adapter->hw, 2056U, mask); ixgbe_read_reg(& adapter->hw, 8U); usleep_range(10000UL, 20000UL); if ((adapter->test_icr & mask) == 0U) { *data = 4ULL; goto ldv_56289; } else { } if (shared_int == 0U) { adapter->test_icr = 0U; ixgbe_write_reg(& adapter->hw, 2184U, ~ mask & 32767U); ixgbe_write_reg(& adapter->hw, 2056U, ~ mask & 32767U); ixgbe_read_reg(& adapter->hw, 8U); usleep_range(10000UL, 20000UL); if (adapter->test_icr != 0U) { *data = 5ULL; goto ldv_56289; } else { }
} else { } i = i + 1U; ldv_56291: ; if (i <= 9U) { goto ldv_56290; } else { } ldv_56289: ixgbe_write_reg(& adapter->hw, 2184U, 4294967295U); ixgbe_read_reg(& adapter->hw, 8U); usleep_range(10000UL, 20000UL); ldv_free_irq_183(irq, (void *)netdev); return ((int )*data); } }
/* Tears down the dedicated loopback-test rings: stops RX, disables the test TX queue's
   TXDCTL enable bit and, on 82599-class MACs, the DMATXCTL (offset 19072) master enable,
   then resets the adapter and frees the test ring resources. */
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter ) { struct ixgbe_ring *tx_ring ; struct ixgbe_ring *rx_ring ; struct ixgbe_hw *hw ; u32 reg_ctl ; { tx_ring = & adapter->test_tx_ring; rx_ring = & adapter->test_rx_ring; hw = & adapter->hw; (*(hw->mac.ops.disable_rx))(hw); ixgbe_disable_rx_queue(adapter, rx_ring); reg_ctl = ixgbe_read_reg(hw, (u32 )((int )tx_ring->reg_idx * 64 + 24616)); reg_ctl = reg_ctl & 4261412863U; ixgbe_write_reg(hw, (u32 )((int )tx_ring->reg_idx * 64 + 24616), reg_ctl); switch ((unsigned int )hw->mac.type) { case 2U: ; case 3U: ; case 4U: ; case 5U: reg_ctl = ixgbe_read_reg(hw, 19072U); reg_ctl = reg_ctl & 4294967294U; ixgbe_write_reg(hw, 19072U, reg_ctl); goto ldv_56303; default: ; goto ldv_56303; } ldv_56303: ixgbe_reset(adapter); ixgbe_free_tx_resources(& adapter->test_tx_ring); ixgbe_free_rx_resources(& adapter->test_rx_ring); return; } }
/* Allocates and configures the 512-descriptor TX/RX rings used by the loopback test,
   enabling TX DMA first on 82599-class MACs; on RX allocation failure unwinds through
   ixgbe_free_desc_rings and returns the self-test error code (4). */
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter ) { struct ixgbe_ring *tx_ring ; struct ixgbe_ring *rx_ring ; struct ixgbe_hw *hw ; u32 rctl ; u32 reg_data ; int ret_val ; int err ; { tx_ring = & adapter->test_tx_ring; rx_ring = & adapter->test_rx_ring; hw = & adapter->hw; tx_ring->count = 512U; tx_ring->queue_index = 0U; tx_ring->dev = & (adapter->pdev)->dev; tx_ring->netdev = adapter->netdev; tx_ring->reg_idx = (adapter->tx_ring[0])->reg_idx; err = ixgbe_setup_tx_resources(tx_ring); if (err != 0) { return (1); } else { } switch ((unsigned int )adapter->hw.mac.type) { case 2U: ; case 3U: ; case 4U: ; case 5U: reg_data = ixgbe_read_reg(& adapter->hw, 19072U); reg_data = reg_data | 1U; ixgbe_write_reg(& adapter->hw, 19072U, reg_data); goto ldv_56319; default: ; goto ldv_56319; } ldv_56319:
ixgbe_configure_tx_ring(adapter, tx_ring); rx_ring->count = 512U; rx_ring->queue_index = 0U; rx_ring->dev = & (adapter->pdev)->dev; rx_ring->netdev = adapter->netdev; rx_ring->reg_idx = (adapter->rx_ring[0])->reg_idx; err = ixgbe_setup_rx_resources(rx_ring); if (err != 0) { ret_val = 4; goto err_nomem; } else { } (*(hw->mac.ops.disable_rx))(hw); ixgbe_configure_rx_ring(adapter, rx_ring); rctl = ixgbe_read_reg(& adapter->hw, 12288U); rctl = rctl | 2U; ixgbe_write_reg(& adapter->hw, 12288U, rctl); (*(hw->mac.ops.enable_rx))(hw); return (0); err_nomem: ixgbe_free_desc_rings(adapter); return (ret_val); } }
/* Puts the MAC into loopback for the self-test: sets bits in HLREG0 (offset 16960) and
   the register at offset 20608, then either a MAC-specific loopback enable (offset 17200
   on newer MACs) or AUTOC (offset 17056) from the cached orig_autoc; on 82598 it also
   raises drive-strength bits in the Atlas analog registers. Returns 0, or 10 when no
   usable cached AUTOC value exists. */
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 reg_data ; u8 atlas ; { hw = & adapter->hw; reg_data = ixgbe_read_reg(hw, 16960U); reg_data = reg_data | 32768U; ixgbe_write_reg(hw, 16960U, reg_data); reg_data = ixgbe_read_reg(hw, 20608U); reg_data = reg_data | 1282U; ixgbe_write_reg(hw, 20608U, reg_data); switch ((unsigned int )adapter->hw.mac.type) { case 3U: ; case 4U: ; case 5U: reg_data = ixgbe_read_reg(hw, 17200U); reg_data = reg_data | 1U; ixgbe_write_reg(hw, 17200U, reg_data); goto ldv_56330; default: ; if (hw->mac.orig_autoc != 0U) { reg_data = hw->mac.orig_autoc | 1U; ixgbe_write_reg(hw, 17056U, reg_data); } else { return (10); } } ldv_56330: ixgbe_read_reg(hw, 8U); usleep_range(10000UL, 20000UL); if ((unsigned int )hw->mac.type == 1U) { (*(hw->mac.ops.read_analog_reg8))(hw, 36U, & atlas); atlas = (u8 )((unsigned int )atlas | 16U); (*(hw->mac.ops.write_analog_reg8))(hw, 36U, (int )atlas); (*(hw->mac.ops.read_analog_reg8))(hw, 11U, & atlas); atlas = (u8 )((unsigned int )atlas | 240U); (*(hw->mac.ops.write_analog_reg8))(hw, 11U, (int )atlas); (*(hw->mac.ops.read_analog_reg8))(hw, 12U, & atlas); atlas = (u8 )((unsigned int )atlas | 240U); (*(hw->mac.ops.write_analog_reg8))(hw, 12U, (int )atlas); (*(hw->mac.ops.read_analog_reg8))(hw, 13U, & atlas); atlas = (u8 )((unsigned int )atlas | 240U);
(*(hw->mac.ops.write_analog_reg8))(hw, 13U, (int )atlas); } else { } return (0); } }
/* Undoes ixgbe_setup_loopback_test: clears the loopback bit in HLREG0 (offset 16960). */
static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter ) { u32 reg_data ; { reg_data = ixgbe_read_reg(& adapter->hw, 16960U); reg_data = reg_data & 4294934527U; ixgbe_write_reg(& adapter->hw, 16960U, reg_data); return; } }
/* Builds the loopback test frame: fill with 0xFF, overwrite part of the second half with
   0xAA, and place 0xBE / 0xAF marker bytes at fixed offsets that
   ixgbe_check_lbtest_frame later verifies. */
static void ixgbe_create_lbtest_frame(struct sk_buff *skb , unsigned int frame_size ) { { memset((void *)skb->data, 255, (size_t )frame_size); frame_size = frame_size >> 1; memset((void *)skb->data + (unsigned long )frame_size, 170, (size_t )(frame_size / 2U - 1U)); memset((void *)skb->data + (unsigned long )(frame_size + 10U), 190, 1UL); memset((void *)skb->data + (unsigned long )(frame_size + 12U), 175, 1UL); return; } }
/* Verifies a received page-backed RX buffer carries the pattern written by
   ixgbe_create_lbtest_frame (0xFF at byte 3 plus the 0xBE/0xAF markers); the page is
   kmapped for CPU access and unmapped before returning. */
static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer , unsigned int frame_size ) { unsigned char *data ; bool match ; void *tmp ; { match = 1; frame_size = frame_size >> 1; tmp = kmap(rx_buffer->page); data = (unsigned char *)tmp + (unsigned long )rx_buffer->page_offset; if (((unsigned int )*(data + 3UL) != 255U || (unsigned int )*(data + (unsigned long )(frame_size + 10U)) != 190U) || (unsigned int )*(data + (unsigned long )(frame_size + 12U)) != 175U) { match = 0; } else { } kunmap(rx_buffer->page); return (match); } }
/* Walks completed RX descriptors (status bit 1 set), DMA-syncs each buffer to the CPU,
   counts frames matching the test pattern, frees the paired TX buffers, refills the RX
   ring and updates both next_to_clean indices; returns the number of matching frames. */
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring , struct ixgbe_ring *tx_ring , unsigned int size ) { union ixgbe_adv_rx_desc *rx_desc ; struct ixgbe_rx_buffer *rx_buffer ; struct ixgbe_tx_buffer *tx_buffer ; u16 rx_ntc ; u16 tx_ntc ; u16 count ; unsigned int tmp ; bool tmp___0 ; unsigned int tmp___1 ; __le32 tmp___2 ; struct netdev_queue *tmp___3 ; { count = 0U; rx_ntc = rx_ring->next_to_clean; tx_ntc = tx_ring->next_to_clean; rx_desc = (union ixgbe_adv_rx_desc *)rx_ring->desc + (unsigned long )rx_ntc; goto ldv_56359; ldv_56358: rx_buffer = rx_ring->__annonCompField118.rx_buffer_info + (unsigned long )rx_ntc; tmp = ixgbe_rx_bufsz(rx_ring); dma_sync_single_for_cpu(rx_ring->dev,
rx_buffer->dma, (size_t )tmp, 2); tmp___0 = ixgbe_check_lbtest_frame(rx_buffer, size); if ((int )tmp___0) { count = (u16 )((int )count + 1); } else { } tmp___1 = ixgbe_rx_bufsz(rx_ring); dma_sync_single_for_device(rx_ring->dev, rx_buffer->dma, (size_t )tmp___1, 2); tx_buffer = tx_ring->__annonCompField118.tx_buffer_info + (unsigned long )tx_ntc; ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); rx_ntc = (u16 )((int )rx_ntc + 1); if ((int )rx_ring->count == (int )rx_ntc) { rx_ntc = 0U; } else { } tx_ntc = (u16 )((int )tx_ntc + 1); if ((int )tx_ring->count == (int )tx_ntc) { tx_ntc = 0U; } else { } rx_desc = (union ixgbe_adv_rx_desc *)rx_ring->desc + (unsigned long )rx_ntc; ldv_56359: tmp___2 = ixgbe_test_staterr(rx_desc, 1U); if (tmp___2 != 0U) { goto ldv_56358; } else { } tmp___3 = txring_txq((struct ixgbe_ring const *)tx_ring); netdev_tx_reset_queue(tmp___3); ixgbe_alloc_rx_buffers(rx_ring, (int )count); rx_ring->next_to_clean = rx_ntc; tx_ring->next_to_clean = tx_ntc; return (count); } }
/* Drives the MAC loopback test: temporarily clears a flag bit in adapter->flags, builds
   one 1024-byte pattern skb, and for each of lc+1 iterations transmits 64 references to
   it and verifies all 64 come back intact. Returns 0 on success, 11 (skb alloc failure),
   12 (transmit failure) or 13 (receive mismatch); always restores the saved flags. */
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter ) { struct ixgbe_ring *tx_ring ; struct ixgbe_ring *rx_ring ; int i ; int j ; int lc ; int good_cnt ; int ret_val ; unsigned int size ; netdev_tx_t tx_ret_val ; struct sk_buff *skb ; u32 flags_orig ; u16 tmp ; { tx_ring = & adapter->test_tx_ring; rx_ring = & adapter->test_rx_ring; ret_val = 0; size = 1024U; flags_orig = adapter->flags; adapter->flags = adapter->flags & 4294963199U; skb = alloc_skb(size, 208U); if ((unsigned long )skb == (unsigned long )((struct sk_buff *)0)) { return (11); } else { } ixgbe_create_lbtest_frame(skb, size); skb_put(skb, size); if ((int )rx_ring->count <= (int )tx_ring->count) { lc = (int )((unsigned int )tx_ring->count / 64U) * 2 + 1; } else { lc = (int )((unsigned int )rx_ring->count / 64U) * 2 + 1; } j = 0; goto ldv_56380; ldv_56379: good_cnt = 0; i = 0; goto ldv_56376; ldv_56375: skb_get(skb); tx_ret_val = ixgbe_xmit_frame_ring(skb, adapter, tx_ring); if ((int )tx_ret_val == 0)
{ good_cnt = good_cnt + 1; } else { } i = i + 1; ldv_56376: ; if (i <= 63) { goto ldv_56375; } else { } if (good_cnt != 64) { ret_val = 12; goto ldv_56378; } else { } msleep(200U); tmp = ixgbe_clean_test_rings(rx_ring, tx_ring, size); good_cnt = (int )tmp; if (good_cnt != 64) { ret_val = 13; goto ldv_56378; } else { } j = j + 1; ldv_56380: ; if (j <= lc) { goto ldv_56379; } else { } ldv_56378: kfree_skb(skb); adapter->flags = flags_orig; return (ret_val); } }
/* Self-test "loopback" step: set up test rings, enable MAC loopback, run the frame test,
   then clean up in reverse order; *data carries the first non-zero step result. */
static int ixgbe_loopback_test(struct ixgbe_adapter *adapter , u64 *data ) { int tmp ; int tmp___0 ; int tmp___1 ; { tmp = ixgbe_setup_desc_rings(adapter); *data = (u64 )tmp; if (*data != 0ULL) { goto out; } else { } tmp___0 = ixgbe_setup_loopback_test(adapter); *data = (u64 )tmp___0; if (*data != 0ULL) { goto err_loopback; } else { } tmp___1 = ixgbe_run_loopback_test(adapter); *data = (u64 )tmp___1; ixgbe_loopback_cleanup(adapter); err_loopback: ixgbe_free_desc_rings(adapter); out: ; return ((int )*data); } }
/* ethtool .self_test entry point. Offline mode (flags == 1) runs the link, register,
   eeprom, interrupt and loopback tests -- refusing when any VF is active, skipping
   loopback in VT mode -- closing/reopening the interface around them; online mode runs
   only the link test. Results land in data[0..4] = {reg, eeprom, intr, loopback, link}
   and bit 1 of eth_test->flags is set on any failure. */
static void ixgbe_diag_test(struct net_device *netdev , struct ethtool_test *eth_test , u64 *data ) { struct ixgbe_adapter *adapter ; void *tmp ; bool if_running ; bool tmp___0 ; bool tmp___1 ; struct ixgbe_hw *hw ; int i ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; tmp___0 = netif_running((struct net_device const *)netdev); if_running = tmp___0; tmp___1 = ixgbe_removed((void *)adapter->hw.hw_addr); if ((int )tmp___1) { if (((int )adapter->msg_enable & 8192) != 0) { netdev_err((struct net_device const *)adapter->netdev, "Adapter removed - test blocked\n"); } else { } *data = 1ULL; *(data + 1UL) = 1ULL; *(data + 2UL) = 1ULL; *(data + 3UL) = 1ULL; *(data + 4UL) = 1ULL; eth_test->flags = eth_test->flags | 2U; return; } else { } set_bit(0L, (unsigned long volatile *)(& adapter->state)); if (eth_test->flags == 1U) { hw = & adapter->hw; if ((adapter->flags & 8388608U) != 0U) {
/* refuse offline diagnostics while any VF has signalled clear_to_send */ i = 0; goto ldv_56398; ldv_56397: ; if ((int )(adapter->vfinfo + (unsigned long )i)->clear_to_send) { netdev_warn((struct net_device const *)netdev, "offline diagnostic is not supported when VFs are present\n"); *data = 1ULL; *(data + 1UL) = 1ULL; *(data + 2UL) = 1ULL; *(data + 3UL) = 1ULL; *(data + 4UL) = 1ULL; eth_test->flags = eth_test->flags | 2U; clear_bit(0L, (unsigned long volatile *)(& adapter->state)); goto skip_ol_tests; } else { } i = i + 1; ldv_56398: ; if ((unsigned int )i < adapter->num_vfs) { goto ldv_56397; } else { } } else { } if (((int )adapter->msg_enable & 8192) != 0) { netdev_info((struct net_device const *)adapter->netdev, "offline testing starting\n"); } else { } tmp___2 = ixgbe_link_test(adapter, data + 4UL); if (tmp___2 != 0) { eth_test->flags = eth_test->flags | 2U; } else { } if ((int )if_running) { dev_close(netdev); } else { ixgbe_reset(adapter); } if (((int )adapter->msg_enable & 8192) != 0) { netdev_info((struct net_device const *)adapter->netdev, "register testing starting\n"); } else { } tmp___3 = ixgbe_reg_test(adapter, data); if (tmp___3 != 0) { eth_test->flags = eth_test->flags | 2U; } else { } ixgbe_reset(adapter); if (((int )adapter->msg_enable & 8192) != 0) { netdev_info((struct net_device const *)adapter->netdev, "eeprom testing starting\n"); } else { } tmp___4 = ixgbe_eeprom_test(adapter, data + 1UL); if (tmp___4 != 0) { eth_test->flags = eth_test->flags | 2U; } else { } ixgbe_reset(adapter); if (((int )adapter->msg_enable & 8192) != 0) { netdev_info((struct net_device const *)adapter->netdev, "interrupt testing starting\n"); } else { } tmp___5 = ixgbe_intr_test(adapter, data + 2UL); if (tmp___5 != 0) { eth_test->flags = eth_test->flags | 2U; } else { } if ((adapter->flags & 8404992U) != 0U) { if (((int )adapter->msg_enable & 8192) != 0) { netdev_info((struct net_device const *)adapter->netdev, "Skip MAC loopback diagnostic in VT mode\n"); } else { } *(data + 3UL) = 0ULL; goto skip_loopback; } else { } ixgbe_reset(adapter);
if (((int )adapter->msg_enable & 8192) != 0) { netdev_info((struct net_device const *)adapter->netdev, "loopback testing starting\n"); } else { } tmp___6 = ixgbe_loopback_test(adapter, data + 3UL); if (tmp___6 != 0) { eth_test->flags = eth_test->flags | 2U; } else { } skip_loopback: ixgbe_reset(adapter); clear_bit(0L, (unsigned long volatile *)(& adapter->state)); if ((int )if_running) { dev_open(netdev); } else if ((unsigned long )hw->mac.ops.disable_tx_laser != (unsigned long )((void (*)(struct ixgbe_hw * ))0)) { (*(hw->mac.ops.disable_tx_laser))(hw); } else { } } else { if (((int )adapter->msg_enable & 8192) != 0) { netdev_info((struct net_device const *)adapter->netdev, "online testing starting\n"); } else { } tmp___7 = ixgbe_link_test(adapter, data + 4UL); if (tmp___7 != 0) { eth_test->flags = eth_test->flags | 2U; } else { } *data = 0ULL; *(data + 1UL) = 0ULL; *(data + 2UL) = 0ULL; *(data + 3UL) = 0ULL; clear_bit(0L, (unsigned long volatile *)(& adapter->state)); } skip_ol_tests: msleep_interruptible(4000U); return; } }
/* Returns 1 (and zeroes wol->supported) when this device/subsystem-device combination
   does not support wake-on-LAN, 0 otherwise. */
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter , struct ethtool_wolinfo *wol ) { struct ixgbe_hw *hw ; int retval ; int tmp ; { hw = & adapter->hw; retval = 0; tmp = ixgbe_wol_supported(adapter, (int )hw->device_id, (int )hw->subsystem_device_id); if (tmp == 0) { retval = 1; wol->supported = 0U; } else { } return (retval); } }
/* ethtool .get_wol: advertises the supported WoL mode mask (0x2E) and translates
   adapter->wol bits into wolopts flags; reports nothing when WoL is excluded for this
   device or the PCI device cannot wake the system. */
static void ixgbe_get_wol(struct net_device *netdev , struct ethtool_wolinfo *wol ) { struct ixgbe_adapter *adapter ; void *tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; wol->supported = 46U; wol->wolopts = 0U; tmp___0 = ixgbe_wol_exclusion(adapter, wol); if (tmp___0 != 0) { return; } else { tmp___1 = device_can_wakeup(& (adapter->pdev)->dev); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return; } else { } } if ((adapter->wol & 4U) != 0U) { wol->wolopts = wol->wolopts | 2U; } else { } if
((adapter->wol & 8U) != 0U) { wol->wolopts = wol->wolopts | 4U; } else { } if ((adapter->wol & 16U) != 0U) { wol->wolopts = wol->wolopts | 8U; } else { } if ((adapter->wol & 2U) != 0U) { wol->wolopts = wol->wolopts | 32U; } else { } return; } }
/* ethtool .set_wol: rejects unsupported wake flags (mask 0x51 -> -95/-EOPNOTSUPP), maps
   the requested wolopts bits back into adapter->wol, and updates the PCI device's
   wakeup-enable state accordingly. */
static int ixgbe_set_wol(struct net_device *netdev , struct ethtool_wolinfo *wol ) { struct ixgbe_adapter *adapter ; void *tmp ; int tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; if ((wol->wolopts & 81U) != 0U) { return (-95); } else { } tmp___0 = ixgbe_wol_exclusion(adapter, wol); if (tmp___0 != 0) { return (wol->wolopts != 0U ? -95 : 0); } else { } adapter->wol = 0U; if ((wol->wolopts & 2U) != 0U) { adapter->wol = adapter->wol | 4U; } else { } if ((wol->wolopts & 4U) != 0U) { adapter->wol = adapter->wol | 8U; } else { } if ((wol->wolopts & 8U) != 0U) { adapter->wol = adapter->wol | 16U; } else { } if ((wol->wolopts & 32U) != 0U) { adapter->wol = adapter->wol | 2U; } else { } device_set_wakeup_enable(& (adapter->pdev)->dev, adapter->wol != 0U); return (0); } }
/* ethtool .nway_reset: restarts the interface (locked reinit) if it is running. */
static int ixgbe_nway_reset(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; bool tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; tmp___0 = netif_running((struct net_device const *)netdev); if ((int )tmp___0) { ixgbe_reinit_locked(adapter); } else { } return (0); } }
/* ethtool .set_phys_id (identify LED). ACTIVE (1): save LEDCTL (offset 512) and return 2
   -- presumably the blink-callback frequency expected by the ethtool core; verify against
   the ethtool_ops contract. ON (2)/OFF (3): drive the LED via mac.ops with argument 14.
   INACTIVE (0): restore the saved LEDCTL value. */
static int ixgbe_set_phys_id(struct net_device *netdev , enum ethtool_phys_id_state state ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; switch ((unsigned int )state) { case 1U: adapter->led_reg = ixgbe_read_reg(hw, 512U); return (2); case 2U: (*(hw->mac.ops.led_on))(hw, 14U); goto ldv_56429; case 3U: (*(hw->mac.ops.led_off))(hw, 14U); goto ldv_56429; case 0U: ixgbe_write_reg(& adapter->hw, 512U, adapter->led_reg); goto ldv_56429;
} ldv_56429: ; return (0); } }
/* ethtool .get_coalesce: exposes the stored rx/tx ITR settings; values above 1 are kept
   in register units (usecs << 2) and shifted back down here. The TX value is omitted
   when queue vector 0 services both RX and TX. */
static int ixgbe_get_coalesce(struct net_device *netdev , struct ethtool_coalesce *ec ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; if ((unsigned int )adapter->rx_itr_setting <= 1U) { ec->rx_coalesce_usecs = (__u32 )adapter->rx_itr_setting; } else { ec->rx_coalesce_usecs = (__u32 )((int )adapter->rx_itr_setting >> 2); } if ((unsigned int )(adapter->q_vector[0])->tx.count != 0U && (unsigned int )(adapter->q_vector[0])->rx.count != 0U) { return (0); } else { } if ((unsigned int )adapter->tx_itr_setting <= 1U) { ec->tx_coalesce_usecs = (__u32 )adapter->tx_itr_setting; } else { ec->tx_coalesce_usecs = (__u32 )((int )adapter->tx_itr_setting >> 2); } return (0); } }
/* Returns true when the RSC-enabled state (flags2 bit 1) had to change because the new
   rx_itr_setting became incompatible with RSC (too low) or allows it again; logs the
   transition. No-op when RSC is not capable/advertised. */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter ) { struct net_device *netdev ; { netdev = adapter->netdev; if ((adapter->flags2 & 1U) == 0U || (netdev->features & 32768ULL) == 0ULL) { return (0); } else { } if ((unsigned int )adapter->rx_itr_setting == 1U || (unsigned int )adapter->rx_itr_setting > 24U) { if ((adapter->flags2 & 2U) == 0U) { adapter->flags2 = adapter->flags2 | 2U; if (((int )adapter->msg_enable & 2) != 0) { netdev_info((struct net_device const *)adapter->netdev, "rx-usecs value high enough to re-enable RSC\n"); } else { } return (1); } else { } } else if ((adapter->flags2 & 2U) != 0U) { adapter->flags2 = adapter->flags2 & 4294967293U; if (((int )adapter->msg_enable & 2) != 0) { netdev_info((struct net_device const *)adapter->netdev, "rx-usecs set too low, disabling RSC\n"); } else { } return (1); } else { } return (0); } }
/* ethtool .set_coalesce: validates the requested usecs (<= 1022), stores them shifted
   into register units, mirrors RX into TX for combined vectors, decides whether the RSC
   state or the ITR-range transition forces a reset, and reprograms every queue vector's
   EITR register (continues on the next source line). */
static int ixgbe_set_coalesce(struct net_device *netdev , struct ethtool_coalesce *ec ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_q_vector *q_vector ; int i ; u16 tx_itr_param ; u16 rx_itr_param ; u16 tx_itr_prev ; bool need_reset ; bool tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct
ixgbe_adapter *)tmp; need_reset = 0; if ((unsigned int )(adapter->q_vector[0])->tx.count != 0U && (unsigned int )(adapter->q_vector[0])->rx.count != 0U) { if (ec->tx_coalesce_usecs != 0U) { return (-22); } else { } tx_itr_prev = adapter->rx_itr_setting; } else { tx_itr_prev = adapter->tx_itr_setting; } if (ec->rx_coalesce_usecs > 1022U || ec->tx_coalesce_usecs > 1022U) { return (-22); } else { } if (ec->rx_coalesce_usecs > 1U) { adapter->rx_itr_setting = (int )((u16 )ec->rx_coalesce_usecs) << 2U; } else { adapter->rx_itr_setting = (u16 )ec->rx_coalesce_usecs; } if ((unsigned int )adapter->rx_itr_setting == 1U) { rx_itr_param = 200U; } else { rx_itr_param = adapter->rx_itr_setting; } if (ec->tx_coalesce_usecs > 1U) { adapter->tx_itr_setting = (int )((u16 )ec->tx_coalesce_usecs) << 2U; } else { adapter->tx_itr_setting = (u16 )ec->tx_coalesce_usecs; } if ((unsigned int )adapter->tx_itr_setting == 1U) { tx_itr_param = 400U; } else { tx_itr_param = adapter->tx_itr_setting; } if ((unsigned int )(adapter->q_vector[0])->tx.count != 0U && (unsigned int )(adapter->q_vector[0])->rx.count != 0U) { adapter->tx_itr_setting = adapter->rx_itr_setting; } else { } /* a reset is needed when the TX ITR crosses into or out of the low (non-1, <= 39) range */ if ((unsigned int )adapter->tx_itr_setting != 1U && (unsigned int )adapter->tx_itr_setting <= 39U) { if ((unsigned int )tx_itr_prev == 1U || (unsigned int )tx_itr_prev > 39U) { need_reset = 1; } else { } } else if ((unsigned int )tx_itr_prev != 1U && (unsigned int )tx_itr_prev <= 39U) { need_reset = 1; } else { } tmp___0 = ixgbe_update_rsc(adapter); need_reset = ((int )need_reset | (int )tmp___0) != 0; i = 0; goto ldv_56453; ldv_56452: q_vector = adapter->q_vector[i]; if ((unsigned int )q_vector->tx.count != 0U && (unsigned int )q_vector->rx.count == 0U) { q_vector->itr = tx_itr_param; } else { q_vector->itr = rx_itr_param; } ixgbe_write_eitr(q_vector); i = i + 1; ldv_56453: ; if (adapter->num_q_vectors > i) { goto ldv_56452; } else { } if ((int )need_reset) { ixgbe_do_reset(netdev); } else { } return (0); } }
/* ETHTOOL_GRXCLSRULE helper: looks up the flow-director filter at cmd->fs.location in
   the sorted sw list and converts it back into an ethtool_rx_flow_spec. */
static int
ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter , struct ethtool_rxnfc *cmd ) { union ixgbe_atr_input *mask ; struct ethtool_rx_flow_spec *fsp ; struct hlist_node *node2 ; struct ixgbe_fdir_filter *rule ; struct hlist_node *____ptr ; struct hlist_node const *__mptr ; struct ixgbe_fdir_filter *tmp ; struct hlist_node *____ptr___0 ; struct hlist_node const *__mptr___0 ; struct ixgbe_fdir_filter *tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; { mask = & adapter->fdir_mask; fsp = & cmd->fs; rule = (struct ixgbe_fdir_filter *)0; /* cmd->data = table capacity derived from the fdir packet-buffer allocation */ cmd->data = (__u64 )((1024 << (int )adapter->fdir_pballoc) + -2); /* hlist walk (CIL-expanded hlist_for_each_entry_safe) until the first rule with sw_idx >= requested location */ ____ptr = adapter->fdir_filter_list.first; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)____ptr; tmp = (struct ixgbe_fdir_filter *)__mptr; } else { tmp = (struct ixgbe_fdir_filter *)0; } rule = tmp; goto ldv_56474; ldv_56473: ; if (fsp->location <= (__u32 )rule->sw_idx) { goto ldv_56472; } else { } ____ptr___0 = node2; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { __mptr___0 = (struct hlist_node const *)____ptr___0; tmp___0 = (struct ixgbe_fdir_filter *)__mptr___0; } else { tmp___0 = (struct ixgbe_fdir_filter *)0; } rule = tmp___0; ldv_56474: ; if ((unsigned long )rule != (unsigned long )((struct ixgbe_fdir_filter *)0)) { node2 = rule->fdir_node.next; goto ldv_56473; } else { } ldv_56472: ; if ((unsigned long )rule == (unsigned long )((struct ixgbe_fdir_filter *)0) || fsp->location != (__u32 )rule->sw_idx) { return (-22); } else { } switch ((int )rule->filter.formatted.flow_type) { case 2: fsp->flow_type = 1U; goto ldv_56476; case 1: fsp->flow_type = 2U; goto ldv_56476; case 3: fsp->flow_type = 3U; goto ldv_56476; case 0: fsp->flow_type = 13U; fsp->h_u.usr_ip4_spec.ip_ver = 1U; fsp->h_u.usr_ip4_spec.proto = 0U; fsp->m_u.usr_ip4_spec.proto = 0U; goto ldv_56476; default: ; return (-22); } ldv_56476: fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; fsp->m_ext.vlan_tci = mask->formatted.vlan_id; fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; tmp___1 = __fswab32((__u32 )rule->filter.formatted.vm_pool); fsp->h_ext.data[1] = tmp___1; tmp___2 = __fswab32((__u32 )mask->formatted.vm_pool); fsp->m_ext.data[1] = tmp___2; fsp->flow_type = fsp->flow_type | 2147483648U; if ((unsigned int )rule->action == 127U) { fsp->ring_cookie = 0xffffffffffffffffULL; } else { fsp->ring_cookie = (__u64 )rule->action; } return (0); } } static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter , struct ethtool_rxnfc *cmd , u32 *rule_locs ) { struct hlist_node *node2 ; struct ixgbe_fdir_filter *rule ; int cnt ; struct hlist_node *____ptr ; struct hlist_node const *__mptr ; struct ixgbe_fdir_filter *tmp ; struct hlist_node *____ptr___0 ; struct hlist_node const *__mptr___0 ; struct ixgbe_fdir_filter *tmp___0 ; { cnt = 0; cmd->data = (__u64 )((1024 << (int )adapter->fdir_pballoc) + -2); ____ptr = adapter->fdir_filter_list.first; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)____ptr; tmp = (struct ixgbe_fdir_filter *)__mptr; } else { tmp = (struct ixgbe_fdir_filter *)0; } rule = tmp; goto ldv_56499; ldv_56498: ; if ((__u32 )cnt == cmd->rule_cnt) { return (-90); } else { } *(rule_locs + (unsigned long )cnt) = (u32 )rule->sw_idx; cnt = cnt + 1; ____ptr___0 = node2; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { 
__mptr___0 = (struct hlist_node const *)____ptr___0; tmp___0 = (struct ixgbe_fdir_filter *)__mptr___0; } else { tmp___0 = (struct ixgbe_fdir_filter *)0; } rule = tmp___0; ldv_56499: ; if ((unsigned long )rule != (unsigned long )((struct ixgbe_fdir_filter *)0)) { node2 = rule->fdir_node.next; goto ldv_56498; } else { } cmd->rule_cnt = (__u32 )cnt; return (0); } } static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter , struct ethtool_rxnfc *cmd ) { { cmd->data = 0ULL; switch (cmd->flow_type) { case 1U: cmd->data = cmd->data | 192ULL; case 2U: ; if ((adapter->flags2 & 256U) != 0U) { cmd->data = cmd->data | 192ULL; } else { } case 3U: ; case 4U: ; case 9U: ; case 10U: ; case 16U: cmd->data = cmd->data | 48ULL; goto ldv_56512; case 5U: cmd->data = cmd->data | 192ULL; case 6U: ; if ((adapter->flags2 & 512U) != 0U) { cmd->data = cmd->data | 192ULL; } else { } case 7U: ; case 8U: ; case 11U: ; case 12U: ; case 17U: cmd->data = cmd->data | 48ULL; goto ldv_56512; default: ; return (-22); } ldv_56512: ; return (0); } } static int ixgbe_get_rxnfc(struct net_device *dev , struct ethtool_rxnfc *cmd , u32 *rule_locs ) { struct ixgbe_adapter *adapter ; void *tmp ; int ret ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; ret = -95; switch (cmd->cmd) { case 45U: cmd->data = (__u64 )adapter->num_rx_queues; ret = 0; goto ldv_56529; case 46U: cmd->rule_cnt = (__u32 )adapter->fdir_filter_count; ret = 0; goto ldv_56529; case 47U: ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); goto ldv_56529; case 48U: ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); goto ldv_56529; case 41U: ret = ixgbe_get_rss_hash_opts(adapter, cmd); goto ldv_56529; default: ; goto ldv_56529; } ldv_56529: ; return (ret); } } static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter , struct ixgbe_fdir_filter *input , u16 sw_idx ) { struct ixgbe_hw *hw ; struct hlist_node *node2 ; struct ixgbe_fdir_filter *rule ; struct ixgbe_fdir_filter 
*parent ; int err ; struct hlist_node *____ptr ; struct hlist_node const *__mptr ; struct ixgbe_fdir_filter *tmp ; struct hlist_node *____ptr___0 ; struct hlist_node const *__mptr___0 ; struct ixgbe_fdir_filter *tmp___0 ; { hw = & adapter->hw; err = -22; parent = (struct ixgbe_fdir_filter *)0; rule = (struct ixgbe_fdir_filter *)0; ____ptr = adapter->fdir_filter_list.first; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)____ptr; tmp = (struct ixgbe_fdir_filter *)__mptr; } else { tmp = (struct ixgbe_fdir_filter *)0; } rule = tmp; goto ldv_56556; ldv_56555: ; if ((int )rule->sw_idx >= (int )sw_idx) { goto ldv_56554; } else { } parent = rule; ____ptr___0 = node2; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { __mptr___0 = (struct hlist_node const *)____ptr___0; tmp___0 = (struct ixgbe_fdir_filter *)__mptr___0; } else { tmp___0 = (struct ixgbe_fdir_filter *)0; } rule = tmp___0; ldv_56556: ; if ((unsigned long )rule != (unsigned long )((struct ixgbe_fdir_filter *)0)) { node2 = rule->fdir_node.next; goto ldv_56555; } else { } ldv_56554: ; if ((unsigned long )rule != (unsigned long )((struct ixgbe_fdir_filter *)0) && (int )rule->sw_idx == (int )sw_idx) { if ((unsigned long )input == (unsigned long )((struct ixgbe_fdir_filter *)0) || (int )rule->filter.formatted.bkt_hash != (int )input->filter.formatted.bkt_hash) { err = ixgbe_fdir_erase_perfect_filter_82599(hw, & rule->filter, (int )sw_idx); } else { } hlist_del(& rule->fdir_node); kfree((void const *)rule); adapter->fdir_filter_count = adapter->fdir_filter_count - 1; } else { } if ((unsigned long )input == (unsigned long )((struct ixgbe_fdir_filter *)0)) { return (err); } else { } INIT_HLIST_NODE(& input->fdir_node); if ((unsigned long )parent != (unsigned long )((struct ixgbe_fdir_filter *)0)) { hlist_add_behind(& input->fdir_node, & parent->fdir_node); } else { hlist_add_head(& input->fdir_node, & adapter->fdir_filter_list); 
} adapter->fdir_filter_count = adapter->fdir_filter_count + 1; return (0); } } static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp , u8 *flow_type ) { { switch (fsp->flow_type & 2147483647U) { case 1U: *flow_type = 2U; goto ldv_56562; case 2U: *flow_type = 1U; goto ldv_56562; case 3U: *flow_type = 3U; goto ldv_56562; case 13U: ; switch ((int )fsp->h_u.usr_ip4_spec.proto) { case 6: *flow_type = 2U; goto ldv_56567; case 17: *flow_type = 1U; goto ldv_56567; case 132: *flow_type = 3U; goto ldv_56567; case 0: ; if ((unsigned int )fsp->m_u.usr_ip4_spec.proto == 0U) { *flow_type = 0U; goto ldv_56567; } else { } default: ; return (0); } ldv_56567: ; goto ldv_56562; default: ; return (0); } ldv_56562: ; return (1); } } static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter , struct ethtool_rxnfc *cmd ) { struct ethtool_rx_flow_spec *fsp ; struct ixgbe_hw *hw ; struct ixgbe_fdir_filter *input ; union ixgbe_atr_input mask ; u8 queue ; int err ; u32 ring ; __u64 tmp ; u8 vf ; __u64 tmp___0 ; void *tmp___1 ; int tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; int tmp___5 ; int tmp___6 ; { fsp = & cmd->fs; hw = & adapter->hw; if ((adapter->flags & 524288U) == 0U) { return (-95); } else { } if (fsp->ring_cookie == 0xffffffffffffffffULL) { queue = 127U; } else { tmp = ethtool_get_flow_spec_ring(fsp->ring_cookie); ring = (u32 )tmp; tmp___0 = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); vf = (u8 )tmp___0; if ((unsigned int )vf == 0U && (u32 )adapter->num_rx_queues <= ring) { return (-22); } else if ((unsigned int )vf != 0U && ((unsigned int )vf > adapter->num_vfs || (u32 )adapter->num_rx_queues_per_pool <= ring)) { return (-22); } else { } if ((unsigned int )vf == 0U) { queue = (adapter->rx_ring[ring])->reg_idx; } else { queue = (int )((u8 )((int )vf + -1)) * (int )((u8 )adapter->num_rx_queues_per_pool) + (int )((u8 )ring); } } if (fsp->location >= (__u32 )((1024 << (int )adapter->fdir_pballoc) + -2)) { if ((int )adapter->msg_enable & 1) { 
netdev_err((struct net_device const *)adapter->netdev, "Location out of range\n"); } else { } return (-22); } else { } tmp___1 = kzalloc(64UL, 32U); input = (struct ixgbe_fdir_filter *)tmp___1; if ((unsigned long )input == (unsigned long )((struct ixgbe_fdir_filter *)0)) { return (-12); } else { } memset((void *)(& mask), 0, 44UL); input->sw_idx = (u16 )fsp->location; tmp___2 = ixgbe_flowspec_to_flow_type(fsp, & input->filter.formatted.flow_type); if (tmp___2 == 0) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "Unrecognized flow type\n"); } else { } goto err_out; } else { } mask.formatted.flow_type = 7U; if ((unsigned int )input->filter.formatted.flow_type == 0U) { mask.formatted.flow_type = (unsigned int )mask.formatted.flow_type & 4U; } else { } input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; if ((int )fsp->flow_type < 0) { tmp___3 = __fswab32(fsp->h_ext.data[1]); input->filter.formatted.vm_pool = (unsigned char )tmp___3; tmp___4 = __fswab32(fsp->m_ext.data[1]); mask.formatted.vm_pool = (unsigned char )tmp___4; input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; mask.formatted.vlan_id = fsp->m_ext.vlan_tci; input->filter.formatted.flex_bytes = fsp->h_ext.vlan_etype; mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; } else { } if (fsp->ring_cookie == 0xffffffffffffffffULL) { input->action = 127U; } else { input->action = (u16 )fsp->ring_cookie; } spin_lock(& adapter->fdir_perfect_lock); tmp___6 = hlist_empty((struct hlist_head const *)(& adapter->fdir_filter_list)); if (tmp___6 != 0) { 
memcpy((void *)(& adapter->fdir_mask), (void const *)(& mask), 44UL); err = ixgbe_fdir_set_input_mask_82599(hw, & mask); if (err != 0) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "Error writing mask\n"); } else { } goto err_out_w_lock; } else { } } else { tmp___5 = memcmp((void const *)(& adapter->fdir_mask), (void const *)(& mask), 44UL); if (tmp___5 != 0) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "Only one mask supported per port\n"); } else { } goto err_out_w_lock; } else { } } ixgbe_atr_compute_perfect_hash_82599(& input->filter, & mask); err = ixgbe_fdir_write_perfect_filter_82599(hw, & input->filter, (int )input->sw_idx, (int )queue); if (err != 0) { goto err_out_w_lock; } else { } ixgbe_update_ethtool_fdir_entry(adapter, input, (int )input->sw_idx); spin_unlock(& adapter->fdir_perfect_lock); return (err); err_out_w_lock: spin_unlock(& adapter->fdir_perfect_lock); err_out: kfree((void const *)input); return (-22); } } static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter , struct ethtool_rxnfc *cmd ) { struct ethtool_rx_flow_spec *fsp ; int err ; { fsp = & cmd->fs; spin_lock(& adapter->fdir_perfect_lock); err = ixgbe_update_ethtool_fdir_entry(adapter, (struct ixgbe_fdir_filter *)0, (int )((u16 )fsp->location)); spin_unlock(& adapter->fdir_perfect_lock); return (err); } } static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter , struct ethtool_rxnfc *nfc ) { u32 flags2 ; struct ixgbe_hw *hw ; u32 mrqc ; unsigned int pf_pool ; { flags2 = adapter->flags2; if ((nfc->data & 0xffffffffffffff0fULL) != 0ULL) { return (-22); } else { } switch (nfc->flow_type) { case 1U: ; case 5U: ; if ((((nfc->data & 16ULL) == 0ULL || (nfc->data & 32ULL) == 0ULL) || (nfc->data & 64ULL) == 0ULL) || (nfc->data & 128ULL) == 0ULL) { return (-22); } else { } goto ldv_56600; case 2U: ; if ((nfc->data & 16ULL) == 0ULL || (nfc->data & 32ULL) == 0ULL) { return (-22); } 
else { } switch (nfc->data & 192ULL) { case 0ULL: flags2 = flags2 & 4294967039U; goto ldv_56603; case 192ULL: flags2 = flags2 | 256U; goto ldv_56603; default: ; return (-22); } ldv_56603: ; goto ldv_56600; case 6U: ; if ((nfc->data & 16ULL) == 0ULL || (nfc->data & 32ULL) == 0ULL) { return (-22); } else { } switch (nfc->data & 192ULL) { case 0ULL: flags2 = flags2 & 4294966783U; goto ldv_56608; case 192ULL: flags2 = flags2 | 512U; goto ldv_56608; default: ; return (-22); } ldv_56608: ; goto ldv_56600; case 4U: ; case 9U: ; case 10U: ; case 3U: ; case 8U: ; case 11U: ; case 12U: ; case 7U: ; if ((((nfc->data & 16ULL) == 0ULL || (nfc->data & 32ULL) == 0ULL) || (nfc->data & 64ULL) != 0ULL) || (nfc->data & 128ULL) != 0ULL) { return (-22); } else { } goto ldv_56600; default: ; return (-22); } ldv_56600: ; if (adapter->flags2 != flags2) { hw = & adapter->hw; pf_pool = adapter->num_vfs; if ((unsigned int )hw->mac.type > 3U && (adapter->flags & 8388608U) != 0U) { mrqc = ixgbe_read_reg(hw, (pf_pool + 3328U) * 4U); } else { mrqc = ixgbe_read_reg(hw, 22552U); } if ((flags2 & 768U) != 0U && (adapter->flags2 & 768U) == 0U) { if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); } else { } } else { } adapter->flags2 = flags2; mrqc = mrqc | 3342336U; mrqc = mrqc & 4282384383U; if ((flags2 & 256U) != 0U) { mrqc = mrqc | 4194304U; } else { } if ((flags2 & 512U) != 0U) { mrqc = mrqc | 8388608U; } else { } if ((unsigned int )hw->mac.type > 3U && (adapter->flags & 8388608U) != 0U) { ixgbe_write_reg(hw, (pf_pool + 3328U) * 4U, mrqc); } else { ixgbe_write_reg(hw, 22552U, mrqc); } } else { } return (0); } } static int ixgbe_set_rxnfc(struct net_device *dev , struct ethtool_rxnfc *cmd ) { struct ixgbe_adapter *adapter ; void *tmp ; int ret ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; ret = -95; switch (cmd->cmd) { case 
50U: ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); goto ldv_56630; case 49U: ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); goto ldv_56630; case 42U: ret = ixgbe_set_rss_hash_opt(adapter, cmd); goto ldv_56630; default: ; goto ldv_56630; } ldv_56630: ; return (ret); } } static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; return (40U); } } static u32 ixgbe_rss_indir_size(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; u32 tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; tmp___0 = ixgbe_rss_indir_tbl_entries(adapter); return (tmp___0); } } static void ixgbe_get_reta(struct ixgbe_adapter *adapter , u32 *indir ) { int i ; int reta_size ; u32 tmp ; { tmp = ixgbe_rss_indir_tbl_entries(adapter); reta_size = (int )tmp; i = 0; goto ldv_56649; ldv_56648: *(indir + (unsigned long )i) = (u32 )adapter->rss_indir_tbl[i]; i = i + 1; ldv_56649: ; if (i < reta_size) { goto ldv_56648; } else { } return; } } static int ixgbe_get_rxfh(struct net_device *netdev , u32 *indir , u8 *key , u8 *hfunc ) { struct ixgbe_adapter *adapter ; void *tmp ; u32 tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; if ((unsigned long )hfunc != (unsigned long )((u8 *)0U)) { *hfunc = 1U; } else { } if ((unsigned long )indir != (unsigned long )((u32 *)0U)) { ixgbe_get_reta(adapter, indir); } else { } if ((unsigned long )key != (unsigned long )((u8 *)0U)) { tmp___0 = ixgbe_get_rxfh_key_size(netdev); memcpy((void *)key, (void const *)(& adapter->rss_key), (size_t )tmp___0); } else { } return (0); } } static int ixgbe_get_ts_info(struct net_device *dev , struct ethtool_ts_info *info ) { struct ixgbe_adapter *adapter ; void *tmp ; int tmp___0 ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct 
ixgbe_adapter *)tmp; switch ((unsigned int )adapter->hw.mac.type) { case 4U: ; case 5U: ; case 3U: ; case 2U: info->so_timestamping = 95U; if ((unsigned long )adapter->ptp_clock != (unsigned long )((struct ptp_clock *)0)) { info->phc_index = ptp_clock_index(adapter->ptp_clock); } else { info->phc_index = -1; } info->tx_types = 3U; info->rx_filters = 32753U; goto ldv_56667; default: tmp___0 = ethtool_op_get_ts_info(dev, info); return (tmp___0); } ldv_56667: ; return (0); } } static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter ) { unsigned int max_combined ; u8 tcs ; int tmp ; u8 tmp___0 ; { tmp = netdev_get_num_tc(adapter->netdev); tcs = (u8 )tmp; if ((adapter->flags & 8U) == 0U) { max_combined = 1U; } else if ((adapter->flags & 8388608U) != 0U) { max_combined = 1U; } else if ((unsigned int )tcs > 1U) { if ((unsigned int )adapter->hw.mac.type == 1U) { max_combined = 4U; } else if ((unsigned int )tcs > 4U) { max_combined = 8U; } else { max_combined = 16U; } } else if (adapter->atr_sample_rate != 0U) { max_combined = 63U; } else { tmp___0 = ixgbe_max_rss_indices(adapter); max_combined = (unsigned int )tmp___0; } return (max_combined); } } static void ixgbe_get_channels(struct net_device *dev , struct ethtool_channels *ch ) { struct ixgbe_adapter *adapter ; void *tmp ; int tmp___0 ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; ch->max_combined = ixgbe_max_channels(adapter); if ((adapter->flags & 8U) != 0U) { ch->max_other = 1U; ch->other_count = 1U; } else { } ch->combined_count = (__u32 )adapter->ring_feature[2].indices; if (ch->combined_count == 1U) { return; } else { } if ((adapter->flags & 8388608U) != 0U) { return; } else { } tmp___0 = netdev_get_num_tc(dev); if (tmp___0 > 1) { return; } else { } if (adapter->atr_sample_rate == 0U) { return; } else { } ch->combined_count = (__u32 )adapter->ring_feature[3].indices; return; } } static int ixgbe_set_channels(struct net_device *dev , struct 
ethtool_channels *ch ) { struct ixgbe_adapter *adapter ; void *tmp ; unsigned int count ; u8 max_rss_indices ; u8 tmp___0 ; unsigned int tmp___1 ; int tmp___2 ; int tmp___3 ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; count = ch->combined_count; tmp___0 = ixgbe_max_rss_indices(adapter); max_rss_indices = tmp___0; if ((count == 0U || ch->rx_count != 0U) || ch->tx_count != 0U) { return (-22); } else { } if (ch->other_count != 1U) { return (-22); } else { } tmp___1 = ixgbe_max_channels(adapter); if (tmp___1 < count) { return (-22); } else { } adapter->ring_feature[3].limit = (u16 )count; if ((unsigned int )max_rss_indices < count) { count = (unsigned int )max_rss_indices; } else { } adapter->ring_feature[2].limit = (u16 )count; if (count > 8U) { count = 8U; } else { } adapter->ring_feature[4].limit = (u16 )count; tmp___2 = netdev_get_num_tc(dev); tmp___3 = ixgbe_setup_tc(dev, (int )((u8 )tmp___2)); return (tmp___3); } } static int ixgbe_get_module_info(struct net_device *dev , struct ethtool_modinfo *modinfo ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; s32 status ; u8 sff8472_rev ; u8 addr_mode ; bool page_swap ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; page_swap = 0; status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 94, & sff8472_rev); if (status != 0) { return (-5); } else { } status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 92, & addr_mode); if (status != 0) { return (-5); } else { } if (((int )addr_mode & 4) != 0) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "Address change required to access page 0xA2, but not supported. 
Please report the module type to the driver maintainers.\n"); } else { } page_swap = 1; } else { } if ((unsigned int )sff8472_rev == 0U || (int )page_swap) { modinfo->type = 1U; modinfo->eeprom_len = 256U; } else { modinfo->type = 2U; modinfo->eeprom_len = 512U; } return (0); } } static int ixgbe_get_module_eeprom(struct net_device *dev , struct ethtool_eeprom *ee , u8 *data ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; s32 status ; u8 databyte ; int i ; int tmp___0 ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; status = -17; databyte = 255U; i = 0; if (ee->len == 0U) { return (-22); } else { } i = (int )ee->offset; goto ldv_56707; ldv_56706: tmp___0 = constant_test_bit(7L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 != 0) { return (-16); } else { } if (i <= 255) { status = (*(hw->phy.ops.read_i2c_eeprom))(hw, (int )((u8 )i), & databyte); } else { status = (*(hw->phy.ops.read_i2c_sff8472))(hw, (int )((u8 )i), & databyte); } if (status != 0) { return (-5); } else { } *(data + (unsigned long )((__u32 )i - ee->offset)) = databyte; i = i + 1; ldv_56707: ; if ((__u32 )i < ee->offset + ee->len) { goto ldv_56706; } else { } return (0); } } static struct ethtool_ops const ixgbe_ethtool_ops = {& ixgbe_get_settings, & ixgbe_set_settings, & ixgbe_get_drvinfo, & ixgbe_get_regs_len, & ixgbe_get_regs, & ixgbe_get_wol, & ixgbe_set_wol, & ixgbe_get_msglevel, & ixgbe_set_msglevel, & ixgbe_nway_reset, & ethtool_op_get_link, & ixgbe_get_eeprom_len, & ixgbe_get_eeprom, & ixgbe_set_eeprom, & ixgbe_get_coalesce, & ixgbe_set_coalesce, & ixgbe_get_ringparam, & ixgbe_set_ringparam, & ixgbe_get_pauseparam, & ixgbe_set_pauseparam, & ixgbe_diag_test, & ixgbe_get_strings, & ixgbe_set_phys_id, & ixgbe_get_ethtool_stats, 0, 0, 0, 0, & ixgbe_get_sset_count, & ixgbe_get_rxnfc, & ixgbe_set_rxnfc, 0, 0, & ixgbe_get_rxfh_key_size, & ixgbe_rss_indir_size, & ixgbe_get_rxfh, 0, & 
ixgbe_get_channels, & ixgbe_set_channels, 0, 0, 0, & ixgbe_get_ts_info, & ixgbe_get_module_info, & ixgbe_get_module_eeprom, 0, 0, 0, 0}; void ixgbe_set_ethtool_ops(struct net_device *netdev ) { { netdev->ethtool_ops = & ixgbe_ethtool_ops; return; } } void disable_suitable_irq_7(int line , void *data ) { { if (ldv_irq_7_0 != 0 && line == ldv_irq_line_7_0) { ldv_irq_7_0 = 0; return; } else { } if (ldv_irq_7_1 != 0 && line == ldv_irq_line_7_1) { ldv_irq_7_1 = 0; return; } else { } if (ldv_irq_7_2 != 0 && line == ldv_irq_line_7_2) { ldv_irq_7_2 = 0; return; } else { } if (ldv_irq_7_3 != 0 && line == ldv_irq_line_7_3) { ldv_irq_7_3 = 0; return; } else { } return; } } int ldv_irq_6(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = ixgbe_test_intr(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_56724; default: ldv_stop(); } ldv_56724: ; } else { } return (state); } } void activate_suitable_irq_6(int line , void *data ) { { if (ldv_irq_6_0 == 0) { ldv_irq_line_6_0 = line; ldv_irq_data_6_0 = data; ldv_irq_6_0 = 1; return; } else { } if (ldv_irq_6_1 == 0) { ldv_irq_line_6_1 = line; ldv_irq_data_6_1 = data; ldv_irq_6_1 = 1; return; } else { } if (ldv_irq_6_2 == 0) { ldv_irq_line_6_2 = line; ldv_irq_data_6_2 = data; ldv_irq_6_2 = 1; return; } else { } if (ldv_irq_6_3 == 0) { ldv_irq_line_6_3 = line; ldv_irq_data_6_3 = data; ldv_irq_6_3 = 1; return; } else { } return; } } void choose_interrupt_5(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_5_0 = ldv_irq_5(ldv_irq_5_0, ldv_irq_line_5_0, ldv_irq_data_5_0); goto ldv_56734; case 1: ldv_irq_5_0 = ldv_irq_5(ldv_irq_5_1, ldv_irq_line_5_1, ldv_irq_data_5_1); goto ldv_56734; case 2: ldv_irq_5_0 = ldv_irq_5(ldv_irq_5_2, ldv_irq_line_5_2, 
ldv_irq_data_5_2); goto ldv_56734; case 3: ldv_irq_5_0 = ldv_irq_5(ldv_irq_5_3, ldv_irq_line_5_3, ldv_irq_data_5_3); goto ldv_56734; default: ldv_stop(); } ldv_56734: ; return; } } void disable_suitable_irq_5(int line , void *data ) { { if (ldv_irq_5_0 != 0 && line == ldv_irq_line_5_0) { ldv_irq_5_0 = 0; return; } else { } if (ldv_irq_5_1 != 0 && line == ldv_irq_line_5_1) { ldv_irq_5_1 = 0; return; } else { } if (ldv_irq_5_2 != 0 && line == ldv_irq_line_5_2) { ldv_irq_5_2 = 0; return; } else { } if (ldv_irq_5_3 != 0 && line == ldv_irq_line_5_3) { ldv_irq_5_3 = 0; return; } else { } return; } } int ldv_irq_5(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = ixgbe_test_intr(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_56750; default: ldv_stop(); } ldv_56750: ; } else { } return (state); } } void ldv_initialize_ethtool_ops_35(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; void *tmp___3 ; void *tmp___4 ; void *tmp___5 ; void *tmp___6 ; void *tmp___7 ; { tmp = ldv_init_zalloc(36UL); ixgbe_ethtool_ops_group0 = (struct ethtool_ringparam *)tmp; tmp___0 = ldv_init_zalloc(16UL); ixgbe_ethtool_ops_group2 = (struct ethtool_eeprom *)tmp___0; tmp___1 = ldv_init_zalloc(44UL); ixgbe_ethtool_ops_group1 = (struct ethtool_cmd *)tmp___1; tmp___2 = ldv_init_zalloc(16UL); ixgbe_ethtool_ops_group3 = (struct ethtool_pauseparam *)tmp___2; tmp___3 = ldv_init_zalloc(36UL); ixgbe_ethtool_ops_group4 = (struct ethtool_channels *)tmp___3; tmp___4 = ldv_init_zalloc(92UL); ixgbe_ethtool_ops_group5 = (struct ethtool_coalesce *)tmp___4; tmp___5 = ldv_init_zalloc(3008UL); ixgbe_ethtool_ops_group6 = (struct net_device *)tmp___5; tmp___6 = ldv_init_zalloc(192UL); ixgbe_ethtool_ops_group7 = (struct ethtool_rxnfc *)tmp___6; 
tmp___7 = ldv_init_zalloc(20UL); ixgbe_ethtool_ops_group8 = (struct ethtool_wolinfo *)tmp___7; return; } } void activate_suitable_irq_7(int line , void *data ) { { if (ldv_irq_7_0 == 0) { ldv_irq_line_7_0 = line; ldv_irq_data_7_0 = data; ldv_irq_7_0 = 1; return; } else { } if (ldv_irq_7_1 == 0) { ldv_irq_line_7_1 = line; ldv_irq_data_7_1 = data; ldv_irq_7_1 = 1; return; } else { } if (ldv_irq_7_2 == 0) { ldv_irq_line_7_2 = line; ldv_irq_data_7_2 = data; ldv_irq_7_2 = 1; return; } else { } if (ldv_irq_7_3 == 0) { ldv_irq_line_7_3 = line; ldv_irq_data_7_3 = data; ldv_irq_7_3 = 1; return; } else { } return; } } int reg_check_6(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& ixgbe_test_intr)) { return (1); } else { } return (0); } } int reg_check_7(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& ixgbe_test_intr)) { return (1); } else { } return (0); } } void choose_interrupt_6(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_6_0 = ldv_irq_6(ldv_irq_6_0, ldv_irq_line_6_0, ldv_irq_data_6_0); goto ldv_56773; case 1: ldv_irq_6_0 = ldv_irq_6(ldv_irq_6_1, ldv_irq_line_6_1, ldv_irq_data_6_1); goto ldv_56773; case 2: ldv_irq_6_0 = ldv_irq_6(ldv_irq_6_2, ldv_irq_line_6_2, ldv_irq_data_6_2); goto ldv_56773; case 3: ldv_irq_6_0 = ldv_irq_6(ldv_irq_6_3, ldv_irq_line_6_3, ldv_irq_data_6_3); goto ldv_56773; default: ldv_stop(); } ldv_56773: ; return; } } void disable_suitable_irq_6(int line , void *data ) { { if (ldv_irq_6_0 != 0 && line == ldv_irq_line_6_0) { ldv_irq_6_0 = 0; return; } else { } if (ldv_irq_6_1 != 0 && line == ldv_irq_line_6_1) { ldv_irq_6_1 = 0; return; } else { } if (ldv_irq_6_2 != 0 && line == ldv_irq_line_6_2) { ldv_irq_6_2 = 0; return; } else { } if (ldv_irq_6_3 != 0 && line == ldv_irq_line_6_3) { ldv_irq_6_3 = 0; return; } else { } return; } } void activate_suitable_irq_5(int line , void *data ) { { if (ldv_irq_5_0 == 0) { 
ldv_irq_line_5_0 = line; ldv_irq_data_5_0 = data; ldv_irq_5_0 = 1; return; } else { } if (ldv_irq_5_1 == 0) { ldv_irq_line_5_1 = line; ldv_irq_data_5_1 = data; ldv_irq_5_1 = 1; return; } else { } if (ldv_irq_5_2 == 0) { ldv_irq_line_5_2 = line; ldv_irq_data_5_2 = data; ldv_irq_5_2 = 1; return; } else { } if (ldv_irq_5_3 == 0) { ldv_irq_line_5_3 = line; ldv_irq_data_5_3 = data; ldv_irq_5_3 = 1; return; } else { } return; } } int ldv_irq_7(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = ixgbe_test_intr(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_56793; default: ldv_stop(); } ldv_56793: ; } else { } return (state); } } void choose_interrupt_7(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_7_0 = ldv_irq_7(ldv_irq_7_0, ldv_irq_line_7_0, ldv_irq_data_7_0); goto ldv_56799; case 1: ldv_irq_7_0 = ldv_irq_7(ldv_irq_7_1, ldv_irq_line_7_1, ldv_irq_data_7_1); goto ldv_56799; case 2: ldv_irq_7_0 = ldv_irq_7(ldv_irq_7_2, ldv_irq_line_7_2, ldv_irq_data_7_2); goto ldv_56799; case 3: ldv_irq_7_0 = ldv_irq_7(ldv_irq_7_3, ldv_irq_line_7_3, ldv_irq_data_7_3); goto ldv_56799; default: ldv_stop(); } ldv_56799: ; return; } } int reg_check_5(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& ixgbe_test_intr)) { return (1); } else { } return (0); } } void ldv_main_exported_35(void) { u64 *ldvarg335 ; void *tmp ; u32 *ldvarg342 ; void *tmp___0 ; struct ethtool_regs *ldvarg328 ; void *tmp___1 ; struct ethtool_stats *ldvarg330 ; void *tmp___2 ; u8 *ldvarg325 ; void *tmp___3 ; struct ethtool_ts_info *ldvarg338 ; void *tmp___4 ; struct ethtool_modinfo *ldvarg332 ; void *tmp___5 ; void *ldvarg327 ; void *tmp___6 ; u8 *ldvarg340 ; void *tmp___7 ; u8 *ldvarg333 
; void *tmp___8 ; u64 *ldvarg329 ; void *tmp___9 ; struct ethtool_test *ldvarg336 ; void *tmp___10 ; u32 ldvarg334 ; struct ethtool_drvinfo *ldvarg331 ; void *tmp___11 ; u8 *ldvarg341 ; void *tmp___12 ; u8 *ldvarg326 ; void *tmp___13 ; u32 *ldvarg345 ; void *tmp___14 ; u8 *ldvarg339 ; void *tmp___15 ; enum ethtool_phys_id_state ldvarg344 ; u32 ldvarg337 ; int ldvarg343 ; int tmp___16 ; { tmp = ldv_init_zalloc(8UL); ldvarg335 = (u64 *)tmp; tmp___0 = ldv_init_zalloc(4UL); ldvarg342 = (u32 *)tmp___0; tmp___1 = ldv_init_zalloc(12UL); ldvarg328 = (struct ethtool_regs *)tmp___1; tmp___2 = ldv_init_zalloc(8UL); ldvarg330 = (struct ethtool_stats *)tmp___2; tmp___3 = ldv_init_zalloc(1UL); ldvarg325 = (u8 *)tmp___3; tmp___4 = ldv_init_zalloc(44UL); ldvarg338 = (struct ethtool_ts_info *)tmp___4; tmp___5 = ldv_init_zalloc(44UL); ldvarg332 = (struct ethtool_modinfo *)tmp___5; tmp___6 = ldv_init_zalloc(1UL); ldvarg327 = tmp___6; tmp___7 = ldv_init_zalloc(1UL); ldvarg340 = (u8 *)tmp___7; tmp___8 = ldv_init_zalloc(1UL); ldvarg333 = (u8 *)tmp___8; tmp___9 = ldv_init_zalloc(8UL); ldvarg329 = (u64 *)tmp___9; tmp___10 = ldv_init_zalloc(16UL); ldvarg336 = (struct ethtool_test *)tmp___10; tmp___11 = ldv_init_zalloc(196UL); ldvarg331 = (struct ethtool_drvinfo *)tmp___11; tmp___12 = ldv_init_zalloc(1UL); ldvarg341 = (u8 *)tmp___12; tmp___13 = ldv_init_zalloc(1UL); ldvarg326 = (u8 *)tmp___13; tmp___14 = ldv_init_zalloc(4UL); ldvarg345 = (u32 *)tmp___14; tmp___15 = ldv_init_zalloc(1UL); ldvarg339 = (u8 *)tmp___15; ldv_memset((void *)(& ldvarg334), 0, 4UL); ldv_memset((void *)(& ldvarg344), 0, 4UL); ldv_memset((void *)(& ldvarg337), 0, 4UL); ldv_memset((void *)(& ldvarg343), 0, 4UL); tmp___16 = __VERIFIER_nondet_int(); switch (tmp___16) { case 0: ; if (ldv_state_variable_35 == 1) { ixgbe_set_rxnfc(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group7); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 1: ; if (ldv_state_variable_35 == 1) { ixgbe_get_rxnfc(ixgbe_ethtool_ops_group6, 
ixgbe_ethtool_ops_group7, ldvarg345); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 2: ; if (ldv_state_variable_35 == 1) { ixgbe_get_ringparam(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group0); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 3: ; if (ldv_state_variable_35 == 1) { ixgbe_get_pauseparam(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group3); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 4: ; if (ldv_state_variable_35 == 1) { ixgbe_set_phys_id(ixgbe_ethtool_ops_group6, ldvarg344); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 5: ; if (ldv_state_variable_35 == 1) { ixgbe_get_settings(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group1); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 6: ; if (ldv_state_variable_35 == 1) { ixgbe_get_sset_count(ixgbe_ethtool_ops_group6, ldvarg343); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 7: ; if (ldv_state_variable_35 == 1) { ixgbe_get_rxfh(ixgbe_ethtool_ops_group6, ldvarg342, ldvarg341, ldvarg340); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 8: ; if (ldv_state_variable_35 == 1) { ixgbe_set_channels(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group4); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 9: ; if (ldv_state_variable_35 == 1) { ixgbe_set_coalesce(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group5); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 10: ; if (ldv_state_variable_35 == 1) { ixgbe_get_module_eeprom(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group2, ldvarg339); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 11: ; if (ldv_state_variable_35 == 1) { ixgbe_get_ts_info(ixgbe_ethtool_ops_group6, ldvarg338); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 12: ; if (ldv_state_variable_35 == 1) { ixgbe_set_msglevel(ixgbe_ethtool_ops_group6, ldvarg337); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 13: ; if (ldv_state_variable_35 == 1) { 
ixgbe_get_eeprom_len(ixgbe_ethtool_ops_group6); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 14: ; if (ldv_state_variable_35 == 1) { ixgbe_diag_test(ixgbe_ethtool_ops_group6, ldvarg336, ldvarg335); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 15: ; if (ldv_state_variable_35 == 1) { ixgbe_nway_reset(ixgbe_ethtool_ops_group6); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 16: ; if (ldv_state_variable_35 == 1) { ixgbe_get_strings(ixgbe_ethtool_ops_group6, ldvarg334, ldvarg333); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 17: ; if (ldv_state_variable_35 == 1) { ixgbe_get_module_info(ixgbe_ethtool_ops_group6, ldvarg332); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 18: ; if (ldv_state_variable_35 == 1) { ixgbe_get_rxfh_key_size(ixgbe_ethtool_ops_group6); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 19: ; if (ldv_state_variable_35 == 1) { ethtool_op_get_link(ixgbe_ethtool_ops_group6); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 20: ; if (ldv_state_variable_35 == 1) { ixgbe_get_channels(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group4); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 21: ; if (ldv_state_variable_35 == 1) { ixgbe_get_drvinfo(ixgbe_ethtool_ops_group6, ldvarg331); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 22: ; if (ldv_state_variable_35 == 1) { ixgbe_set_pauseparam(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group3); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 23: ; if (ldv_state_variable_35 == 1) { ixgbe_get_ethtool_stats(ixgbe_ethtool_ops_group6, ldvarg330, ldvarg329); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 24: ; if (ldv_state_variable_35 == 1) { ixgbe_get_coalesce(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group5); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 25: ; if (ldv_state_variable_35 == 1) { ixgbe_get_regs(ixgbe_ethtool_ops_group6, ldvarg328, ldvarg327); 
ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 26: ; if (ldv_state_variable_35 == 1) { ixgbe_rss_indir_size(ixgbe_ethtool_ops_group6); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 27: ; if (ldv_state_variable_35 == 1) { ixgbe_set_wol(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group8); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 28: ; if (ldv_state_variable_35 == 1) { ixgbe_set_settings(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group1); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 29: ; if (ldv_state_variable_35 == 1) { ixgbe_get_eeprom(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group2, ldvarg326); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 30: ; if (ldv_state_variable_35 == 1) { ixgbe_get_wol(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group8); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 31: ; if (ldv_state_variable_35 == 1) { ixgbe_set_eeprom(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group2, ldvarg325); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 32: ; if (ldv_state_variable_35 == 1) { ixgbe_get_msglevel(ixgbe_ethtool_ops_group6); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 33: ; if (ldv_state_variable_35 == 1) { ixgbe_get_regs_len(ixgbe_ethtool_ops_group6); ldv_state_variable_35 = 1; } else { } goto ldv_56834; case 34: ; if (ldv_state_variable_35 == 1) { ixgbe_set_ringparam(ixgbe_ethtool_ops_group6, ixgbe_ethtool_ops_group0); ldv_state_variable_35 = 1; } else { } goto ldv_56834; default: ldv_stop(); } ldv_56834: ; return; } } bool ldv_queue_work_on_151(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_152(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , 
struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* LDV model of queue_work_on(): forwards to the real function, then records the
 * pending work item in the verifier's work-queue model via activate_work_9(). */
bool ldv_queue_work_on_153(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } }
/* LDV model of flush_workqueue(): flushes, then tells the model that all
 * registered work has been run and disabled (call_and_disable_all_9). */
void ldv_flush_workqueue_154(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } }
/* LDV model of queue_delayed_work_on(): same pattern — real call plus
 * activation of the embedded work_struct in the verifier model. */
bool ldv_queue_delayed_work_on_155(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* LDV model of kmem_cache_alloc(): validates the gfp flags against the current
 * context (ldv_check_alloc_flags) and returns an unconstrained pointer —
 * the allocation itself is abstracted away for verification. */
void *ldv_kmem_cache_alloc_161(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } }
/* LDV model of alloc_skb(): gfp-flag check plus nondeterministic result. */
__inline static struct sk_buff *alloc_skb(unsigned int size , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
/* LDV model of pskb_expand_head(): checks flags; the int result is an
 * unconstrained value derived from an undefined pointer. */
int ldv_pskb_expand_head_167(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } }
/* LDV model of skb_clone(): gfp-flag check plus nondeterministic skb pointer. */
struct sk_buff *ldv_skb_clone_169(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
/* LDV model of skb_copy(): same abstraction (body continues on next line). */
struct sk_buff *ldv_skb_copy_171(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { 
void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_172(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_173(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_174(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_175(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_176(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_177(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_178(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vmalloc_179(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void ldv_free_irq_183(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) { { free_irq(ldv_func_arg1, ldv_func_arg2); disable_suitable_irq_7((int )ldv_func_arg1, ldv_func_arg2); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_207(int 
ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_209(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_208(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_211(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_210(struct workqueue_struct *ldv_func_arg1 ) ;
/* 64-bit MMIO write: a single movq store through a volatile pointer; the
 * "memory" clobber keeps the compiler from reordering it past other accesses. */
__inline static void writeq(unsigned long val , void volatile *addr ) { { __asm__ volatile ("movq %0,%1": : "r" (val), "m" (*((unsigned long volatile *)addr)): "memory"); return; } } void *ldv_kmem_cache_alloc_217(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_234(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_225(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_233(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_227(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_223(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_231(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_232(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_228(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_229(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_230(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct ixgbe_mbx_operations mbx_ops_generic ;
/* Writes a 64-bit value at byte offset 'reg' within the mapped BAR. hw->hw_addr
 * is re-read through a volatile pointer and the write is skipped when
 * ixgbe_removed() reports the adapter has been surprise-removed/detached. */
__inline 
static void ixgbe_write_reg64(struct ixgbe_hw *hw , u32 reg , u64 value ) { u8 *reg_addr ; u8 *__var ; bool tmp ; { __var = (u8 *)0U; reg_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ixgbe_removed((void *)reg_addr); if ((int )tmp) { return; } else { } writeq((unsigned long )value, (void volatile *)reg_addr + (unsigned long )reg); return; } } s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw , u32 reg_addr , u32 device_type , u16 *phy_data ) ; s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw , u32 reg_addr , u32 device_type , u16 phy_data ) ; s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete ) ; s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw , ixgbe_link_speed *speed , bool *autoneg ) ; bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw ) ; s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw , ixgbe_link_speed *speed , bool *link_up ) ; s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw ) ; s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw , u16 *firmware_version ) ; s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw ) ; s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw , u16 *list_offset , u16 *data_offset ) ; s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw ) ; s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw , u8 byte_offset , u8 dev_addr , u8 *data ) ; s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw , u8 byte_offset , u8 dev_addr , u8 data ) ; s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw , u8 byte_offset , u8 *eeprom_data ) ; s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw , u8 byte_offset , u8 *sff8472_data ) ; s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw , u8 byte_offset , u8 eeprom_data ) ; static void ixgbe_disable_tx_laser_multispeed_fiber(struct 
ixgbe_hw *hw ) ; static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw ) ; static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw ) ; static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete ) ; static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete ) ; static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw ) ; static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw , bool autoneg_wait_to_complete ) ; static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete ) ; static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete ) ; static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw ) ; static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw , u8 byte_offset , u8 dev_addr , u8 *data ) ; static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw , u8 byte_offset , u8 dev_addr , u8 data ) ; static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw ) ; static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw ) ;
/* Reports whether manageability firmware is active. Returns 1 only when the
 * first mvals register's mode field (mask 14U, i.e. bits 1-3) equals 4, bit 17
 * of register 0x5820 (presumably MANC — confirm against the 82599 datasheet)
 * is set, and bit 29 of the second mvals register is clear; 0 otherwise.
 * NOTE(review): mvals indices 7 and 4 look like FWSM/FACTPS — verify. */
bool ixgbe_mng_enabled(struct ixgbe_hw *hw ) { u32 fwsm ; u32 manc ; u32 factps ; { fwsm = ixgbe_read_reg(hw, *(hw->mvals + 7UL)); if ((fwsm & 14U) != 4U) { return (0); } else { } manc = ixgbe_read_reg(hw, 22560U); if ((manc & 131072U) == 0U) { return (0); } else { } factps = ixgbe_read_reg(hw, *(hw->mvals + 4UL)); if ((factps & 536870912U) != 0U) { return (0); } else { } return (1); } }
/* Installs media-specific MAC link ops: laser-control callbacks are only set
 * when the media type is fiber (media type value 1) AND manageability FW is
 * not active (ixgbe_mng_enabled() == 0); setup_link is chosen among the
 * multispeed-fiber, SmartSpeed and plain 82599 variants based on
 * phy.multispeed_fiber, phy.smart_speed and whether LESM firmware is enabled
 * (choice continues on the next generated line). */
static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw ) { struct ixgbe_mac_info *mac ; enum ixgbe_media_type tmp ; bool tmp___0 ; int tmp___1 ; enum ixgbe_media_type tmp___2 ; bool tmp___3 ; int tmp___4 ; { mac = & hw->mac; tmp = (*(mac->ops.get_media_type))(hw); if ((unsigned int )tmp == 1U) { tmp___0 = ixgbe_mng_enabled(hw); if (tmp___0) { tmp___1 = 0; } else { tmp___1 
= 1; } if (tmp___1) { mac->ops.disable_tx_laser = & ixgbe_disable_tx_laser_multispeed_fiber; mac->ops.enable_tx_laser = & ixgbe_enable_tx_laser_multispeed_fiber; mac->ops.flap_tx_laser = & ixgbe_flap_tx_laser_multispeed_fiber; } else { mac->ops.disable_tx_laser = (void (*)(struct ixgbe_hw * ))0; mac->ops.enable_tx_laser = (void (*)(struct ixgbe_hw * ))0; mac->ops.flap_tx_laser = (void (*)(struct ixgbe_hw * ))0; } } else { mac->ops.disable_tx_laser = (void (*)(struct ixgbe_hw * ))0; mac->ops.enable_tx_laser = (void (*)(struct ixgbe_hw * ))0; mac->ops.flap_tx_laser = (void (*)(struct ixgbe_hw * ))0; } if ((int )hw->phy.multispeed_fiber) { mac->ops.setup_link = & ixgbe_setup_mac_link_multispeed_fiber; } else { tmp___2 = (*(mac->ops.get_media_type))(hw); if ((unsigned int )tmp___2 == 5U && ((unsigned int )hw->phy.smart_speed == 0U || (unsigned int )hw->phy.smart_speed == 1U)) { tmp___3 = ixgbe_verify_lesm_fw_enabled_82599(hw); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { mac->ops.setup_link = & ixgbe_setup_mac_link_smartspeed; } else { mac->ops.setup_link = & ixgbe_setup_mac_link_82599; } } else { mac->ops.setup_link = & ixgbe_setup_mac_link_82599; } } return; } } static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw ) { s32 ret_val ; u16 list_offset ; u16 data_offset ; u16 data_value ; s32 tmp ; s32 tmp___0 ; struct _ddebug descriptor ; long tmp___1 ; { if ((unsigned int )hw->phy.sfp_type != 65535U) { ixgbe_init_mac_link_ops_82599(hw); hw->phy.ops.reset = (s32 (*)(struct ixgbe_hw * ))0; ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, & list_offset, & data_offset); if (ret_val != 0) { return (ret_val); } else { } ret_val = (*(hw->mac.ops.acquire_swfw_sync))(hw, 8U); if (ret_val != 0) { return (-16); } else { } data_offset = (u16 )((int )data_offset + 1); tmp = (*(hw->eeprom.ops.read))(hw, (int )data_offset, & data_value); if (tmp != 0) { goto setup_sfp_err; } else { } goto ldv_55493; ldv_55492: ixgbe_write_reg(hw, 85760U, (u32 
)data_value); ixgbe_read_reg(hw, 8U); data_offset = (u16 )((int )data_offset + 1); tmp___0 = (*(hw->eeprom.ops.read))(hw, (int )data_offset, & data_value); if (tmp___0 != 0) { goto setup_sfp_err; } else { } ldv_55493: ; if ((unsigned int )data_value != 65535U) { goto ldv_55492; } else { } (*(hw->mac.ops.release_swfw_sync))(hw, 8U); usleep_range((unsigned long )(hw->eeprom.semaphore_delay * 1000U), (unsigned long )(hw->eeprom.semaphore_delay * 2000U)); ret_val = (*(hw->mac.ops.prot_autoc_write))(hw, hw->mac.orig_autoc | 24576U, 0); if (ret_val != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_setup_sfp_modules_82599"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor.format = " sfp module setup not complete\n"; descriptor.lineno = 169U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, " sfp module setup not complete\n"); } else { } return (-30); } else { } } else { } return (0); setup_sfp_err: (*(hw->mac.ops.release_swfw_sync))(hw, 8U); usleep_range((unsigned long )(hw->eeprom.semaphore_delay * 1000U), (unsigned long )(hw->eeprom.semaphore_delay * 2000U)); netdev_err((struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "eeprom read at offset %d failed\n", (int )data_offset); return (-30); } } static s32 prot_autoc_read_82599(struct ixgbe_hw *hw , bool *locked , u32 *reg_val ) { s32 ret_val ; bool tmp ; { *locked = 0; tmp = ixgbe_verify_lesm_fw_enabled_82599(hw); if ((int )tmp) { ret_val = (*(hw->mac.ops.acquire_swfw_sync))(hw, 8U); if (ret_val != 0) { return (-16); } else { } *locked = 1; } else { } *reg_val = ixgbe_read_reg(hw, 17056U); return (0); } } static 
/* Protected AUTOC write for 82599: does nothing if a PHY reset is blocked
 * (MNG FW owns the PHY). If the caller does not already hold the SW/FW
 * semaphore and LESM firmware is enabled, acquires the semaphore (mask 8U)
 * and returns -16 on failure (presumably IXGBE_ERR_SWFW_SYNC — confirm).
 * Writes register 17056 (0x42A0, AUTOC), resets the MAC pipeline, and
 * releases the semaphore on exit when 'locked' is set. */
s32 prot_autoc_write_82599(struct ixgbe_hw *hw , u32 autoc , bool locked ) { s32 ret_val ; bool tmp ; bool tmp___0 ; { ret_val = 0; tmp = ixgbe_check_reset_blocked(hw); if ((int )tmp) { goto out; } else { } if (! locked) { tmp___0 = ixgbe_verify_lesm_fw_enabled_82599(hw); if ((int )tmp___0) { ret_val = (*(hw->mac.ops.acquire_swfw_sync))(hw, 8U); if (ret_val != 0) { return (-16); } else { } locked = 1; } else { } } else { } ixgbe_write_reg(hw, 17056U, autoc); ret_val = ixgbe_reset_pipeline_82599(hw); out: ; if ((int )locked) { (*(hw->mac.ops.release_swfw_sync))(hw, 8U); } else { } return (ret_val); } }
/* Fills in fixed 82599 capacities: 128 multicast/VLAN filter table entries,
 * 128 receive-address (RAR) entries, rx_pb_size = 512 (units not visible
 * here), 128 RX and 128 TX queues; MSI-X vector count is read from PCIe
 * config space. Also (re)installs the MAC link ops. Always returns 0. */
static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw ) { struct ixgbe_mac_info *mac ; { mac = & hw->mac; ixgbe_init_mac_link_ops_82599(hw); mac->mcft_size = 128U; mac->vft_size = 128U; mac->num_rar_entries = 128U; mac->rx_pb_size = 512U; mac->max_rx_queues = 128U; mac->max_tx_queues = 128U; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); return (0); } }
/* PHY ops initialisation. For device id 5464 the QSFP I2C bus is shared:
 * ESDP (register offset 32) is reprogrammed (set bit 8; clear bits 0, 9, 16
 * and 17) and the 82599-specific I2C byte read/write ops are installed.
 * Then the PHY is identified, link ops re-selected, copper setup_link /
 * get_link_capabilities installed when the media type is copper (value 4),
 * and TNX-specific PHY callbacks hooked for phy.type == 2
 * (function body continues on the next generated line). */
static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw ) { struct ixgbe_mac_info *mac ; struct ixgbe_phy_info *phy ; s32 ret_val ; u32 esdp ; enum ixgbe_media_type tmp ; { mac = & hw->mac; phy = & hw->phy; if ((unsigned int )hw->device_id == 5464U) { hw->phy.qsfp_shared_i2c_bus = 1; esdp = ixgbe_read_reg(hw, 32U); esdp = esdp | 256U; esdp = esdp & 4294966783U; esdp = esdp & 4294967294U; esdp = esdp & 4294901759U; esdp = esdp & 4294836223U; ixgbe_write_reg(hw, 32U, esdp); ixgbe_read_reg(hw, 8U); phy->ops.read_i2c_byte = & ixgbe_read_i2c_byte_82599; phy->ops.write_i2c_byte = & ixgbe_write_i2c_byte_82599; } else { } ret_val = (*(phy->ops.identify))(hw); ixgbe_init_mac_link_ops_82599(hw); tmp = (*(mac->ops.get_media_type))(hw); if ((unsigned int )tmp == 4U) { mac->ops.setup_link = & ixgbe_setup_copper_link_82599; mac->ops.get_link_capabilities = & ixgbe_get_copper_link_capabilities_generic; } else { } switch ((unsigned int )hw->phy.type) { case 2U: phy->ops.check_link = & ixgbe_check_phy_link_tnx; 
phy->ops.setup_link = & ixgbe_setup_phy_link_tnx; phy->ops.get_firmware_version = & ixgbe_get_phy_firmware_version_tnx; goto ldv_55522; default: ; goto ldv_55522; } ldv_55522: ; return (ret_val); } } static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw , ixgbe_link_speed *speed , bool *autoneg ) { u32 autoc ; { autoc = 0U; if ((((((unsigned int )hw->phy.sfp_type == 9U || (unsigned int )hw->phy.sfp_type == 10U) || (unsigned int )hw->phy.sfp_type == 13U) || (unsigned int )hw->phy.sfp_type == 14U) || (unsigned int )hw->phy.sfp_type == 11U) || (unsigned int )hw->phy.sfp_type == 12U) { *speed = 32U; *autoneg = 1; return (0); } else { } if ((int )hw->mac.orig_link_settings_stored) { autoc = hw->mac.orig_autoc; } else { autoc = ixgbe_read_reg(hw, 17056U); } switch (autoc & 57344U) { case 0U: *speed = 32U; *autoneg = 0; goto ldv_55531; case 8192U: *speed = 128U; *autoneg = 0; goto ldv_55531; case 16384U: *speed = 32U; *autoneg = 1; goto ldv_55531; case 24576U: *speed = 128U; *autoneg = 0; goto ldv_55531; case 32768U: ; case 49152U: *speed = 0U; if ((autoc & 65536U) != 0U) { *speed = *speed | 128U; } else { } if ((int )autoc < 0) { *speed = *speed | 128U; } else { } if ((autoc & 1073741824U) != 0U) { *speed = *speed | 32U; } else { } *autoneg = 1; goto ldv_55531; case 57344U: *speed = 8U; if ((autoc & 65536U) != 0U) { *speed = *speed | 128U; } else { } if ((int )autoc < 0) { *speed = *speed | 128U; } else { } if ((autoc & 1073741824U) != 0U) { *speed = *speed | 32U; } else { } *autoneg = 1; goto ldv_55531; case 40960U: *speed = 40U; *autoneg = 0; goto ldv_55531; default: ; return (-8); } ldv_55531: ; if ((int )hw->phy.multispeed_fiber) { *speed = *speed | 160U; if ((unsigned int )hw->phy.media_type == 2U) { *autoneg = 0; } else { *autoneg = 1; } } else { } return (0); } } static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw ) { { switch ((unsigned int )hw->phy.type) { case 7U: ; case 2U: ; return (4); default: ; goto ldv_55546; } 
/* (continuation of ixgbe_get_media_type_82599: maps PCI device ids to media
 * type codes — the numeric returns are enum ixgbe_media_type values; the
 * exact enumerators are declared elsewhere in the file.) */
ldv_55546: ; switch ((int )hw->device_id) { case 4343: ; case 5396: ; case 4344: ; case 5399: ; case 5418: ; case 4348: ; return (5); case 4347: ; case 5417: ; case 5383: ; case 5453: ; case 5450: ; case 5463: ; return (1); case 4345: ; return (6); case 5404: ; return (4); case 5455: ; return (3); case 5464: ; return (2); default: ; return (0); } } }
/* On entry to D3: when manageability FW is inactive (mode field != 4), WoL is
 * disabled, and bit 1 of EEPROM control word 1 is set, OR bits 28 and 30
 * (mask 0x50000000) into register 17064 (0x42A8, presumably AUTOC2 — confirm)
 * to shut the link down while suspended. */
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw ) { u32 autoc2_reg ; u32 fwsm ; u16 ee_ctrl_2 ; { ee_ctrl_2 = 0U; (*(hw->eeprom.ops.read))(hw, 1, & ee_ctrl_2); fwsm = ixgbe_read_reg(hw, *(hw->mvals + 7UL)); if (((fwsm & 14U) != 4U && ! hw->wol_enabled) && ((int )ee_ctrl_2 & 2) != 0) { autoc2_reg = ixgbe_read_reg(hw, 17064U); autoc2_reg = autoc2_reg | 1342177280U; ixgbe_write_reg(hw, 17064U, autoc2_reg); } else { } return; } }
/* Restarts the MAC link: resets the pipeline (taking the LESM SW/FW semaphore,
 * mask 8U, when LESM firmware is enabled), then, if autoneg_wait_to_complete
 * and the AUTOC link-mode field (mask 57344 = 0xE000) selects one of the
 * autonegotiating backplane modes, polls register 17060 (0x42A4, LINKS) up to
 * 45 times at 100 ms intervals for the completion bit (sign bit); sets
 * status = -14 and emits a dynamic-debug message on timeout
 * (message emission continues on the next generated line). */
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw , bool autoneg_wait_to_complete ) { u32 autoc_reg ; u32 links_reg ; u32 i ; s32 status ; bool got_lock ; bool tmp ; struct _ddebug descriptor ; long tmp___0 ; { status = 0; got_lock = 0; tmp = ixgbe_verify_lesm_fw_enabled_82599(hw); if ((int )tmp) { status = (*(hw->mac.ops.acquire_swfw_sync))(hw, 8U); if (status != 0) { return (status); } else { } got_lock = 1; } else { } ixgbe_reset_pipeline_82599(hw); if ((int )got_lock) { (*(hw->mac.ops.release_swfw_sync))(hw, 8U); } else { } if ((int )autoneg_wait_to_complete) { autoc_reg = ixgbe_read_reg(hw, 17056U); if (((autoc_reg & 57344U) == 32768U || (autoc_reg & 57344U) == 49152U) || (autoc_reg & 57344U) == 57344U) { links_reg = 0U; i = 0U; goto ldv_55581; ldv_55580: links_reg = ixgbe_read_reg(hw, 17060U); if ((int )links_reg < 0) { goto ldv_55579; } else { } msleep(100U); i = i + 1U; ldv_55581: ; if (i <= 44U) { goto ldv_55580; } else { } ldv_55579: ; if ((int )links_reg >= 0) { status = -14; descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_start_mac_link_82599"; descriptor.filename = 
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor.format = "Autoneg did not complete.\n"; descriptor.lineno = 574U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Autoneg did not complete.\n"); } else { } } else { } } else { } } else { } msleep(50U); return (status); } }
/* Turns the TX laser off by setting ESDP (register 32) bit 3; a no-op while a
 * PHY reset is blocked by MNG firmware. The flush read of register 8 posts
 * the write, and the delay lets the laser power down (about 100 us —
 * __const_udelay(429500UL); confirm scaling). */
static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw ) { u32 esdp_reg ; u32 tmp ; bool tmp___0 ; { tmp = ixgbe_read_reg(hw, 32U); esdp_reg = tmp; tmp___0 = ixgbe_check_reset_blocked(hw); if ((int )tmp___0) { return; } else { } esdp_reg = esdp_reg | 8U; ixgbe_write_reg(hw, 32U, esdp_reg); ixgbe_read_reg(hw, 8U); __const_udelay(429500UL); return; } }
/* Turns the TX laser back on by clearing ESDP bit 3 (mask 0xFFFFFFF7), posts
 * the write, and sleeps 100 ms for the laser to stabilise. */
static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw ) { u32 esdp_reg ; u32 tmp ; { tmp = ixgbe_read_reg(hw, 32U); esdp_reg = tmp; esdp_reg = esdp_reg & 4294967287U; ixgbe_write_reg(hw, 32U, esdp_reg); ixgbe_read_reg(hw, 8U); msleep(100U); return; } }
/* When mac.autotry_restart is set, pulses the TX laser off then on to restart
 * multispeed-fiber link negotiation, then clears the flag; skipped entirely
 * while a PHY reset is blocked. */
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw ) { bool tmp ; { tmp = ixgbe_check_reset_blocked(hw); if ((int )tmp) { return; } else { } if ((int )hw->mac.autotry_restart) { ixgbe_disable_tx_laser_multispeed_fiber(hw); ixgbe_enable_tx_laser_multispeed_fiber(hw); hw->mac.autotry_restart = 0; } else { } return; } }
/* Multispeed-fiber link setup: negotiates the highest speed supported by both
 * 'speed' and the reported link capabilities, trying 10G (128U) before 1G
 * (32U), and recurses on the highest speed if more than one was attempted
 * (body continues on the following generated lines). */
static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete ) { s32 status ; ixgbe_link_speed link_speed ; ixgbe_link_speed highest_link_speed ; u32 speedcnt ; u32 esdp_reg ; u32 tmp ; u32 i ; bool link_up ; bool autoneg ; struct _ddebug descriptor ; long tmp___0 ; struct _ddebug descriptor___0 ; long tmp___1 ; { status = 
0; link_speed = 0U; highest_link_speed = 0U; speedcnt = 0U; tmp = ixgbe_read_reg(hw, 32U); esdp_reg = tmp; i = 0U; link_up = 0; autoneg = 0; status = (*(hw->mac.ops.get_link_capabilities))(hw, & link_speed, & autoneg); if (status != 0) { return (status); } else { } speed = speed & link_speed; if ((speed & 128U) != 0U) { speedcnt = speedcnt + 1U; highest_link_speed = 128U; status = (*(hw->mac.ops.check_link))(hw, & link_speed, & link_up, 0); if (status != 0) { return (status); } else { } if (link_speed == 128U && (int )link_up) { goto out; } else { } switch ((unsigned int )hw->phy.media_type) { case 1U: esdp_reg = esdp_reg | 8224U; ixgbe_write_reg(hw, 32U, esdp_reg); ixgbe_read_reg(hw, 8U); goto ldv_55610; case 2U: ; goto ldv_55610; default: descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_setup_mac_link_multispeed_fiber"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor.format = "Unexpected media type.\n"; descriptor.lineno = 709U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Unexpected media type.\n"); } else { } goto ldv_55610; } ldv_55610: msleep(40U); status = ixgbe_setup_mac_link_82599(hw, 128U, (int )autoneg_wait_to_complete); if (status != 0) { return (status); } else { } if ((unsigned long )hw->mac.ops.flap_tx_laser != (unsigned long )((void (*)(struct ixgbe_hw * ))0)) { (*(hw->mac.ops.flap_tx_laser))(hw); } else { } i = 0U; goto ldv_55616; ldv_55615: msleep(100U); status = (*(hw->mac.ops.check_link))(hw, & link_speed, & link_up, 0); if (status != 0) { return (status); } else { } if ((int )link_up) { goto out; } else { } i = i + 1U; ldv_55616: ; if (i <= 4U) { goto 
ldv_55615; } else { } } else { } if ((speed & 32U) != 0U) { speedcnt = speedcnt + 1U; if (highest_link_speed == 0U) { highest_link_speed = 32U; } else { } status = (*(hw->mac.ops.check_link))(hw, & link_speed, & link_up, 0); if (status != 0) { return (status); } else { } if (link_speed == 32U && (int )link_up) { goto out; } else { } switch ((unsigned int )hw->phy.media_type) { case 1U: esdp_reg = esdp_reg & 4294967263U; esdp_reg = esdp_reg | 8192U; ixgbe_write_reg(hw, 32U, esdp_reg); ixgbe_read_reg(hw, 8U); goto ldv_55619; case 2U: ; goto ldv_55619; default: descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_setup_mac_link_multispeed_fiber"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor___0.format = "Unexpected media type.\n"; descriptor___0.lineno = 772U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Unexpected media type.\n"); } else { } goto ldv_55619; } ldv_55619: msleep(40U); status = ixgbe_setup_mac_link_82599(hw, 32U, (int )autoneg_wait_to_complete); if (status != 0) { return (status); } else { } if ((unsigned long )hw->mac.ops.flap_tx_laser != (unsigned long )((void (*)(struct ixgbe_hw * ))0)) { (*(hw->mac.ops.flap_tx_laser))(hw); } else { } msleep(100U); status = (*(hw->mac.ops.check_link))(hw, & link_speed, & link_up, 0); if (status != 0) { return (status); } else { } if ((int )link_up) { goto out; } else { } } else { } if (speedcnt > 1U) { status = ixgbe_setup_mac_link_multispeed_fiber(hw, highest_link_speed, (int )autoneg_wait_to_complete); } else { } out: hw->phy.autoneg_advertised = 0U; if ((speed & 128U) != 0U) { hw->phy.autoneg_advertised 
= hw->phy.autoneg_advertised | 128U; } else { } if ((speed & 32U) != 0U) { hw->phy.autoneg_advertised = hw->phy.autoneg_advertised | 32U; } else { } return (status); } } static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete ) { s32 status ; ixgbe_link_speed link_speed ; s32 i ; s32 j ; bool link_up ; u32 autoc_reg ; u32 tmp ; unsigned long __ms ; unsigned long tmp___0 ; unsigned long __ms___0 ; unsigned long tmp___1 ; struct _ddebug descriptor ; long tmp___2 ; { status = 0; link_speed = 0U; link_up = 0; tmp = ixgbe_read_reg(hw, 17056U); autoc_reg = tmp; hw->phy.autoneg_advertised = 0U; if ((speed & 128U) != 0U) { hw->phy.autoneg_advertised = hw->phy.autoneg_advertised | 128U; } else { } if ((speed & 32U) != 0U) { hw->phy.autoneg_advertised = hw->phy.autoneg_advertised | 32U; } else { } if ((speed & 8U) != 0U) { hw->phy.autoneg_advertised = hw->phy.autoneg_advertised | 8U; } else { } hw->phy.smart_speed_active = 0; j = 0; goto ldv_55643; ldv_55642: status = ixgbe_setup_mac_link_82599(hw, speed, (int )autoneg_wait_to_complete); if (status != 0) { goto out; } else { } i = 0; goto ldv_55640; ldv_55639: __ms = 100UL; goto ldv_55637; ldv_55636: __const_udelay(4295000UL); ldv_55637: tmp___0 = __ms; __ms = __ms - 1UL; if (tmp___0 != 0UL) { goto ldv_55636; } else { } status = (*(hw->mac.ops.check_link))(hw, & link_speed, & link_up, 0); if (status != 0) { goto out; } else { } if ((int )link_up) { goto out; } else { } i = i + 1; ldv_55640: ; if (i <= 4) { goto ldv_55639; } else { } j = j + 1; ldv_55643: ; if (j <= 2) { goto ldv_55642; } else { } if ((autoc_reg & 65536U) == 0U || (autoc_reg & 3221225472U) == 0U) { goto out; } else { } hw->phy.smart_speed_active = 1; status = ixgbe_setup_mac_link_82599(hw, speed, (int )autoneg_wait_to_complete); if (status != 0) { goto out; } else { } i = 0; goto ldv_55650; ldv_55649: __ms___0 = 100UL; goto ldv_55647; ldv_55646: __const_udelay(4295000UL); ldv_55647: tmp___1 = 
__ms___0; __ms___0 = __ms___0 - 1UL; if (tmp___1 != 0UL) { goto ldv_55646; } else { } status = (*(hw->mac.ops.check_link))(hw, & link_speed, & link_up, 0); if (status != 0) { goto out; } else { } if ((int )link_up) { goto out; } else { } i = i + 1; ldv_55650: ; if (i <= 5) { goto ldv_55649; } else { } hw->phy.smart_speed_active = 0; status = ixgbe_setup_mac_link_82599(hw, speed, (int )autoneg_wait_to_complete); out: ; if ((int )link_up && link_speed == 32U) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_setup_mac_link_smartspeed"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor.format = "Smartspeed has downgraded the link speed from the maximum advertised\n"; descriptor.lineno = 931U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Smartspeed has downgraded the link speed from the maximum advertised\n"); } else { } } else { } return (status); } } static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete ) { bool autoneg ; s32 status ; u32 pma_pmd_1g ; u32 link_mode ; u32 links_reg ; u32 i ; u32 autoc2 ; u32 tmp ; u32 pma_pmd_10g_serial ; ixgbe_link_speed link_capabilities ; u32 current_autoc ; u32 tmp___0 ; u32 orig_autoc ; u32 autoc ; struct _ddebug descriptor ; long tmp___1 ; { autoneg = 0; tmp = ixgbe_read_reg(hw, 17064U); autoc2 = tmp; pma_pmd_10g_serial = autoc2 & 196608U; link_capabilities = 0U; tmp___0 = ixgbe_read_reg(hw, 17056U); current_autoc = tmp___0; orig_autoc = 0U; autoc = current_autoc; status = (*(hw->mac.ops.get_link_capabilities))(hw, & link_capabilities, & autoneg); if (status != 0) { return (status); 
/* ixgbe_setup_mac_link_82599 (interior, CIL-flattened): clamp the requested
 * speed to the advertised capabilities, then rebuild the AUTOC link-mode and
 * speed bits. The decimal constants are CIL-expanded ixgbe register masks;
 * names in comments are reconstructed from the 82599 datasheet — TODO confirm
 * against ixgbe_type.h. */
} else { } speed = speed & link_capabilities;
/* No overlap between request and capabilities -> link-setup error (-8). */
if (speed == 0U) { return (-8); } else { }
/* Prefer the link settings captured at reset time, if any were stored. */
if ((int )hw->mac.orig_link_settings_stored) { orig_autoc = hw->mac.orig_autoc; } else { orig_autoc = autoc; }
/* 57344U == 0xE000: AUTOC link-mode-select field; 512U: presumably the
 * 1G PMA/PMD selection bit — verify against the datasheet. */
link_mode = autoc & 57344U; pma_pmd_1g = autoc & 512U;
/* KX/KX4/KR style link modes (0x8000 / 0xC000 / 0xE000): clear the KX4/KR
 * support bits (mask 1073676287U == ~0x30000) and re-enable only what was
 * requested. */
if ((link_mode == 32768U || link_mode == 49152U) || link_mode == 57344U) { autoc = autoc & 1073676287U; if ((speed & 128U) != 0U) { if ((int )orig_autoc < 0) { autoc = autoc | 2147483648U; } else { } if ((orig_autoc & 65536U) != 0U && ! hw->phy.smart_speed_active) { autoc = autoc | 65536U; } else { } } else { } if ((speed & 32U) != 0U) { autoc = autoc | 1073741824U; } else { } }
/* 10G serial (SFI) path: switch LMS to 10G-serial when only 10G is asked. */
else if (pma_pmd_1g == 0U && (link_mode == 0U || link_mode == 16384U)) { if (speed == 128U && pma_pmd_10g_serial == 131072U) { autoc = autoc & 4294909951U; autoc = autoc | 24576U; } else { } }
/* 1G-only request while currently in 10G-serial mode: drop LMS back to the
 * 1G AN mode (16384U) when autoneg is wanted. */
else if (pma_pmd_10g_serial == 131072U && link_mode == 24576U) { if (speed == 32U && pma_pmd_1g == 0U) { autoc = autoc & 4294909951U; if ((int )autoneg) { autoc = autoc | 16384U; } else { autoc = autoc; } } else { } } else { }
/* Only touch the hardware when AUTOC actually changed; prot_autoc_write
 * handles the SW/FW semaphore around the restart. */
if (autoc != current_autoc) { status = (*(hw->mac.ops.prot_autoc_write))(hw, autoc, 0); if (status != 0) { return (status); } else { }
/* Optionally poll LINKS (offset 17060U == 0x42A4) for autoneg completion:
 * up to 45 iterations of 100 ms. (int)links_reg < 0 tests bit 31 — the
 * KX/KX4 AN-complete bit, presumably — TODO confirm. */
if ((int )autoneg_wait_to_complete) { if ((link_mode == 32768U || link_mode == 49152U) || link_mode == 57344U) { links_reg = 0U; i = 0U; goto ldv_55673; ldv_55672: links_reg = ixgbe_read_reg(hw, 17060U); if ((int )links_reg < 0) { goto ldv_55671; } else { } msleep(100U); i = i + 1U; ldv_55673: ; if (i <= 44U) { goto ldv_55672; } else { } ldv_55671: ;
/* Timed out without AN-complete: report -14 and emit a dynamic-debug note. */
if ((int )links_reg >= 0) { status = -14; descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_setup_mac_link_82599"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor.format = "Autoneg did not complete.\n"; descriptor.lineno = 1039U;
descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Autoneg did not complete.\n"); } else { } } else { } } else { } } else { } msleep(50U); } else { } return (status); } } static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete ) { s32 status ; { status = (*(hw->phy.ops.setup_link_speed))(hw, speed, (int )autoneg_wait_to_complete); ixgbe_start_mac_link_82599(hw, (int )autoneg_wait_to_complete); return (status); } } static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw ) { ixgbe_link_speed link_speed ; s32 status ; u32 ctrl ; u32 i ; u32 autoc ; u32 autoc2 ; u32 curr_lms ; bool link_up ; u32 tmp ; u32 tmp___0 ; struct _ddebug descriptor ; long tmp___1 ; bool tmp___2 ; bool tmp___3 ; { link_up = 0; status = (*(hw->mac.ops.stop_adapter))(hw); if (status != 0) { return (status); } else { } ixgbe_clear_tx_pending(hw); status = (*(hw->phy.ops.init))(hw); if (status == -19) { return (status); } else { } if ((int )hw->phy.sfp_setup_needed) { status = (*(hw->mac.ops.setup_sfp))(hw); hw->phy.sfp_setup_needed = 0; } else { } if (status == -19) { return (status); } else { } if (! hw->phy.reset_disable && (unsigned long )hw->phy.ops.reset != (unsigned long )((s32 (*)(struct ixgbe_hw * ))0)) { (*(hw->phy.ops.reset))(hw); } else { } tmp = ixgbe_read_reg(hw, 17056U); curr_lms = tmp & 57344U; mac_reset_top: ctrl = 8U; if (! 
hw->force_full_reset) { (*(hw->mac.ops.check_link))(hw, & link_speed, & link_up, 0); if ((int )link_up) { ctrl = 67108864U; } else { } } else { } tmp___0 = ixgbe_read_reg(hw, 0U); ctrl = tmp___0 | ctrl; ixgbe_write_reg(hw, 0U, ctrl); ixgbe_read_reg(hw, 8U); i = 0U; goto ldv_55696; ldv_55695: __const_udelay(4295UL); ctrl = ixgbe_read_reg(hw, 0U); if ((ctrl & 67108872U) == 0U) { goto ldv_55694; } else { } i = i + 1U; ldv_55696: ; if (i <= 9U) { goto ldv_55695; } else { } ldv_55694: ; if ((ctrl & 67108872U) != 0U) { status = -15; descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_reset_hw_82599"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor.format = "Reset polling failed to complete.\n"; descriptor.lineno = 1150U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Reset polling failed to complete.\n"); } else { } } else { } msleep(50U); if ((int )hw->mac.flags & 1) { hw->mac.flags = (unsigned int )hw->mac.flags & 254U; goto mac_reset_top; } else { } autoc = ixgbe_read_reg(hw, 17056U); autoc2 = ixgbe_read_reg(hw, 17064U); if ((autoc2 & 1879048192U) != 0U) { autoc2 = autoc2 & 2415919103U; ixgbe_write_reg(hw, 17064U, autoc2); ixgbe_read_reg(hw, 8U); } else { } if (! 
hw->mac.orig_link_settings_stored) { hw->mac.orig_autoc = autoc; hw->mac.orig_autoc2 = autoc2; hw->mac.orig_link_settings_stored = 1; } else { if ((int )hw->phy.multispeed_fiber) { tmp___2 = ixgbe_mng_enabled(hw); if ((int )tmp___2) { hw->mac.orig_autoc = (hw->mac.orig_autoc & 4294909951U) | curr_lms; } else { goto _L; } } else _L: /* CIL Label */ if ((int )hw->wol_enabled) { hw->mac.orig_autoc = (hw->mac.orig_autoc & 4294909951U) | curr_lms; } else { } if (hw->mac.orig_autoc != autoc) { status = (*(hw->mac.ops.prot_autoc_write))(hw, hw->mac.orig_autoc, 0); if (status != 0) { return (status); } else { } } else { } if (((hw->mac.orig_autoc2 ^ autoc2) & 4294901760U) != 0U) { autoc2 = autoc2 & 65535U; autoc2 = (hw->mac.orig_autoc2 & 4294901760U) | autoc2; ixgbe_write_reg(hw, 17064U, autoc2); } else { } } (*(hw->mac.ops.get_mac_addr))(hw, (u8 *)(& hw->mac.perm_addr)); hw->mac.num_rar_entries = 128U; (*(hw->mac.ops.init_rx_addrs))(hw); (*(hw->mac.ops.get_san_mac_addr))(hw, (u8 *)(& hw->mac.san_addr)); tmp___3 = is_valid_ether_addr((u8 const *)(& hw->mac.san_addr)); if ((int )tmp___3) { (*(hw->mac.ops.set_rar))(hw, hw->mac.num_rar_entries - 1U, (u8 *)(& hw->mac.san_addr), 0U, 2147483648U); hw->mac.san_mac_rar_index = (unsigned int )((u8 )hw->mac.num_rar_entries) - 1U; hw->mac.num_rar_entries = hw->mac.num_rar_entries - 1U; } else { } (*(hw->mac.ops.get_wwn_prefix))(hw, & hw->mac.wwnn_prefix, & hw->mac.wwpn_prefix); return (status); } } s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw ) { int i ; u32 fdirctrl ; u32 tmp ; u32 tmp___0 ; struct _ddebug descriptor ; long tmp___1 ; u32 tmp___2 ; u32 tmp___3 ; u32 tmp___4 ; struct _ddebug descriptor___0 ; long tmp___5 ; { tmp = ixgbe_read_reg(hw, 60928U); fdirctrl = tmp; fdirctrl = fdirctrl & 4294967287U; i = 0; goto ldv_55706; ldv_55705: tmp___0 = ixgbe_read_reg(hw, 60972U); if ((tmp___0 & 3U) == 0U) { goto ldv_55704; } else { } __const_udelay(42950UL); i = i + 1; ldv_55706: ; if (i <= 9) { goto ldv_55705; } else { } 
/* ixgbe_reinit_fdir_tables_82599 (tail): the preceding loop polled FDIRCMD
 * (offset 60972U == 0xEE2C) for a pending command; i > 9 means it never
 * went idle, so abort the table re-init with -23 (IXGBE_ERR_FDIR_REINIT_FAILED,
 * presumably — confirm against ixgbe_type.h). */
ldv_55704: ; if (i > 9) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_reinit_fdir_tables_82599"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor.format = "Flow Director previous command isn\'t complete, aborting table re-initialization.\n"; descriptor.lineno = 1270U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flow Director previous command isn\'t complete, aborting table re-initialization.\n"); } else { } return (-23); } else { }
/* Clear the free-list register, pulse the CLEARHT bit (256U) in FDIRCMD on
 * and off, zero FDIRHASH (60968U), then rewrite FDIRCTRL (60928U == 0xEE00)
 * with INIT_DONE cleared. Each ixgbe_read_reg(hw, 8U) (STATUS) is a posted-
 * write flush. */
ixgbe_write_reg(hw, 60984U, 0U); ixgbe_read_reg(hw, 8U); tmp___2 = ixgbe_read_reg(hw, 60972U); ixgbe_write_reg(hw, 60972U, tmp___2 | 256U); ixgbe_read_reg(hw, 8U); tmp___3 = ixgbe_read_reg(hw, 60972U); ixgbe_write_reg(hw, 60972U, tmp___3 & 4294967039U); ixgbe_read_reg(hw, 8U); ixgbe_write_reg(hw, 60968U, 0U); ixgbe_read_reg(hw, 8U); ixgbe_write_reg(hw, 60928U, fdirctrl); ixgbe_read_reg(hw, 8U);
/* Poll FDIRCTRL for the INIT_DONE bit (8U), up to 10 x 1-2 ms. */
i = 0; goto ldv_55711; ldv_55710: tmp___4 = ixgbe_read_reg(hw, 60928U); if ((tmp___4 & 8U) != 0U) { goto ldv_55709; } else { } usleep_range(1000UL, 2000UL); i = i + 1; ldv_55711: ; if (i <= 9) { goto ldv_55710; } else { } ldv_55709: ;
/* INIT_DONE never asserted: log and fail with -23. */
if (i > 9) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_reinit_fdir_tables_82599"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor___0.format = "Flow Director Signature poll time exceeded!\n"; descriptor___0.lineno = 1309U; descriptor___0.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___5 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flow Director Signature poll time exceeded!\n"); } else { } return (-23); } else { }
/* Read the five FDIR statistics registers to clear them (read-to-clear,
 * presumably), then report success. */
ixgbe_read_reg(hw, 61008U); ixgbe_read_reg(hw, 61012U); ixgbe_read_reg(hw, 61016U); ixgbe_read_reg(hw, 61020U); ixgbe_read_reg(hw, 61004U); return (0); } }
/* ixgbe_fdir_enable_82599: program the two Flow Director hash keys
 * (61032U/61036U, values 1034753250U/390936084U are the bucket and signature
 * keys also used by the software hash below), write the caller-composed
 * FDIRCTRL, then poll INIT_DONE (bit 8U) for up to 10 x 1-2 ms, logging on
 * timeout. No return value — timeout is debug-logged only. */
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw , u32 fdirctrl ) { int i ; u32 tmp ; struct _ddebug descriptor ; long tmp___0 ; { ixgbe_write_reg(hw, 61032U, 1034753250U); ixgbe_write_reg(hw, 61036U, 390936084U); ixgbe_write_reg(hw, 60928U, fdirctrl); ixgbe_read_reg(hw, 8U); i = 0; goto ldv_55720; ldv_55719: tmp = ixgbe_read_reg(hw, 60928U); if ((tmp & 8U) != 0U) { goto ldv_55718; } else { } usleep_range(1000UL, 2000UL); i = i + 1; ldv_55720: ; if (i <= 9) { goto ldv_55719; } else { } ldv_55718: ; if (i > 9) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_fdir_enable_82599"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor.format = "Flow Director poll time exceeded!\n"; descriptor.lineno = 1359U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flow Director poll time exceeded!\n"); } else { } } else { } return; } }
/* Thin wrappers: OR the mode-specific FDIRCTRL bits (signature vs perfect
 * filtering; the two constants differ only in the extra perfect-mode fields)
 * into the caller's value and delegate to ixgbe_fdir_enable_82599. */
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw , u32 fdirctrl ) { { fdirctrl = fdirctrl | 1241907200U; ixgbe_fdir_enable_82599(hw, fdirctrl); return (0); } } s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw , u32 fdirctrl ) { { fdirctrl = fdirctrl | 1241939760U; ixgbe_fdir_enable_82599(hw, fdirctrl); return (0); } } static u32
ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input , union ixgbe_atr_hash_dword common ) { u32 hi_hash_dword ; u32 lo_hash_dword ; u32 flow_vm_vlan ; u32 sig_hash ; u32 bucket_hash ; u32 common_hash ; __u32 tmp ; __u32 tmp___0 ; u32 n ; u32 n___0 ; u32 n___1 ; u32 n___2 ; u32 n___3 ; u32 n___4 ; u32 n___5 ; u32 n___6 ; u32 n___7 ; u32 n___8 ; u32 n___9 ; u32 n___10 ; u32 n___11 ; u32 n___12 ; u32 n___13 ; u32 n___14 ; { sig_hash = 0U; bucket_hash = 0U; common_hash = 0U; tmp = __fswab32(input.dword); flow_vm_vlan = tmp; tmp___0 = __fswab32(common.dword); hi_hash_dword = tmp___0; lo_hash_dword = (hi_hash_dword << 16) | (hi_hash_dword >> (8UL * sizeof(hi_hash_dword) - 16UL)); hi_hash_dword = ((flow_vm_vlan >> 16) ^ flow_vm_vlan) ^ hi_hash_dword; n = 0U; if ((353178624 >> (int )n) & 1) { common_hash = (lo_hash_dword >> (int )n) ^ common_hash; } else if ((1034753250 >> (int )n) & 1) { bucket_hash = (lo_hash_dword >> (int )n) ^ bucket_hash; } else if ((390936084 >> (int )n) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n)) ^ sig_hash; } else { } if ((353178624 >> (int )(n + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n) ^ common_hash; } else if ((1034753250 >> (int )(n + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n) ^ bucket_hash; } else if ((390936084 >> (int )(n + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n)) ^ sig_hash; } else { } lo_hash_dword = ((flow_vm_vlan << 16) ^ flow_vm_vlan) ^ lo_hash_dword; n___0 = 1U; if ((353178624 >> (int )n___0) & 1) { common_hash = (lo_hash_dword >> (int )n___0) ^ common_hash; } else if ((1034753250 >> (int )n___0) & 1) { bucket_hash = (lo_hash_dword >> (int )n___0) ^ bucket_hash; } else if ((390936084 >> (int )n___0) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___0)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___0 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___0) ^ common_hash; } else if ((1034753250 >> (int )(n___0 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int 
)n___0) ^ bucket_hash; } else if ((390936084 >> (int )(n___0 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___0)) ^ sig_hash; } else { } n___1 = 2U; if ((353178624 >> (int )n___1) & 1) { common_hash = (lo_hash_dword >> (int )n___1) ^ common_hash; } else if ((1034753250 >> (int )n___1) & 1) { bucket_hash = (lo_hash_dword >> (int )n___1) ^ bucket_hash; } else if ((390936084 >> (int )n___1) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___1)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___1 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___1) ^ common_hash; } else if ((1034753250 >> (int )(n___1 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___1) ^ bucket_hash; } else if ((390936084 >> (int )(n___1 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___1)) ^ sig_hash; } else { } n___2 = 3U; if ((353178624 >> (int )n___2) & 1) { common_hash = (lo_hash_dword >> (int )n___2) ^ common_hash; } else if ((1034753250 >> (int )n___2) & 1) { bucket_hash = (lo_hash_dword >> (int )n___2) ^ bucket_hash; } else if ((390936084 >> (int )n___2) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___2)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___2 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___2) ^ common_hash; } else if ((1034753250 >> (int )(n___2 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___2) ^ bucket_hash; } else if ((390936084 >> (int )(n___2 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___2)) ^ sig_hash; } else { } n___3 = 4U; if ((353178624 >> (int )n___3) & 1) { common_hash = (lo_hash_dword >> (int )n___3) ^ common_hash; } else if ((1034753250 >> (int )n___3) & 1) { bucket_hash = (lo_hash_dword >> (int )n___3) ^ bucket_hash; } else if ((390936084 >> (int )n___3) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___3)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___3 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___3) ^ common_hash; } else if ((1034753250 >> (int )(n___3 + 16U)) & 
1) { bucket_hash = (hi_hash_dword >> (int )n___3) ^ bucket_hash; } else if ((390936084 >> (int )(n___3 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___3)) ^ sig_hash; } else { } n___4 = 5U; if ((353178624 >> (int )n___4) & 1) { common_hash = (lo_hash_dword >> (int )n___4) ^ common_hash; } else if ((1034753250 >> (int )n___4) & 1) { bucket_hash = (lo_hash_dword >> (int )n___4) ^ bucket_hash; } else if ((390936084 >> (int )n___4) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___4)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___4 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___4) ^ common_hash; } else if ((1034753250 >> (int )(n___4 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___4) ^ bucket_hash; } else if ((390936084 >> (int )(n___4 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___4)) ^ sig_hash; } else { } n___5 = 6U; if ((353178624 >> (int )n___5) & 1) { common_hash = (lo_hash_dword >> (int )n___5) ^ common_hash; } else if ((1034753250 >> (int )n___5) & 1) { bucket_hash = (lo_hash_dword >> (int )n___5) ^ bucket_hash; } else if ((390936084 >> (int )n___5) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___5)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___5 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___5) ^ common_hash; } else if ((1034753250 >> (int )(n___5 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___5) ^ bucket_hash; } else if ((390936084 >> (int )(n___5 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___5)) ^ sig_hash; } else { } n___6 = 7U; if ((353178624 >> (int )n___6) & 1) { common_hash = (lo_hash_dword >> (int )n___6) ^ common_hash; } else if ((1034753250 >> (int )n___6) & 1) { bucket_hash = (lo_hash_dword >> (int )n___6) ^ bucket_hash; } else if ((390936084 >> (int )n___6) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___6)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___6 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___6) ^ common_hash; } else 
if ((1034753250 >> (int )(n___6 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___6) ^ bucket_hash; } else if ((390936084 >> (int )(n___6 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___6)) ^ sig_hash; } else { } n___7 = 8U; if ((353178624 >> (int )n___7) & 1) { common_hash = (lo_hash_dword >> (int )n___7) ^ common_hash; } else if ((1034753250 >> (int )n___7) & 1) { bucket_hash = (lo_hash_dword >> (int )n___7) ^ bucket_hash; } else if ((390936084 >> (int )n___7) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___7)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___7 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___7) ^ common_hash; } else if ((1034753250 >> (int )(n___7 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___7) ^ bucket_hash; } else if ((390936084 >> (int )(n___7 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___7)) ^ sig_hash; } else { } n___8 = 9U; if ((353178624 >> (int )n___8) & 1) { common_hash = (lo_hash_dword >> (int )n___8) ^ common_hash; } else if ((1034753250 >> (int )n___8) & 1) { bucket_hash = (lo_hash_dword >> (int )n___8) ^ bucket_hash; } else if ((390936084 >> (int )n___8) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___8)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___8 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___8) ^ common_hash; } else if ((1034753250 >> (int )(n___8 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___8) ^ bucket_hash; } else if ((390936084 >> (int )(n___8 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___8)) ^ sig_hash; } else { } n___9 = 10U; if ((353178624 >> (int )n___9) & 1) { common_hash = (lo_hash_dword >> (int )n___9) ^ common_hash; } else if ((1034753250 >> (int )n___9) & 1) { bucket_hash = (lo_hash_dword >> (int )n___9) ^ bucket_hash; } else if ((390936084 >> (int )n___9) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___9)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___9 + 16U)) & 1) { common_hash = 
(hi_hash_dword >> (int )n___9) ^ common_hash; } else if ((1034753250 >> (int )(n___9 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___9) ^ bucket_hash; } else if ((390936084 >> (int )(n___9 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___9)) ^ sig_hash; } else { } n___10 = 11U; if ((353178624 >> (int )n___10) & 1) { common_hash = (lo_hash_dword >> (int )n___10) ^ common_hash; } else if ((1034753250 >> (int )n___10) & 1) { bucket_hash = (lo_hash_dword >> (int )n___10) ^ bucket_hash; } else if ((390936084 >> (int )n___10) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___10)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___10 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___10) ^ common_hash; } else if ((1034753250 >> (int )(n___10 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___10) ^ bucket_hash; } else if ((390936084 >> (int )(n___10 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___10)) ^ sig_hash; } else { } n___11 = 12U; if ((353178624 >> (int )n___11) & 1) { common_hash = (lo_hash_dword >> (int )n___11) ^ common_hash; } else if ((1034753250 >> (int )n___11) & 1) { bucket_hash = (lo_hash_dword >> (int )n___11) ^ bucket_hash; } else if ((390936084 >> (int )n___11) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___11)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___11 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___11) ^ common_hash; } else if ((1034753250 >> (int )(n___11 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___11) ^ bucket_hash; } else if ((390936084 >> (int )(n___11 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___11)) ^ sig_hash; } else { } n___12 = 13U; if ((353178624 >> (int )n___12) & 1) { common_hash = (lo_hash_dword >> (int )n___12) ^ common_hash; } else if ((1034753250 >> (int )n___12) & 1) { bucket_hash = (lo_hash_dword >> (int )n___12) ^ bucket_hash; } else if ((390936084 >> (int )n___12) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___12)) ^ 
sig_hash; } else { } if ((353178624 >> (int )(n___12 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___12) ^ common_hash; } else if ((1034753250 >> (int )(n___12 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___12) ^ bucket_hash; } else if ((390936084 >> (int )(n___12 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___12)) ^ sig_hash; } else { } n___13 = 14U; if ((353178624 >> (int )n___13) & 1) { common_hash = (lo_hash_dword >> (int )n___13) ^ common_hash; } else if ((1034753250 >> (int )n___13) & 1) { bucket_hash = (lo_hash_dword >> (int )n___13) ^ bucket_hash; } else if ((390936084 >> (int )n___13) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___13)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___13 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___13) ^ common_hash; } else if ((1034753250 >> (int )(n___13 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___13) ^ bucket_hash; } else if ((390936084 >> (int )(n___13 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___13)) ^ sig_hash; } else { } n___14 = 15U; if ((353178624 >> (int )n___14) & 1) { common_hash = (lo_hash_dword >> (int )n___14) ^ common_hash; } else if ((1034753250 >> (int )n___14) & 1) { bucket_hash = (lo_hash_dword >> (int )n___14) ^ bucket_hash; } else if ((390936084 >> (int )n___14) & 1) { sig_hash = (lo_hash_dword << (int )(16U - n___14)) ^ sig_hash; } else { } if ((353178624 >> (int )(n___14 + 16U)) & 1) { common_hash = (hi_hash_dword >> (int )n___14) ^ common_hash; } else if ((1034753250 >> (int )(n___14 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___14) ^ bucket_hash; } else if ((390936084 >> (int )(n___14 + 16U)) & 1) { sig_hash = (hi_hash_dword << (int )(16U - n___14)) ^ sig_hash; } else { } bucket_hash = bucket_hash ^ common_hash; bucket_hash = bucket_hash & 32767U; sig_hash = (common_hash << 16) ^ sig_hash; sig_hash = sig_hash & 2147418112U; return (sig_hash ^ bucket_hash); } } s32 
/* ixgbe_fdir_add_signature_filter_82599: build and issue a Flow Director
 * signature-filter add command. Accepted flow_type values are 1,2,3,5,6,7
 * (the TCP/UDP/SCTP v4/v6 encodings, presumably — confirm against
 * ixgbe_atr_flow_type); anything else is rejected with -4. */
ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw , union ixgbe_atr_hash_dword input , union ixgbe_atr_hash_dword common , u8 queue ) { u64 fdirhashcmd ; u32 fdircmd ; struct _ddebug descriptor ; long tmp ; u32 tmp___0 ; struct _ddebug descriptor___0 ; long tmp___1 ; { switch ((int )input.formatted.flow_type) { case 2: ; case 1: ; case 3: ; case 6: ; case 5: ; case 7: ; goto ldv_55771; default: descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_fdir_add_signature_filter_82599"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor.format = " Error on flow type input\n"; descriptor.lineno = 1534U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, " Error on flow type input\n"); } else { } return (-4); }
/* Compose FDIRCMD (base 34825U = add/ update-enable bits, flow type at
 * bit 5, RX queue at bit 16) in the upper 32 bits and the computed
 * signature hash in the lower 32, then issue both with one 64-bit write
 * to FDIRHASH/FDIRCMD (offset 60968U). */
ldv_55771: fdircmd = 34825U; fdircmd = (u32 )((int )input.formatted.flow_type << 5) | fdircmd; fdircmd = ((unsigned int )queue << 16) | fdircmd; fdirhashcmd = (unsigned long long )fdircmd << 32; tmp___0 = ixgbe_atr_compute_sig_hash_82599(input, common); fdirhashcmd = (u64 )tmp___0 | fdirhashcmd; ixgbe_write_reg64(hw, 60968U, fdirhashcmd);
/* Debug-only trace of the queue and hash actually programmed. */
descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_fdir_add_signature_filter_82599"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor___0.format = "Tx Queue=%x hash=%x\n"; descriptor___0.lineno = 1552U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Tx Queue=%x hash=%x\n", (int )queue, (unsigned int )fdirhashcmd); } else { } return (0); } }
/* ixgbe_atr_compute_perfect_hash_82599: bucket hash for perfect filters.
 * Masks input in place (dword_stream[0..10] &= mask), XOR-folds all masked
 * dwords into one value, then applies only the bucket-key (1034753250U)
 * portion of the same per-bit scheme used by the signature hash above.
 * Result is stored into input->formatted.bkt_hash masked to 8191U (13 bits
 * here, vs 15 in the signature path — NOTE(review): mirrors upstream, but
 * worth confirming against the ixgbe driver source). */
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input , union ixgbe_atr_input *input_mask ) { u32 hi_hash_dword ; u32 lo_hash_dword ; u32 flow_vm_vlan ; u32 bucket_hash ; u32 hi_dword ; int i ; __u32 tmp ; __u32 tmp___0 ; u32 n ; u32 n___0 ; { bucket_hash = 0U; hi_dword = 0U; i = 0; goto ldv_55787; ldv_55786: input->dword_stream[i] = input->dword_stream[i] & input_mask->dword_stream[i]; i = i + 1; ldv_55787: ; if (i <= 10) { goto ldv_55786; } else { } tmp = __fswab32(input->dword_stream[0]); flow_vm_vlan = tmp; i = 1; goto ldv_55790; ldv_55789: hi_dword = input->dword_stream[i] ^ hi_dword; i = i + 1; ldv_55790: ; if (i <= 10) { goto ldv_55789; } else { } tmp___0 = __fswab32(hi_dword); hi_hash_dword = tmp___0;
/* Same lo/hi dword preparation as the signature hash: lo = rol16(hi),
 * fold flow_vm_vlan into hi, handle bit 0, fold into lo, then bits 1..15. */
lo_hash_dword = (hi_hash_dword << 16) | (hi_hash_dword >> (8UL * sizeof(hi_hash_dword) - 16UL)); hi_hash_dword = ((flow_vm_vlan >> 16) ^ flow_vm_vlan) ^ hi_hash_dword; n = 0U; if ((1034753250 >> (int )n) & 1) { bucket_hash = (lo_hash_dword >> (int )n) ^ bucket_hash; } else { } if ((1034753250 >> (int )(n + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n) ^ bucket_hash; } else { } lo_hash_dword = ((flow_vm_vlan << 16) ^ flow_vm_vlan) ^ lo_hash_dword; i = 1; goto ldv_55795; ldv_55794: n___0 = (u32 )i; if ((1034753250 >> (int )n___0) & 1) { bucket_hash = (lo_hash_dword >> (int )n___0) ^ bucket_hash; } else { } if ((1034753250 >> (int )(n___0 + 16U)) & 1) { bucket_hash = (hi_hash_dword >> (int )n___0) ^ bucket_hash; } else { } i = i + 1; ldv_55795: ; if (i <= 15) { goto ldv_55794; } else { } input->formatted.bkt_hash = (unsigned int )((__be16 )bucket_hash) & 8191U; return; } }
/* ixgbe_get_fdirtcpm_82599 (head): packs the byte-swapped dst/src port
 * masks into one u32 (continues on the next source line). */
static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask ) { u32 mask ; __u16 tmp ; __u16 tmp___0 ; { tmp = __fswab16((int
)input_mask->formatted.dst_port); mask = (u32 )tmp; mask = mask << 16; tmp___0 = __fswab16((int )input_mask->formatted.src_port); mask = (u32 )tmp___0 | mask; mask = ((mask & 1431655765U) << 1) | ((mask & 2863311530U) >> 1); mask = ((mask & 858993459U) << 2) | ((mask & 3435973836U) >> 2); mask = ((mask & 252645135U) << 4) | ((mask & 4042322160U) >> 4); return (((mask & 16711935U) << 8) | ((mask & 4278255360U) >> 8)); } } s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw , union ixgbe_atr_input *input_mask ) { u32 fdirm ; u32 fdirtcpm ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; struct _ddebug descriptor___2 ; long tmp___2 ; __u16 tmp___3 ; struct _ddebug descriptor___3 ; long tmp___4 ; struct _ddebug descriptor___4 ; long tmp___5 ; __u32 tmp___6 ; __u32 tmp___7 ; __u32 tmp___8 ; __u32 tmp___9 ; __u32 tmp___10 ; __u32 tmp___11 ; __u32 tmp___12 ; __u32 tmp___13 ; { fdirm = 32U; if ((unsigned int )input_mask->formatted.bkt_hash != 0U) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_fdir_set_input_mask_82599"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor.format = " bucket hash should always be 0 in mask\n"; descriptor.lineno = 1681U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, " bucket hash should always be 0 in mask\n"); } else { } } else { } switch ((int )input_mask->formatted.vm_pool & 127) { case 0: fdirm = fdirm | 4U; case 127: ; goto ldv_55811; default: descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_fdir_set_input_mask_82599"; descriptor___0.filename = 
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor___0.format = " Error on vm pool mask\n"; descriptor___0.lineno = 1690U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, " Error on vm pool mask\n"); } else { } return (-4); } ldv_55811: ; switch ((int )input_mask->formatted.flow_type & 3) { case 0: fdirm = fdirm | 8U; if ((unsigned int )input_mask->formatted.dst_port != 0U || (unsigned int )input_mask->formatted.src_port != 0U) { descriptor___1.modname = "ixgbe"; descriptor___1.function = "ixgbe_fdir_set_input_mask_82599"; descriptor___1.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor___1.format = " Error on src/dst port mask\n"; descriptor___1.lineno = 1699U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, " Error on src/dst port mask\n"); } else { } return (-4); } else { } case 3: ; goto ldv_55817; default: descriptor___2.modname = "ixgbe"; descriptor___2.function = "ixgbe_fdir_set_input_mask_82599"; descriptor___2.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor___2.format = " Error on flow type mask\n"; descriptor___2.lineno = 
/* (generated CIL / LDV harness, ixgbe 82599 flow director) Tail of ixgbe_fdir_set_input_mask_82599: validates the byte-swapped VLAN-id mask and the flex-bytes mask, OR-ing bits into fdirm; an unsupported mask value logs a dynamic-debug message and returns -4. Register offsets are raw numerics produced by CIL; 61040U etc. are presumably the FDIR mask registers -- confirm against ixgbe_type.h. */ 1705U; descriptor___2.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, " Error on flow type mask\n"); } else { } return (-4); } ldv_55817: tmp___3 = __fswab16((int )input_mask->formatted.vlan_id); switch ((int )tmp___3 & 61439) { case 0: fdirm = fdirm | 1U; case 4095: fdirm = fdirm | 2U; goto ldv_55822; case 57344: fdirm = fdirm | 1U; case 61439: ; goto ldv_55822; default: descriptor___3.modname = "ixgbe"; descriptor___3.function = "ixgbe_fdir_set_input_mask_82599"; descriptor___3.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor___3.format = " Error on VLAN mask\n"; descriptor___3.lineno = 1724U; descriptor___3.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___4 != 0L) { __dynamic_netdev_dbg(& descriptor___3, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, " Error on VLAN mask\n"); } else { } return (-4); } ldv_55822: ; switch ((int )input_mask->formatted.flex_bytes) { case 0: fdirm = fdirm | 16U; case 65535: ; goto ldv_55829; default: descriptor___4.modname = "ixgbe"; descriptor___4.function = "ixgbe_fdir_set_input_mask_82599"; descriptor___4.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor___4.format = " Error on flexible byte mask\n"; descriptor___4.lineno = 1735U; descriptor___4.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___4.flags & 1L, 0L); if (tmp___5 != 0L) { __dynamic_netdev_dbg(& descriptor___4, (struct net_device const 
*)((struct ixgbe_adapter *)hw->back)->netdev, " Error on flexible byte mask\n"); } else { } return (-4); } ldv_55829: ixgbe_write_reg(hw, 61040U, fdirm); fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); ixgbe_write_reg(hw, 60996U, ~ fdirtcpm); ixgbe_write_reg(hw, 61000U, ~ fdirtcpm); tmp___6 = __fswab32(~ input_mask->formatted.src_ip[0]); tmp___7 = __fswab32(~ input_mask->formatted.src_ip[0]); tmp___8 = __fswab32(~ input_mask->formatted.src_ip[0]); tmp___9 = __fswab32(~ input_mask->formatted.src_ip[0]); ixgbe_write_reg(hw, 60992U, (((tmp___6 >> 24) | ((tmp___7 & 16711680U) >> 8)) | ((tmp___8 & 65280U) << 8)) | (tmp___9 << 24)); tmp___10 = __fswab32(~ input_mask->formatted.dst_ip[0]); tmp___11 = __fswab32(~ input_mask->formatted.dst_ip[0]); tmp___12 = __fswab32(~ input_mask->formatted.dst_ip[0]); tmp___13 = __fswab32(~ input_mask->formatted.dst_ip[0]); ixgbe_write_reg(hw, 60988U, (((tmp___10 >> 24) | ((tmp___11 & 16711680U) >> 8)) | ((tmp___12 & 65280U) << 8)) | (tmp___13 << 24)); return (0); } } /* Programs one "perfect" flow-director filter. The repeated __fswab32 calls plus the shift/mask expression are CIL's expansion of a single swab32() on each IP word; the result is written to consecutive filter registers, followed by port, VLAN/flex, hash (bkt_hash | soft_id<<16) and finally the FDIRCMD word that encodes flow_type, rx queue and vm_pool. queue == 127 sets an extra command bit (drop queue, presumably -- confirm). Always returns 0. */ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw , union ixgbe_atr_input *input , u16 soft_id , u8 queue ) { u32 fdirport ; u32 fdirvlan ; u32 fdirhash ; u32 fdircmd ; __u32 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; __u32 tmp___5 ; __u32 tmp___6 ; __u32 tmp___7 ; __u32 tmp___8 ; __u32 tmp___9 ; __u32 tmp___10 ; __u32 tmp___11 ; __u32 tmp___12 ; __u32 tmp___13 ; __u32 tmp___14 ; __u32 tmp___15 ; __u32 tmp___16 ; __u32 tmp___17 ; __u32 tmp___18 ; __u16 tmp___19 ; __u16 tmp___20 ; __u16 tmp___21 ; __u16 tmp___22 ; { tmp = __fswab32(input->formatted.src_ip[0]); tmp___0 = __fswab32(input->formatted.src_ip[0]); tmp___1 = __fswab32(input->formatted.src_ip[0]); tmp___2 = __fswab32(input->formatted.src_ip[0]); ixgbe_write_reg(hw, 60940U, (((tmp >> 24) | ((tmp___0 & 16711680U) >> 8)) | ((tmp___1 & 65280U) << 8)) | (tmp___2 << 24)); tmp___3 = __fswab32(input->formatted.src_ip[1]); tmp___4 = __fswab32(input->formatted.src_ip[1]); 
tmp___5 = __fswab32(input->formatted.src_ip[1]); tmp___6 = __fswab32(input->formatted.src_ip[1]); ixgbe_write_reg(hw, 60944U, (((tmp___3 >> 24) | ((tmp___4 & 16711680U) >> 8)) | ((tmp___5 & 65280U) << 8)) | (tmp___6 << 24)); tmp___7 = __fswab32(input->formatted.src_ip[2]); tmp___8 = __fswab32(input->formatted.src_ip[2]); tmp___9 = __fswab32(input->formatted.src_ip[2]); tmp___10 = __fswab32(input->formatted.src_ip[2]); ixgbe_write_reg(hw, 60948U, (((tmp___7 >> 24) | ((tmp___8 & 16711680U) >> 8)) | ((tmp___9 & 65280U) << 8)) | (tmp___10 << 24)); tmp___11 = __fswab32(input->formatted.src_ip[0]); tmp___12 = __fswab32(input->formatted.src_ip[0]); tmp___13 = __fswab32(input->formatted.src_ip[0]); tmp___14 = __fswab32(input->formatted.src_ip[0]); ixgbe_write_reg(hw, 60952U, (((tmp___11 >> 24) | ((tmp___12 & 16711680U) >> 8)) | ((tmp___13 & 65280U) << 8)) | (tmp___14 << 24)); tmp___15 = __fswab32(input->formatted.dst_ip[0]); tmp___16 = __fswab32(input->formatted.dst_ip[0]); tmp___17 = __fswab32(input->formatted.dst_ip[0]); tmp___18 = __fswab32(input->formatted.dst_ip[0]); ixgbe_write_reg(hw, 60956U, (((tmp___15 >> 24) | ((tmp___16 & 16711680U) >> 8)) | ((tmp___17 & 65280U) << 8)) | (tmp___18 << 24)); tmp___19 = __fswab16((int )input->formatted.dst_port); fdirport = (u32 )tmp___19; fdirport = fdirport << 16; tmp___20 = __fswab16((int )input->formatted.src_port); fdirport = (u32 )tmp___20 | fdirport; ixgbe_write_reg(hw, 60960U, fdirport); tmp___21 = __fswab16((int )((unsigned short )((int )((short )((int )input->formatted.flex_bytes >> 8)) | (int )((short )((int )input->formatted.flex_bytes << 8))))); fdirvlan = (u32 )tmp___21; fdirvlan = fdirvlan << 16; tmp___22 = __fswab16((int )input->formatted.vlan_id); fdirvlan = (u32 )tmp___22 | fdirvlan; ixgbe_write_reg(hw, 60964U, fdirvlan); fdirhash = (u32 )input->formatted.bkt_hash; fdirhash = (u32 )((int )soft_id << 16) | fdirhash; ixgbe_write_reg(hw, 60968U, fdirhash); ixgbe_read_reg(hw, 8U); fdircmd = 34825U; if ((unsigned int 
)queue == 127U) { fdircmd = fdircmd | 512U; } else { } fdircmd = (u32 )((int )input->formatted.flow_type << 5) | fdircmd; fdircmd = ((unsigned int )queue << 16) | fdircmd; fdircmd = ((unsigned int )input->formatted.vm_pool << 24) | fdircmd; ixgbe_write_reg(hw, 60972U, fdircmd); return (0); } } /* Removes a perfect filter: re-arms the hash register (bkt_hash | soft_id<<16), issues a query command (3U), polls up to 10 times with a udelay between reads until the command bits clear, then -- if the "filter found" bit (4U) is set -- rewrites the hash and issues the remove command (2U). Returns -23 when the poll times out, else 0. */ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw , union ixgbe_atr_input *input , u16 soft_id ) { u32 fdirhash ; u32 fdircmd ; u32 retry_count ; s32 err ; { fdircmd = 0U; err = 0; fdirhash = (u32 )input->formatted.bkt_hash; fdirhash = (u32 )((int )soft_id << 16) | fdirhash; ixgbe_write_reg(hw, 60968U, fdirhash); ixgbe_read_reg(hw, 8U); ixgbe_write_reg(hw, 60972U, 3U); retry_count = 10U; goto ldv_55853; ldv_55852: __const_udelay(42950UL); fdircmd = ixgbe_read_reg(hw, 60972U); if ((fdircmd & 3U) == 0U) { goto ldv_55851; } else { } retry_count = retry_count - 1U; ldv_55853: ; if (retry_count != 0U) { goto ldv_55852; } else { } ldv_55851: ; if (retry_count == 0U) { err = -23; } else { } if ((fdircmd & 4U) != 0U) { ixgbe_write_reg(hw, 60968U, fdirhash); ixgbe_read_reg(hw, 8U); ixgbe_write_reg(hw, 60972U, 2U); } else { } return (err); } } /* Reads an 8-bit analog PHY register: writes (reg<<8)|read-flag to offset 85760U (CORECTL, presumably -- confirm), flushes via a read of offset 8U (status register flush idiom used throughout this file), delays, then returns the low byte through *val. Always returns 0. */ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw , u32 reg , u8 *val ) { u32 core_ctl ; { ixgbe_write_reg(hw, 85760U, (reg << 8) | 65536U); ixgbe_read_reg(hw, 8U); __const_udelay(42950UL); core_ctl = ixgbe_read_reg(hw, 85760U); *val = (unsigned char )core_ctl; return (0); } } /* Writes an 8-bit analog PHY register through the same register/flush/delay mechanism as the read path above. Always returns 0. */ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw , u32 reg , u8 val ) { u32 core_ctl ; { core_ctl = (reg << 8) | (u32 )val; ixgbe_write_reg(hw, 85760U, core_ctl); ixgbe_read_reg(hw, 8U); __const_udelay(42950UL); return (0); } } /* 82599 start_hw: runs the generic and gen2 start sequences (propagating their errors), sets mac.autotry_restart, then falls through to the firmware-version check below. The second "if (ret_val != 0)" after gen2 already returned cannot fire again here; it is dead-but-harmless CIL output. */ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw ) { s32 ret_val ; s32 tmp ; { ret_val = 0; ret_val = ixgbe_start_hw_generic(hw); if (ret_val != 0) { return (ret_val); } else { } ret_val = ixgbe_start_hw_gen2(hw); if (ret_val != 0) { return (ret_val); } else { } hw->mac.autotry_restart = 1; if (ret_val != 0) { 
return (ret_val); } else { } tmp = ixgbe_verify_fw_version_82599(hw); return (tmp); } } /* Identifies the PHY via the generic probe; on failure, returns immediately for media type 4U (fixed/backplane, presumably -- confirm against enum ixgbe_media_type), otherwise tries SFP module identification. An unknown PHY type (0U) is coerced to type 1 with success; type 23U is rejected with -19. */ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw ) { s32 status ; enum ixgbe_media_type tmp ; { status = ixgbe_identify_phy_generic(hw); if (status != 0) { tmp = (*(hw->mac.ops.get_media_type))(hw); if ((unsigned int )tmp == 4U) { return (status); } else { } status = ixgbe_identify_module_generic(hw); } else { } if ((unsigned int )hw->phy.type == 0U) { hw->phy.type = 1; status = 0; } else { } if ((unsigned int )hw->phy.type == 23U) { return (-19); } else { } return (status); } } /* Enables or disables Rx DMA based on bit 0 of regval, bracketing the change with disable_rx_buff/enable_rx_buff callbacks (quiesce the Rx buffer around the switch). Always returns 0. */ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw , u32 regval ) { { (*(hw->mac.ops.disable_rx_buff))(hw); if ((int )regval & 1) { (*(hw->mac.ops.enable_rx))(hw); } else { (*(hw->mac.ops.disable_rx))(hw); } (*(hw->mac.ops.enable_rx_buff))(hw); return (0); } } /* Verifies firmware version from EEPROM (copper media, type 1U, is exempted and passes). Walks pointers: word 15 -> fw_offset, fw_offset+4 -> ptp-config offset, +7 -> version word; 0 or 0xFFFF at any step means "no/blank EEPROM" (-24). Versions > 5 pass; read failures log via netdev_err. */ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw ) { s32 status ; u16 fw_offset ; u16 fw_ptp_cfg_offset ; u16 offset ; u16 fw_version ; s32 tmp ; s32 tmp___0 ; s32 tmp___1 ; { status = -24; fw_version = 0U; if ((unsigned int )hw->phy.media_type != 1U) { return (0); } else { } offset = 15U; tmp = (*(hw->eeprom.ops.read))(hw, (int )offset, & fw_offset); if (tmp != 0) { goto fw_version_err; } else { } if ((unsigned int )fw_offset == 0U || (unsigned int )fw_offset == 65535U) { return (-24); } else { } offset = (unsigned int )fw_offset + 4U; tmp___0 = (*(hw->eeprom.ops.read))(hw, (int )offset, & fw_ptp_cfg_offset); if (tmp___0 != 0) { goto fw_version_err; } else { } if ((unsigned int )fw_ptp_cfg_offset == 0U || (unsigned int )fw_ptp_cfg_offset == 65535U) { return (-24); } else { } offset = (unsigned int )fw_ptp_cfg_offset + 7U; tmp___1 = (*(hw->eeprom.ops.read))(hw, (int )offset, & fw_version); if (tmp___1 != 0) { goto fw_version_err; } else { } if ((unsigned int )fw_version > 5U) { status = 0; } else { } return (status); fw_version_err: netdev_err((struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "eeprom 
read at offset %d failed\n", (int )offset); return (-24); } } /* Returns true when LESM firmware is enabled: follows EEPROM word 15 -> fw_offset, +2 -> LESM parameter block, +1 -> state word, and tests its sign bit (the "(short) < 0" check is a high-bit test). Any read failure or 0/0xFFFF pointer yields false. */ static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw ) { u16 fw_offset ; u16 fw_lesm_param_offset ; u16 fw_lesm_state ; s32 status ; { status = (*(hw->eeprom.ops.read))(hw, 15, & fw_offset); if ((status != 0 || (unsigned int )fw_offset == 0U) || (unsigned int )fw_offset == 65535U) { return (0); } else { } status = (*(hw->eeprom.ops.read))(hw, (int )((unsigned int )fw_offset + 2U), & fw_lesm_param_offset); if ((status != 0 || (unsigned int )fw_lesm_param_offset == 0U) || (unsigned int )fw_lesm_param_offset == 65535U) { return (0); } else { } status = (*(hw->eeprom.ops.read))(hw, (int )((unsigned int )fw_lesm_param_offset + 1U), & fw_lesm_state); if (status == 0 && (int )((short )fw_lesm_state) < 0) { return (1); } else { } return (0); } } /* Multi-word EEPROM read: uses fast EERD access when eeprom->type == 1U and the whole range fits below 16384 words, else falls back to bit-bang. */ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) { struct ixgbe_eeprom_info *eeprom ; s32 tmp ; s32 tmp___0 ; { eeprom = & hw->eeprom; if ((unsigned int )eeprom->type == 1U && (int )offset + ((int )words + -1) <= 16383) { tmp = ixgbe_read_eerd_buffer_generic(hw, (int )offset, (int )words, data); return (tmp); } else { } tmp___0 = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, (int )offset, (int )words, data); return (tmp___0); } } /* Single-word EEPROM read: same EERD-vs-bit-bang selection as the buffer variant above. */ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw , u16 offset , u16 *data ) { struct ixgbe_eeprom_info *eeprom ; s32 tmp ; s32 tmp___0 ; { eeprom = & hw->eeprom; if ((unsigned int )eeprom->type == 1U && (unsigned int )offset <= 16383U) { tmp = ixgbe_read_eerd_generic(hw, (int )offset, data); return (tmp); } else { } tmp___0 = ixgbe_read_eeprom_bit_bang_generic(hw, (int )offset, data); return (tmp___0); } } /* Resets the Tx/Rx data pipeline: clears a field in AUTOC2 (offset 17064U) if set, then sets an AUTOC bit and writes AUTOC with another bit toggled to force a link restart; polls (below) for autonegotiation completion. */ static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw ) { s32 ret_val ; u32 anlp1_reg ; u32 i ; u32 autoc_reg ; u32 autoc2_reg ; struct _ddebug descriptor ; long tmp ; { anlp1_reg = 0U; autoc2_reg = ixgbe_read_reg(hw, 17064U); if ((autoc2_reg & 1879048192U) != 0U) { 
/* (generated CIL) Continuation of ixgbe_reset_pipeline_82599: after the AUTOC toggle, polls ANLP1 (offset 17072U) up to 10 times with usleep_range for the AN-completion bits (mask 983040U); on timeout emits a dynamic-debug message and returns -15, otherwise 0. AUTOC is restored on both paths. */ autoc2_reg = autoc2_reg & 2415919103U; ixgbe_write_reg(hw, 17064U, autoc2_reg); ixgbe_read_reg(hw, 8U); } else { } autoc_reg = ixgbe_read_reg(hw, 17056U); autoc_reg = autoc_reg | 4096U; ixgbe_write_reg(hw, 17056U, autoc_reg ^ 32768U); i = 0U; goto ldv_55917; ldv_55916: usleep_range(4000UL, 8000UL); anlp1_reg = ixgbe_read_reg(hw, 17072U); if ((anlp1_reg & 983040U) != 0U) { goto ldv_55915; } else { } i = i + 1U; ldv_55917: ; if (i <= 9U) { goto ldv_55916; } else { } ldv_55915: ; if ((anlp1_reg & 983040U) == 0U) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_reset_pipeline_82599"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor.format = "auto negotiation not completed\n"; descriptor.lineno = 2171U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "auto negotiation not completed\n"); } else { } ret_val = -15; goto reset_pipeline_out; } else { } ret_val = 0; reset_pipeline_out: ixgbe_write_reg(hw, 17056U, autoc_reg); ixgbe_read_reg(hw, 8U); return (ret_val); } } /* I2C byte read with optional shared-QSFP bus arbitration: when phy.qsfp_shared_i2c_bus is set, requests the bus by setting ESDP (offset 32U) bit 0 and polls bit 1 as the grant, up to 200 iterations of usleep_range; timeout logs a dynamic-debug message and returns -18. The actual transfer is delegated to ixgbe_read_i2c_byte_generic, and the bus-request bit is always cleared afterwards. */ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw , u8 byte_offset , u8 dev_addr , u8 *data ) { u32 esdp ; s32 status ; s32 timeout ; struct _ddebug descriptor ; long tmp ; { timeout = 200; if ((int )hw->phy.qsfp_shared_i2c_bus) { esdp = ixgbe_read_reg(hw, 32U); esdp = esdp | 1U; ixgbe_write_reg(hw, 32U, esdp); ixgbe_read_reg(hw, 8U); goto ldv_55932; ldv_55931: esdp = ixgbe_read_reg(hw, 32U); if ((esdp & 2U) != 0U) { goto ldv_55930; } else { } usleep_range(5000UL, 10000UL); timeout = timeout - 1; ldv_55932: ; if (timeout != 0) { goto ldv_55931; } else { } ldv_55930: ; if (timeout == 0) { descriptor.modname = "ixgbe"; 
descriptor.function = "ixgbe_read_i2c_byte_82599"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; descriptor.format = "Driver can\'t access resource, acquiring I2C bus timeout.\n"; descriptor.lineno = 2219U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Driver can\'t access resource, acquiring I2C bus timeout.\n"); } else { } status = -18; goto release_i2c_access; } else { } } else { } status = ixgbe_read_i2c_byte_generic(hw, (int )byte_offset, (int )dev_addr, data); release_i2c_access: ; if ((int )hw->phy.qsfp_shared_i2c_bus) { esdp = ixgbe_read_reg(hw, 32U); esdp = esdp & 4294967294U; ixgbe_write_reg(hw, 32U, esdp); ixgbe_read_reg(hw, 8U); } else { } return (status); } } /* I2C byte write: mirror of the read path above -- same ESDP bus-arbitration handshake, same -18 timeout path, transfer delegated to ixgbe_write_i2c_byte_generic, bus released on exit. */ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw , u8 byte_offset , u8 dev_addr , u8 data ) { u32 esdp ; s32 status ; s32 timeout ; struct _ddebug descriptor ; long tmp ; { timeout = 200; if ((int )hw->phy.qsfp_shared_i2c_bus) { esdp = ixgbe_read_reg(hw, 32U); esdp = esdp | 1U; ixgbe_write_reg(hw, 32U, esdp); ixgbe_read_reg(hw, 8U); goto ldv_55947; ldv_55946: esdp = ixgbe_read_reg(hw, 32U); if ((esdp & 2U) != 0U) { goto ldv_55945; } else { } usleep_range(5000UL, 10000UL); timeout = timeout - 1; ldv_55947: ; if (timeout != 0) { goto ldv_55946; } else { } ldv_55945: ; if (timeout == 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_write_i2c_byte_82599"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c"; 
descriptor.format = "Driver can\'t access resource, acquiring I2C bus timeout.\n"; descriptor.lineno = 2272U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Driver can\'t access resource, acquiring I2C bus timeout.\n"); } else { } status = -18; goto release_i2c_access; } else { } } else { } status = ixgbe_write_i2c_byte_generic(hw, (int )byte_offset, (int )dev_addr, (int )data); release_i2c_access: ; if ((int )hw->phy.qsfp_shared_i2c_bus) { esdp = ixgbe_read_reg(hw, 32U); esdp = esdp & 4294967294U; ixgbe_write_reg(hw, 32U, esdp); ixgbe_read_reg(hw, 8U); } else { } return (status); } } /* 82599 MAC operations table: positional initializer of struct ixgbe_mac_operations mixing 82599-specific and generic callbacks; the 0 entries are unimplemented slots (field order must match the struct declaration elsewhere in this file -- do not reorder). */ static struct ixgbe_mac_operations mac_ops_82599 = {& ixgbe_init_hw_generic, & ixgbe_reset_hw_82599, & ixgbe_start_hw_82599, & ixgbe_clear_hw_cntrs_generic, & ixgbe_get_media_type_82599, & ixgbe_get_mac_addr_generic, & ixgbe_get_san_mac_addr_generic, & ixgbe_get_device_caps_generic, & ixgbe_get_wwn_prefix_generic, & ixgbe_stop_adapter_generic, & ixgbe_get_bus_info_generic, & ixgbe_set_lan_id_multi_port_pcie, & ixgbe_read_analog_reg8_82599, & ixgbe_write_analog_reg8_82599, & ixgbe_setup_sfp_modules_82599, & ixgbe_disable_rx_buff_generic, & ixgbe_enable_rx_buff_generic, & ixgbe_enable_rx_dma_82599, & ixgbe_acquire_swfw_sync, & ixgbe_release_swfw_sync, & prot_autoc_read_82599, & prot_autoc_write_82599, 0, 0, 0, & ixgbe_stop_mac_link_on_d3_82599, & ixgbe_setup_mac_link_82599, & ixgbe_check_mac_link_generic, & ixgbe_get_link_capabilities_82599, & ixgbe_set_rxpba_generic, & ixgbe_led_on_generic, & ixgbe_led_off_generic, & ixgbe_blink_led_start_generic, & ixgbe_blink_led_stop_generic, & ixgbe_set_rar_generic, & ixgbe_clear_rar_generic, & ixgbe_set_vmdq_generic, & ixgbe_set_vmdq_san_mac_generic, & ixgbe_clear_vmdq_generic, & ixgbe_init_rx_addrs_generic, & ixgbe_update_mc_addr_list_generic, & ixgbe_enable_mc_generic, & 
/* (generated CIL) Tail of the mac_ops_82599 positional initializer, followed by the EEPROM and PHY operation tables, the exported ixgbe_82599_info descriptor, and LDV harness glue (group initializers and the nondeterministic dispatcher for eeprom ops). */ ixgbe_disable_mc_generic, & ixgbe_clear_vfta_generic, & ixgbe_set_vfta_generic, & ixgbe_init_uta_tables_generic, & ixgbe_set_mac_anti_spoofing, & ixgbe_set_vlan_anti_spoofing, & ixgbe_fc_enable_generic, & ixgbe_set_fw_drv_ver_generic, & ixgbe_get_thermal_sensor_data_generic, & ixgbe_init_thermal_sensor_thresh_generic, & ixgbe_disable_rx_generic, & ixgbe_enable_rx_generic, 0, 0, 0, 0, 0}; /* 82599 EEPROM operations table (init, read, buffered read, write, buffered write, validate/update/calc checksum). */ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {& ixgbe_init_eeprom_params_generic, & ixgbe_read_eeprom_82599, & ixgbe_read_eeprom_buffer_82599, & ixgbe_write_eeprom_generic, & ixgbe_write_eeprom_buffer_bit_bang_generic, & ixgbe_validate_eeprom_checksum_generic, & ixgbe_update_eeprom_checksum_generic, & ixgbe_calc_eeprom_checksum_generic}; /* 82599 PHY operations table; 0 entries are unimplemented slots. */ static struct ixgbe_phy_operations phy_ops_82599 = {& ixgbe_identify_phy_82599, & ixgbe_identify_module_generic, & ixgbe_init_phy_ops_82599, & ixgbe_reset_phy_generic, & ixgbe_read_phy_reg_generic, & ixgbe_write_phy_reg_generic, 0, 0, & ixgbe_setup_phy_link_generic, 0, & ixgbe_setup_phy_link_speed_generic, 0, 0, & ixgbe_read_i2c_byte_generic, & ixgbe_write_i2c_byte_generic, & ixgbe_read_i2c_sff8472_generic, & ixgbe_read_i2c_eeprom_generic, & ixgbe_write_i2c_eeprom_generic, 0, 0, & ixgbe_tn_check_overtemp, 0, 0}; /* Top-level device descriptor tying the three op tables together for the 82599 MAC type (2). */ struct ixgbe_info ixgbe_82599_info = {2, & ixgbe_get_invariants_82599, & mac_ops_82599, & eeprom_ops_82599, & phy_ops_82599, & mbx_ops_generic, (u32 const *)(& ixgbe_mvals_8259X)}; extern int ldv_release_33(void) ; extern int ldv_setup_32(void) ; extern int ldv_release_34(void) ; extern int ldv_probe_33(void) ; extern int ldv_release_32(void) ; extern int ldv_probe_34(void) ; /* LDV harness: allocates a zeroed 1696-byte ixgbe_hw as the shared receiver for mac_ops callbacks. */ void ldv_initialize_ixgbe_mac_operations_34(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); mac_ops_82599_group0 = (struct ixgbe_hw *)tmp; return; } } /* LDV harness: same, for phy_ops callbacks. */ void ldv_initialize_ixgbe_phy_operations_32(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); phy_ops_82599_group0 = (struct ixgbe_hw *)tmp; return; } } void 
ldv_initialize_ixgbe_eeprom_operations_33(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); eeprom_ops_82599_group0 = (struct ixgbe_hw *)tmp; return; } } /* LDV harness dispatcher for the eeprom_ops_82599 callback group: builds nondeterministic arguments, then uses __VERIFIER_nondet_int() to pick one callback per invocation, tracking a 1/2 open-closed state machine in ldv_state_variable_33 (probe moves 1->2, release 2->1). */ void ldv_main_exported_33(void) { u16 ldvarg2 ; u16 ldvarg9 ; u16 ldvarg10 ; u16 ldvarg4 ; u16 *ldvarg1 ; void *tmp ; u16 *ldvarg8 ; void *tmp___0 ; u16 ldvarg6 ; u16 *ldvarg5 ; void *tmp___1 ; u16 *ldvarg0 ; void *tmp___2 ; u16 ldvarg3 ; u16 ldvarg7 ; int tmp___3 ; { tmp = ldv_init_zalloc(2UL); ldvarg1 = (u16 *)tmp; tmp___0 = ldv_init_zalloc(2UL); ldvarg8 = (u16 *)tmp___0; tmp___1 = ldv_init_zalloc(2UL); ldvarg5 = (u16 *)tmp___1; tmp___2 = ldv_init_zalloc(2UL); ldvarg0 = (u16 *)tmp___2; ldv_memset((void *)(& ldvarg2), 0, 2UL); ldv_memset((void *)(& ldvarg9), 0, 2UL); ldv_memset((void *)(& ldvarg10), 0, 2UL); ldv_memset((void *)(& ldvarg4), 0, 2UL); ldv_memset((void *)(& ldvarg6), 0, 2UL); ldv_memset((void *)(& ldvarg3), 0, 2UL); ldv_memset((void *)(& ldvarg7), 0, 2UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_33 == 2) { ixgbe_write_eeprom_buffer_bit_bang_generic(eeprom_ops_82599_group0, (int )ldvarg10, (int )ldvarg9, ldvarg8); ldv_state_variable_33 = 2; } else { } goto ldv_55991; case 1: ; if (ldv_state_variable_33 == 2) { ixgbe_read_eeprom_buffer_82599(eeprom_ops_82599_group0, (int )ldvarg7, (int )ldvarg6, ldvarg5); ldv_state_variable_33 = 2; } else { } goto ldv_55991; case 2: ; if (ldv_state_variable_33 == 1) { ixgbe_update_eeprom_checksum_generic(eeprom_ops_82599_group0); ldv_state_variable_33 = 1; } else { } if (ldv_state_variable_33 == 2) { ixgbe_update_eeprom_checksum_generic(eeprom_ops_82599_group0); ldv_state_variable_33 = 2; } else { } goto ldv_55991; case 3: ; if (ldv_state_variable_33 == 1) { ixgbe_calc_eeprom_checksum_generic(eeprom_ops_82599_group0); ldv_state_variable_33 = 1; } else { } if (ldv_state_variable_33 == 2) { ixgbe_calc_eeprom_checksum_generic(eeprom_ops_82599_group0); ldv_state_variable_33 = 2; } else { } goto ldv_55991; case 4: ; if 
(ldv_state_variable_33 == 1) { ixgbe_write_eeprom_generic(eeprom_ops_82599_group0, (int )ldvarg4, (int )ldvarg3); ldv_state_variable_33 = 1; } else { } if (ldv_state_variable_33 == 2) { ixgbe_write_eeprom_generic(eeprom_ops_82599_group0, (int )ldvarg4, (int )ldvarg3); ldv_state_variable_33 = 2; } else { } goto ldv_55991; case 5: ; if (ldv_state_variable_33 == 1) { ixgbe_read_eeprom_82599(eeprom_ops_82599_group0, (int )ldvarg2, ldvarg1); ldv_state_variable_33 = 1; } else { } if (ldv_state_variable_33 == 2) { ixgbe_read_eeprom_82599(eeprom_ops_82599_group0, (int )ldvarg2, ldvarg1); ldv_state_variable_33 = 2; } else { } goto ldv_55991; case 6: ; if (ldv_state_variable_33 == 1) { ixgbe_init_eeprom_params_generic(eeprom_ops_82599_group0); ldv_state_variable_33 = 1; } else { } if (ldv_state_variable_33 == 2) { ixgbe_init_eeprom_params_generic(eeprom_ops_82599_group0); ldv_state_variable_33 = 2; } else { } goto ldv_55991; case 7: ; if (ldv_state_variable_33 == 1) { ixgbe_validate_eeprom_checksum_generic(eeprom_ops_82599_group0, ldvarg0); ldv_state_variable_33 = 1; } else { } if (ldv_state_variable_33 == 2) { ixgbe_validate_eeprom_checksum_generic(eeprom_ops_82599_group0, ldvarg0); ldv_state_variable_33 = 2; } else { } goto ldv_55991; case 8: ; if (ldv_state_variable_33 == 2) { ldv_release_33(); ldv_state_variable_33 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_55991; case 9: ; if (ldv_state_variable_33 == 1) { ldv_probe_33(); ldv_state_variable_33 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_55991; default: ldv_stop(); } ldv_55991: ; return; } } /* LDV harness dispatcher for the phy_ops_82599 callback group: identical pattern to ldv_main_exported_33 but with a three-state machine (1 = idle, 2 = set up, 3 = phy ops initialized). */ void ldv_main_exported_32(void) { u8 ldvarg28 ; u32 ldvarg19 ; u8 ldvarg22 ; u8 ldvarg25 ; u16 *ldvarg14 ; void *tmp ; u16 ldvarg17 ; ixgbe_link_speed ldvarg21 ; u32 ldvarg16 ; u32 ldvarg15 ; u8 ldvarg30 ; u8 ldvarg26 ; u8 *ldvarg27 ; void *tmp___0 ; u8 ldvarg24 ; u8 *ldvarg29 ; void *tmp___1 ; u8 ldvarg13 ; u8 ldvarg12 ; u8 ldvarg23 ; bool ldvarg20 ; u8 *ldvarg11 ; void *tmp___2 ; u32 ldvarg18 ; int tmp___3 ; { tmp = 
/* (generated CIL) Body of ldv_main_exported_32: zero-initializes the nondeterministic argument pool, then dispatches one phy_ops callback chosen by __VERIFIER_nondet_int(), guarded by the ldv_state_variable_32 state machine (setup: 1->2, init_phy_ops: 2->3, release: 2/3->1). */ ldv_init_zalloc(2UL); ldvarg14 = (u16 *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg27 = (u8 *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg29 = (u8 *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg11 = (u8 *)tmp___2; ldv_memset((void *)(& ldvarg28), 0, 1UL); ldv_memset((void *)(& ldvarg19), 0, 4UL); ldv_memset((void *)(& ldvarg22), 0, 1UL); ldv_memset((void *)(& ldvarg25), 0, 1UL); ldv_memset((void *)(& ldvarg17), 0, 2UL); ldv_memset((void *)(& ldvarg21), 0, 4UL); ldv_memset((void *)(& ldvarg16), 0, 4UL); ldv_memset((void *)(& ldvarg15), 0, 4UL); ldv_memset((void *)(& ldvarg30), 0, 1UL); ldv_memset((void *)(& ldvarg26), 0, 1UL); ldv_memset((void *)(& ldvarg24), 0, 1UL); ldv_memset((void *)(& ldvarg13), 0, 1UL); ldv_memset((void *)(& ldvarg12), 0, 1UL); ldv_memset((void *)(& ldvarg23), 0, 1UL); ldv_memset((void *)(& ldvarg20), 0, 1UL); ldv_memset((void *)(& ldvarg18), 0, 4UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_32 == 1) { ixgbe_reset_phy_generic(phy_ops_82599_group0); ldv_state_variable_32 = 1; } else { } if (ldv_state_variable_32 == 3) { ixgbe_reset_phy_generic(phy_ops_82599_group0); ldv_state_variable_32 = 3; } else { } if (ldv_state_variable_32 == 2) { ixgbe_reset_phy_generic(phy_ops_82599_group0); ldv_state_variable_32 = 2; } else { } goto ldv_56026; case 1: ; if (ldv_state_variable_32 == 3) { ixgbe_read_i2c_eeprom_generic(phy_ops_82599_group0, (int )ldvarg30, ldvarg29); ldv_state_variable_32 = 3; } else { } goto ldv_56026; case 2: ; if (ldv_state_variable_32 == 1) { ixgbe_read_i2c_sff8472_generic(phy_ops_82599_group0, (int )ldvarg28, ldvarg27); ldv_state_variable_32 = 1; } else { } if (ldv_state_variable_32 == 3) { ixgbe_read_i2c_sff8472_generic(phy_ops_82599_group0, (int )ldvarg28, ldvarg27); ldv_state_variable_32 = 3; } else { } if (ldv_state_variable_32 == 2) { ixgbe_read_i2c_sff8472_generic(phy_ops_82599_group0, (int )ldvarg28, ldvarg27); ldv_state_variable_32 = 2; } else { } goto ldv_56026; case 3: ; if 
(ldv_state_variable_32 == 3) { ixgbe_write_i2c_byte_generic(phy_ops_82599_group0, (int )ldvarg26, (int )ldvarg25, (int )ldvarg24); ldv_state_variable_32 = 3; } else { } goto ldv_56026; case 4: ; if (ldv_state_variable_32 == 1) { ixgbe_identify_phy_82599(phy_ops_82599_group0); ldv_state_variable_32 = 1; } else { } if (ldv_state_variable_32 == 3) { ixgbe_identify_phy_82599(phy_ops_82599_group0); ldv_state_variable_32 = 3; } else { } if (ldv_state_variable_32 == 2) { ixgbe_identify_phy_82599(phy_ops_82599_group0); ldv_state_variable_32 = 2; } else { } goto ldv_56026; case 5: ; if (ldv_state_variable_32 == 1) { ixgbe_setup_phy_link_generic(phy_ops_82599_group0); ldv_state_variable_32 = 1; } else { } if (ldv_state_variable_32 == 3) { ixgbe_setup_phy_link_generic(phy_ops_82599_group0); ldv_state_variable_32 = 3; } else { } if (ldv_state_variable_32 == 2) { ixgbe_setup_phy_link_generic(phy_ops_82599_group0); ldv_state_variable_32 = 2; } else { } goto ldv_56026; case 6: ; if (ldv_state_variable_32 == 1) { ixgbe_write_i2c_eeprom_generic(phy_ops_82599_group0, (int )ldvarg23, (int )ldvarg22); ldv_state_variable_32 = 1; } else { } if (ldv_state_variable_32 == 3) { ixgbe_write_i2c_eeprom_generic(phy_ops_82599_group0, (int )ldvarg23, (int )ldvarg22); ldv_state_variable_32 = 3; } else { } if (ldv_state_variable_32 == 2) { ixgbe_write_i2c_eeprom_generic(phy_ops_82599_group0, (int )ldvarg23, (int )ldvarg22); ldv_state_variable_32 = 2; } else { } goto ldv_56026; case 7: ; if (ldv_state_variable_32 == 1) { ixgbe_setup_phy_link_speed_generic(phy_ops_82599_group0, ldvarg21, (int )ldvarg20); ldv_state_variable_32 = 1; } else { } if (ldv_state_variable_32 == 3) { ixgbe_setup_phy_link_speed_generic(phy_ops_82599_group0, ldvarg21, (int )ldvarg20); ldv_state_variable_32 = 3; } else { } if (ldv_state_variable_32 == 2) { ixgbe_setup_phy_link_speed_generic(phy_ops_82599_group0, ldvarg21, (int )ldvarg20); ldv_state_variable_32 = 2; } else { } goto ldv_56026; case 8: ; if (ldv_state_variable_32 
== 1) { ixgbe_write_phy_reg_generic(phy_ops_82599_group0, ldvarg19, ldvarg18, (int )ldvarg17); ldv_state_variable_32 = 1; } else { } if (ldv_state_variable_32 == 3) { ixgbe_write_phy_reg_generic(phy_ops_82599_group0, ldvarg19, ldvarg18, (int )ldvarg17); ldv_state_variable_32 = 3; } else { } if (ldv_state_variable_32 == 2) { ixgbe_write_phy_reg_generic(phy_ops_82599_group0, ldvarg19, ldvarg18, (int )ldvarg17); ldv_state_variable_32 = 2; } else { } goto ldv_56026; case 9: ; if (ldv_state_variable_32 == 1) { ixgbe_identify_module_generic(phy_ops_82599_group0); ldv_state_variable_32 = 1; } else { } if (ldv_state_variable_32 == 3) { ixgbe_identify_module_generic(phy_ops_82599_group0); ldv_state_variable_32 = 3; } else { } if (ldv_state_variable_32 == 2) { ixgbe_identify_module_generic(phy_ops_82599_group0); ldv_state_variable_32 = 2; } else { } goto ldv_56026; case 10: ; if (ldv_state_variable_32 == 1) { ixgbe_read_phy_reg_generic(phy_ops_82599_group0, ldvarg16, ldvarg15, ldvarg14); ldv_state_variable_32 = 1; } else { } if (ldv_state_variable_32 == 3) { ixgbe_read_phy_reg_generic(phy_ops_82599_group0, ldvarg16, ldvarg15, ldvarg14); ldv_state_variable_32 = 3; } else { } if (ldv_state_variable_32 == 2) { ixgbe_read_phy_reg_generic(phy_ops_82599_group0, ldvarg16, ldvarg15, ldvarg14); ldv_state_variable_32 = 2; } else { } goto ldv_56026; case 11: ; if (ldv_state_variable_32 == 1) { ixgbe_tn_check_overtemp(phy_ops_82599_group0); ldv_state_variable_32 = 1; } else { } if (ldv_state_variable_32 == 3) { ixgbe_tn_check_overtemp(phy_ops_82599_group0); ldv_state_variable_32 = 3; } else { } if (ldv_state_variable_32 == 2) { ixgbe_tn_check_overtemp(phy_ops_82599_group0); ldv_state_variable_32 = 2; } else { } goto ldv_56026; case 12: ; if (ldv_state_variable_32 == 1) { ixgbe_read_i2c_byte_generic(phy_ops_82599_group0, (int )ldvarg13, (int )ldvarg12, ldvarg11); ldv_state_variable_32 = 1; } else { } if (ldv_state_variable_32 == 3) { ixgbe_read_i2c_byte_generic(phy_ops_82599_group0, (int 
)ldvarg13, (int )ldvarg12, ldvarg11); ldv_state_variable_32 = 3; } else { } if (ldv_state_variable_32 == 2) { ixgbe_read_i2c_byte_generic(phy_ops_82599_group0, (int )ldvarg13, (int )ldvarg12, ldvarg11); ldv_state_variable_32 = 2; } else { } goto ldv_56026; case 13: ; if (ldv_state_variable_32 == 2) { ixgbe_init_phy_ops_82599(phy_ops_82599_group0); ldv_state_variable_32 = 3; } else { } goto ldv_56026; case 14: ; if (ldv_state_variable_32 == 1) { ldv_setup_32(); ldv_state_variable_32 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_56026; case 15: ; if (ldv_state_variable_32 == 3) { ldv_release_32(); ldv_state_variable_32 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_32 == 2) { ldv_release_32(); ldv_state_variable_32 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_56026; default: ldv_stop(); } ldv_56026: ; return; } } /* LDV harness dispatcher for the mac_ops_82599 callback group: declares the nondeterministic argument pool for all MAC callbacks (the body continues below). */ void ldv_main_exported_34(void) { u32 ldvarg505 ; int ldvarg537 ; u32 ldvarg513 ; bool *ldvarg511 ; void *tmp ; bool ldvarg538 ; u32 ldvarg551 ; u8 *ldvarg503 ; void *tmp___0 ; int ldvarg532 ; u32 ldvarg528 ; u32 *ldvarg545 ; void *tmp___1 ; bool ldvarg548 ; u32 ldvarg530 ; u16 *ldvarg525 ; void *tmp___2 ; u16 *ldvarg526 ; void *tmp___3 ; u32 ldvarg531 ; bool ldvarg523 ; u8 *ldvarg517 ; void *tmp___4 ; struct net_device *ldvarg527 ; void *tmp___5 ; u32 ldvarg544 ; u32 ldvarg549 ; bool ldvarg510 ; u8 *ldvarg521 ; void *tmp___6 ; u32 ldvarg502 ; u8 ldvarg508 ; bool ldvarg533 ; u32 ldvarg519 ; ixgbe_link_speed *ldvarg516 ; void *tmp___7 ; bool *ldvarg546 ; void *tmp___8 ; u32 ldvarg547 ; u32 ldvarg529 ; u32 ldvarg504 ; u8 ldvarg507 ; u32 ldvarg552 ; u32 ldvarg520 ; u32 ldvarg542 ; u8 *ldvarg514 ; void *tmp___9 ; u32 ldvarg550 ; u8 ldvarg509 ; int ldvarg534 ; bool *ldvarg515 ; void *tmp___10 ; u8 ldvarg506 ; u16 *ldvarg501 ; void *tmp___11 ; u32 ldvarg540 ; ixgbe_link_speed ldvarg524 ; ixgbe_link_speed *ldvarg512 ; void *tmp___12 ; u32 ldvarg541 ; u8 ldvarg518 ; bool ldvarg543 ; u32 ldvarg522 ; u32 ldvarg535 ; int ldvarg536 ; u32 ldvarg539 ; 
int tmp___13 ; { tmp = ldv_init_zalloc(1UL); ldvarg511 = (bool *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg503 = (u8 *)tmp___0; tmp___1 = ldv_init_zalloc(4UL); ldvarg545 = (u32 *)tmp___1; tmp___2 = ldv_init_zalloc(2UL); ldvarg525 = (u16 *)tmp___2; tmp___3 = ldv_init_zalloc(2UL); ldvarg526 = (u16 *)tmp___3; tmp___4 = ldv_init_zalloc(1UL); ldvarg517 = (u8 *)tmp___4; tmp___5 = ldv_init_zalloc(3008UL); ldvarg527 = (struct net_device *)tmp___5; tmp___6 = ldv_init_zalloc(1UL); ldvarg521 = (u8 *)tmp___6; tmp___7 = ldv_init_zalloc(4UL); ldvarg516 = (ixgbe_link_speed *)tmp___7; tmp___8 = ldv_init_zalloc(1UL); ldvarg546 = (bool *)tmp___8; tmp___9 = ldv_init_zalloc(1UL); ldvarg514 = (u8 *)tmp___9; tmp___10 = ldv_init_zalloc(1UL); ldvarg515 = (bool *)tmp___10; tmp___11 = ldv_init_zalloc(2UL); ldvarg501 = (u16 *)tmp___11; tmp___12 = ldv_init_zalloc(4UL); ldvarg512 = (ixgbe_link_speed *)tmp___12; ldv_memset((void *)(& ldvarg505), 0, 4UL); ldv_memset((void *)(& ldvarg537), 0, 4UL); ldv_memset((void *)(& ldvarg513), 0, 4UL); ldv_memset((void *)(& ldvarg538), 0, 1UL); ldv_memset((void *)(& ldvarg551), 0, 4UL); ldv_memset((void *)(& ldvarg532), 0, 4UL); ldv_memset((void *)(& ldvarg528), 0, 4UL); ldv_memset((void *)(& ldvarg548), 0, 1UL); ldv_memset((void *)(& ldvarg530), 0, 4UL); ldv_memset((void *)(& ldvarg531), 0, 4UL); ldv_memset((void *)(& ldvarg523), 0, 1UL); ldv_memset((void *)(& ldvarg544), 0, 4UL); ldv_memset((void *)(& ldvarg549), 0, 4UL); ldv_memset((void *)(& ldvarg510), 0, 1UL); ldv_memset((void *)(& ldvarg502), 0, 4UL); ldv_memset((void *)(& ldvarg508), 0, 1UL); ldv_memset((void *)(& ldvarg533), 0, 1UL); ldv_memset((void *)(& ldvarg519), 0, 4UL); ldv_memset((void *)(& ldvarg547), 0, 4UL); ldv_memset((void *)(& ldvarg529), 0, 4UL); ldv_memset((void *)(& ldvarg504), 0, 4UL); ldv_memset((void *)(& ldvarg507), 0, 1UL); ldv_memset((void *)(& ldvarg552), 0, 4UL); ldv_memset((void *)(& ldvarg520), 0, 4UL); ldv_memset((void *)(& ldvarg542), 0, 4UL); ldv_memset((void *)(& 
ldvarg550), 0, 4UL); ldv_memset((void *)(& ldvarg509), 0, 1UL); ldv_memset((void *)(& ldvarg534), 0, 4UL); ldv_memset((void *)(& ldvarg506), 0, 1UL); ldv_memset((void *)(& ldvarg540), 0, 4UL); ldv_memset((void *)(& ldvarg524), 0, 4UL); ldv_memset((void *)(& ldvarg541), 0, 4UL); ldv_memset((void *)(& ldvarg518), 0, 1UL); ldv_memset((void *)(& ldvarg543), 0, 1UL); ldv_memset((void *)(& ldvarg522), 0, 4UL); ldv_memset((void *)(& ldvarg535), 0, 4UL); ldv_memset((void *)(& ldvarg536), 0, 4UL); ldv_memset((void *)(& ldvarg539), 0, 4UL); tmp___13 = __VERIFIER_nondet_int(); switch (tmp___13) { case 0: ; if (ldv_state_variable_34 == 1) { ixgbe_stop_adapter_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_stop_adapter_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 1: ; if (ldv_state_variable_34 == 1) { ixgbe_set_vmdq_san_mac_generic(mac_ops_82599_group0, ldvarg552); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_set_vmdq_san_mac_generic(mac_ops_82599_group0, ldvarg552); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 2: ; if (ldv_state_variable_34 == 1) { ixgbe_led_off_generic(mac_ops_82599_group0, ldvarg551); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_led_off_generic(mac_ops_82599_group0, ldvarg551); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 3: ; if (ldv_state_variable_34 == 1) { ixgbe_set_vfta_generic(mac_ops_82599_group0, ldvarg550, ldvarg549, (int )ldvarg548); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_set_vfta_generic(mac_ops_82599_group0, ldvarg550, ldvarg549, (int )ldvarg548); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 4: ; if (ldv_state_variable_34 == 1) { ixgbe_stop_mac_link_on_d3_82599(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { 
ixgbe_stop_mac_link_on_d3_82599(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 5: ; if (ldv_state_variable_34 == 1) { ixgbe_enable_rx_dma_82599(mac_ops_82599_group0, ldvarg547); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_enable_rx_dma_82599(mac_ops_82599_group0, ldvarg547); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 6: ; if (ldv_state_variable_34 == 2) { prot_autoc_read_82599(mac_ops_82599_group0, ldvarg546, ldvarg545); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 7: ; if (ldv_state_variable_34 == 1) { ixgbe_enable_rx_buff_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_enable_rx_buff_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 8: ; if (ldv_state_variable_34 == 2) { prot_autoc_write_82599(mac_ops_82599_group0, ldvarg544, (int )ldvarg543); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 9: ; if (ldv_state_variable_34 == 1) { ixgbe_led_on_generic(mac_ops_82599_group0, ldvarg542); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_led_on_generic(mac_ops_82599_group0, ldvarg542); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 10: ; if (ldv_state_variable_34 == 1) { ixgbe_clear_rar_generic(mac_ops_82599_group0, ldvarg541); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_clear_rar_generic(mac_ops_82599_group0, ldvarg541); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 11: ; if (ldv_state_variable_34 == 1) { ixgbe_blink_led_stop_generic(mac_ops_82599_group0, ldvarg540); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_blink_led_stop_generic(mac_ops_82599_group0, ldvarg540); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 12: ; if (ldv_state_variable_34 == 1) { ixgbe_enable_rx_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else 
{ } if (ldv_state_variable_34 == 2) { ixgbe_enable_rx_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 13: ; if (ldv_state_variable_34 == 1) { ixgbe_get_bus_info_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_get_bus_info_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 14: ; if (ldv_state_variable_34 == 1) { ixgbe_blink_led_start_generic(mac_ops_82599_group0, ldvarg539); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_blink_led_start_generic(mac_ops_82599_group0, ldvarg539); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 15: ; if (ldv_state_variable_34 == 1) { ixgbe_disable_mc_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_disable_mc_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 16: ; if (ldv_state_variable_34 == 1) { ixgbe_set_vlan_anti_spoofing(mac_ops_82599_group0, (int )ldvarg538, ldvarg537); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_set_vlan_anti_spoofing(mac_ops_82599_group0, (int )ldvarg538, ldvarg537); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 17: ; if (ldv_state_variable_34 == 1) { ixgbe_set_rxpba_generic(mac_ops_82599_group0, ldvarg536, ldvarg535, ldvarg534); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_set_rxpba_generic(mac_ops_82599_group0, ldvarg536, ldvarg535, ldvarg534); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 18: ; if (ldv_state_variable_34 == 1) { ixgbe_init_uta_tables_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_init_uta_tables_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 19: ; if (ldv_state_variable_34 == 1) { 
ixgbe_set_mac_anti_spoofing(mac_ops_82599_group0, (int )ldvarg533, ldvarg532); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_set_mac_anti_spoofing(mac_ops_82599_group0, (int )ldvarg533, ldvarg532); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 20: ; if (ldv_state_variable_34 == 1) { ixgbe_clear_vmdq_generic(mac_ops_82599_group0, ldvarg531, ldvarg530); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_clear_vmdq_generic(mac_ops_82599_group0, ldvarg531, ldvarg530); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 21: ; if (ldv_state_variable_34 == 1) { ixgbe_set_vmdq_generic(mac_ops_82599_group0, ldvarg529, ldvarg528); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_set_vmdq_generic(mac_ops_82599_group0, ldvarg529, ldvarg528); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 22: ; if (ldv_state_variable_34 == 1) { ixgbe_clear_vfta_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_clear_vfta_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 23: ; if (ldv_state_variable_34 == 1) { ixgbe_get_media_type_82599(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_get_media_type_82599(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 24: ; if (ldv_state_variable_34 == 1) { ixgbe_update_mc_addr_list_generic(mac_ops_82599_group0, ldvarg527); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_update_mc_addr_list_generic(mac_ops_82599_group0, ldvarg527); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 25: ; if (ldv_state_variable_34 == 1) { ixgbe_get_thermal_sensor_data_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_get_thermal_sensor_data_generic(mac_ops_82599_group0); 
ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 26: ; if (ldv_state_variable_34 == 1) { ixgbe_init_rx_addrs_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_init_rx_addrs_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 27: ; if (ldv_state_variable_34 == 1) { ixgbe_fc_enable_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_fc_enable_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 28: ; if (ldv_state_variable_34 == 1) { ixgbe_get_wwn_prefix_generic(mac_ops_82599_group0, ldvarg526, ldvarg525); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_get_wwn_prefix_generic(mac_ops_82599_group0, ldvarg526, ldvarg525); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 29: ; if (ldv_state_variable_34 == 1) { ixgbe_clear_hw_cntrs_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_clear_hw_cntrs_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 30: ; if (ldv_state_variable_34 == 1) { ixgbe_setup_mac_link_82599(mac_ops_82599_group0, ldvarg524, (int )ldvarg523); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_setup_mac_link_82599(mac_ops_82599_group0, ldvarg524, (int )ldvarg523); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 31: ; if (ldv_state_variable_34 == 1) { ixgbe_read_analog_reg8_82599(mac_ops_82599_group0, ldvarg522, ldvarg521); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_read_analog_reg8_82599(mac_ops_82599_group0, ldvarg522, ldvarg521); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 32: ; if (ldv_state_variable_34 == 1) { ixgbe_disable_rx_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { 
ixgbe_disable_rx_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 33: ; if (ldv_state_variable_34 == 1) { ixgbe_acquire_swfw_sync(mac_ops_82599_group0, ldvarg520); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_acquire_swfw_sync(mac_ops_82599_group0, ldvarg520); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 34: ; if (ldv_state_variable_34 == 1) { ixgbe_set_lan_id_multi_port_pcie(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_set_lan_id_multi_port_pcie(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 35: ; if (ldv_state_variable_34 == 1) { ixgbe_start_hw_82599(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_start_hw_82599(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 36: ; if (ldv_state_variable_34 == 1) { ixgbe_write_analog_reg8_82599(mac_ops_82599_group0, ldvarg519, (int )ldvarg518); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_write_analog_reg8_82599(mac_ops_82599_group0, ldvarg519, (int )ldvarg518); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 37: ; if (ldv_state_variable_34 == 1) { ixgbe_enable_mc_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_enable_mc_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 38: ; if (ldv_state_variable_34 == 1) { ixgbe_get_mac_addr_generic(mac_ops_82599_group0, ldvarg517); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_get_mac_addr_generic(mac_ops_82599_group0, ldvarg517); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 39: ; if (ldv_state_variable_34 == 1) { ixgbe_get_link_capabilities_82599(mac_ops_82599_group0, ldvarg516, ldvarg515); ldv_state_variable_34 = 1; } else { } if 
(ldv_state_variable_34 == 2) { ixgbe_get_link_capabilities_82599(mac_ops_82599_group0, ldvarg516, ldvarg515); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 40: ; if (ldv_state_variable_34 == 1) { ixgbe_get_san_mac_addr_generic(mac_ops_82599_group0, ldvarg514); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_get_san_mac_addr_generic(mac_ops_82599_group0, ldvarg514); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 41: ; if (ldv_state_variable_34 == 1) { ixgbe_init_thermal_sensor_thresh_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_init_thermal_sensor_thresh_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 42: ; if (ldv_state_variable_34 == 1) { ixgbe_init_hw_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_init_hw_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 43: ; if (ldv_state_variable_34 == 1) { ixgbe_reset_hw_82599(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_reset_hw_82599(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 44: ; if (ldv_state_variable_34 == 1) { ixgbe_release_swfw_sync(mac_ops_82599_group0, ldvarg513); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_release_swfw_sync(mac_ops_82599_group0, ldvarg513); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 45: ; if (ldv_state_variable_34 == 1) { ixgbe_check_mac_link_generic(mac_ops_82599_group0, ldvarg512, ldvarg511, (int )ldvarg510); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_check_mac_link_generic(mac_ops_82599_group0, ldvarg512, ldvarg511, (int )ldvarg510); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 46: ; if (ldv_state_variable_34 == 1) { 
ixgbe_set_fw_drv_ver_generic(mac_ops_82599_group0, (int )ldvarg508, (int )ldvarg507, (int )ldvarg506, (int )ldvarg509); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_set_fw_drv_ver_generic(mac_ops_82599_group0, (int )ldvarg508, (int )ldvarg507, (int )ldvarg506, (int )ldvarg509); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 47: ; if (ldv_state_variable_34 == 1) { ixgbe_set_rar_generic(mac_ops_82599_group0, ldvarg504, ldvarg503, ldvarg502, ldvarg505); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_set_rar_generic(mac_ops_82599_group0, ldvarg504, ldvarg503, ldvarg502, ldvarg505); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 48: ; if (ldv_state_variable_34 == 1) { ixgbe_setup_sfp_modules_82599(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_setup_sfp_modules_82599(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 49: ; if (ldv_state_variable_34 == 1) { ixgbe_get_device_caps_generic(mac_ops_82599_group0, ldvarg501); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_get_device_caps_generic(mac_ops_82599_group0, ldvarg501); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 50: ; if (ldv_state_variable_34 == 1) { ixgbe_disable_rx_buff_generic(mac_ops_82599_group0); ldv_state_variable_34 = 1; } else { } if (ldv_state_variable_34 == 2) { ixgbe_disable_rx_buff_generic(mac_ops_82599_group0); ldv_state_variable_34 = 2; } else { } goto ldv_56099; case 51: ; if (ldv_state_variable_34 == 2) { ldv_release_34(); ldv_state_variable_34 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_56099; case 52: ; if (ldv_state_variable_34 == 1) { ldv_probe_34(); ldv_state_variable_34 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_56099; default: ldv_stop(); } ldv_56099: ; return; } } void ldv_main_exported_31(void) { struct ixgbe_hw *ldvarg324 ; void *tmp ; int tmp___0 ; { tmp = 
/* --- tail of ldv_main_exported_31() (the 'tmp =' assignment and the
 * function header are on the previous source line). ldvarg324 is a
 * zero-filled dummy struct ixgbe_hw (1696 bytes); a nondeterministic
 * int chooses the single legal FSM action. --- */
ldv_init_zalloc(1696UL); ldvarg324 = (struct ixgbe_hw *)tmp;
tmp___0 = __VERIFIER_nondet_int();  /* verifier-chosen action index */
switch (tmp___0) {
case 0: ;
  if (ldv_state_variable_31 == 1) {
    ixgbe_get_invariants_82599(ldvarg324);
    ldv_state_variable_31 = 1;  /* state machine stays in its only state */
  } else {
  }
  goto ldv_56158;
default:
  ldv_stop();  /* any other action is infeasible for the verifier */
}
ldv_56158: ;
return;
}
}
/* LDV wrapper for queue_work_on(): forward the real call, then record the
 * work item as queued (state 2) in the LDV work-struct model. */
bool ldv_queue_work_on_207(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 ,
                           struct work_struct *ldv_func_arg3 )
{ ldv_func_ret_type___2 ldv_func_res ;
  bool tmp ;
  {
  tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  ldv_func_res = tmp;
  activate_work_9(ldv_func_arg3, 2);
  return (ldv_func_res);
  }
}
/* LDV wrapper for queue_delayed_work_on(): forward the call, then mark the
 * embedded work_struct as queued (state 2) in the LDV model. */
bool ldv_queue_delayed_work_on_208(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 ,
                                   struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 )
{ ldv_func_ret_type___3 ldv_func_res ;
  bool tmp ;
  {
  tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4);
  ldv_func_res = tmp;
  activate_work_9(& ldv_func_arg3->work, 2);
  return (ldv_func_res);
  }
}
/* Same model as ldv_queue_work_on_207, generated for a different call site. */
bool ldv_queue_work_on_209(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 ,
                           struct work_struct *ldv_func_arg3 )
{ ldv_func_ret_type___4 ldv_func_res ;
  bool tmp ;
  {
  tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  ldv_func_res = tmp;
  activate_work_9(ldv_func_arg3, 2);
  return (ldv_func_res);
  }
}
/* LDV wrapper for flush_workqueue(): after the real flush, run and disable
 * every modelled work item (argument 2 selects the work-model instance). */
void ldv_flush_workqueue_210(struct workqueue_struct *ldv_func_arg1 )
{ {
  flush_workqueue(ldv_func_arg1);
  call_and_disable_all_9(2);
  return;
  }
}
/* Same model as ldv_queue_delayed_work_on_208, for a different call site. */
bool ldv_queue_delayed_work_on_211(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 ,
                                   struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 )
{ ldv_func_ret_type___5 ldv_func_res ;
  bool tmp ;
  {
  tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4);
  ldv_func_res = tmp;
  activate_work_9(& ldv_func_arg3->work, 2);
  return (ldv_func_res);
  }
}
/* LDV model of kmem_cache_alloc(): check the GFP flags against the current
 * context, then return an unconstrained pointer (body continues on the
 * next source line). */
void *ldv_kmem_cache_alloc_217(struct kmem_cache *ldv_func_arg1 , gfp_t flags )
{ void *tmp ;
  {
/* (continuation of ldv_kmem_cache_alloc_217 from the previous source line) */
ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } }
/* LDV model of pskb_expand_head(): validate GFP flags, return an
 * unconstrained int (success/failure left open to the verifier). */
int ldv_pskb_expand_head_223(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 ,
                             int ldv_func_arg3 , gfp_t flags )
{ void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((int )((long )tmp));
  }
}
/* LDV model of skb_clone(): validate GFP flags, return an unconstrained
 * sk_buff pointer (may be NULL — the verifier explores both outcomes). */
struct sk_buff *ldv_skb_clone_225(struct sk_buff *ldv_func_arg1 , gfp_t flags )
{ void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
  }
}
/* LDV model of skb_copy(): same flag check / unconstrained result pattern. */
struct sk_buff *ldv_skb_copy_227(struct sk_buff const *ldv_func_arg1 , gfp_t flags )
{ void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
  }
}
/* LDV models of __netdev_alloc_skb() — one stub per original call site. */
struct sk_buff *ldv___netdev_alloc_skb_228(struct net_device *ldv_func_arg1 ,
                                           unsigned int ldv_func_arg2 , gfp_t flags )
{ void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
  }
}
struct sk_buff *ldv___netdev_alloc_skb_229(struct net_device *ldv_func_arg1 ,
                                           unsigned int ldv_func_arg2 , gfp_t flags )
{ void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
  }
}
struct sk_buff *ldv___netdev_alloc_skb_230(struct net_device *ldv_func_arg1 ,
                                           unsigned int ldv_func_arg2 , gfp_t flags )
{ void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
  }
}
/* Additional per-call-site models of pskb_expand_head(). */
int ldv_pskb_expand_head_231(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 ,
                             int ldv_func_arg3 , gfp_t flags )
{ void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((int )((long )tmp));
  }
}
int ldv_pskb_expand_head_232(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 ,
                             int ldv_func_arg3 , gfp_t flags )
{ void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((int )((long )tmp));
  }
}
/* Per-call-site model of skb_clone(). */
struct sk_buff *ldv_skb_clone_233(struct sk_buff *ldv_func_arg1 , gfp_t flags )
{ void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
  }
}
/* LDV model of kmem_cache_alloc() (declaration continues on the next
 * source line). */
void *ldv_kmem_cache_alloc_234(struct
kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_254(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_256(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_255(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_258(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_257(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_264(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_281(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_272(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_280(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_274(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_270(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_278(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_279(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_275(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_276(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_277(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; s32 
/* (the 's32' return type of this prototype is on the previous source line) */
ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw , u32 reg_addr , u32 device_type , u16 *phy_data ) ;
s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw , u32 reg_addr , u32 device_type , u16 phy_data ) ;
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw ) ;
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete ) ;
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw , u8 byte_offset , u8 *eeprom_data ) ;
/* Configure the PCIe completion timeout for the 82598.
 * NOTE(review): register/bit names below are inferred from the raw offsets
 * and constants — confirm against the Intel 82598 datasheet. */
static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw )
{ u32 gcr ;
  u32 tmp ;
  u16 pcie_devctl2 ;
  bool tmp___0 ;
  {
  tmp = ixgbe_read_reg(hw, 69632U);  /* 0x11000 — presumably GCR */
  gcr = tmp;
  tmp___0 = ixgbe_removed((void *)hw->hw_addr);
  if ((int )tmp___0) {
    return;  /* adapter has been surprise-removed; skip register access */
  } else {
  }
  if ((gcr & 61440U) != 0U) {  /* timeout field (0xF000) already nonzero */
    goto out;
  } else {
  }
  if ((gcr & 262144U) == 0U) {  /* capability bit 0x40000 clear: set 0x1000 fallback */
    gcr = gcr | 4096U;
    goto out;
  } else {
  }
  /* Program the PCIe Device Control 2 completion-timeout field (config
   * offset 0xC8, value 0x5 — presumably the 16 ms - 55 ms range). */
  pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, 200U);
  pcie_devctl2 = (u16 )((unsigned int )pcie_devctl2 | 5U);
  ixgbe_write_pci_cfg_word(hw, 200U, (int )pcie_devctl2);
  out:
  gcr = gcr & 4294901759U;  /* clear bit 16 (mask ~0x10000) */
  ixgbe_write_reg(hw, 69632U, gcr);
  return;
  }
}
/* Fill in the 82598 MAC capability limits: 128-entry MC filter table,
 * 128-entry VLAN filter table, 16 receive address registers, 512 (KB)
 * Rx packet buffer, 64 Rx / 32 Tx queues, MSI-X count from PCIe config. */
static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw )
{ struct ixgbe_mac_info *mac ;
  {
  mac = & hw->mac;
  ixgbe_identify_phy_generic(hw);
  mac->mcft_size = 128U;
  mac->vft_size = 128U;
  mac->num_rar_entries = 16U;
  mac->rx_pb_size = 512U;
  mac->max_rx_queues = 64U;
  mac->max_tx_queues = 32U;
  mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
  return (0);
  }
}
/* Initialise PHY function pointers for the 82598: identify the PHY,
 * switch MAC link ops to the copper variants when the media type is
 * copper (4U), then specialise per detected PHY type (switch body
 * continues on the following source lines). */
static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw )
{ struct ixgbe_mac_info *mac ;
  struct ixgbe_phy_info *phy ;
  s32 ret_val ;
  u16 list_offset ;
  u16 data_offset ;
  enum ixgbe_media_type tmp ;
  {
  mac = & hw->mac;
  phy = & hw->phy;
  (*(phy->ops.identify))(hw);
  tmp = (*(mac->ops.get_media_type))(hw);
  if ((unsigned int )tmp == 4U) {  /* copper media */
    mac->ops.setup_link = & ixgbe_setup_copper_link_82598;
    mac->ops.get_link_capabilities = & ixgbe_get_copper_link_capabilities_generic;
  } else {
  }
  switch ((unsigned int )hw->phy.type) {
  case 2U:  /* TNX PHY — handler assignment completes on the next line */
  phy->ops.setup_link = &
/* (continuation of ixgbe_init_phy_ops_82598: 'phy->ops.setup_link = &'
 * ends the previous source line; this is the TNX-PHY case body) */
ixgbe_setup_phy_link_tnx;
  phy->ops.check_link = & ixgbe_check_phy_link_tnx;
  phy->ops.get_firmware_version = & ixgbe_get_phy_firmware_version_tnx;
  goto ldv_55458;
  case 10U:  /* NL PHY with SFP module */
  phy->ops.reset = & ixgbe_reset_phy_nl;
  ret_val = (*(phy->ops.identify_sfp))(hw);
  if (ret_val != 0) {
    return (ret_val);
  } else {
  }
  if ((unsigned int )hw->phy.sfp_type == 65535U) {  /* SFP type unknown */
    return (-19);  /* -ENODEV */
  } else {
  }
  /* Verify the SFP init sequence is present in the EEPROM. */
  ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, & list_offset, & data_offset);
  if (ret_val != 0) {
    return (-19);
  } else {
  }
  goto ldv_55458;
  default: ;
  goto ldv_55458;
  }
  ldv_55458: ;
  return (0);
  }
}
/* Start the 82598 hardware: run the generic start, then clear the
 * write-back relaxed-ordering bits in the per-queue Tx/Rx DCA control
 * registers (CIL-lowered loops use goto/label form).
 * NOTE(review): register identities inferred from offsets — confirm. */
static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw )
{ u32 regval ;
  u32 i ;
  s32 ret_val ;
  {
  ret_val = ixgbe_start_hw_generic(hw);
  /* Tx loop: for each Tx queue (capped at 16), clear bit 11 in the
   * register at 4*(i + 7296U) — mask 0xFFFFF7FF. */
  i = 0U;
  goto ldv_55468;
  ldv_55467:
  regval = ixgbe_read_reg(hw, (i + 7296U) * 4U);
  regval = regval & 4294965247U;
  ixgbe_write_reg(hw, (i + 7296U) * 4U, regval);
  i = i + 1U;
  ldv_55468: ;
  if (hw->mac.max_tx_queues > i && i <= 15U) {
    goto ldv_55467;
  } else {
  }
  /* Rx loop: same pattern; the queue-index-dependent register address is
   * computed by a three-way ternary (queue < 16, < 64, else). */
  i = 0U;
  goto ldv_55471;
  ldv_55470:
  regval = ixgbe_read_reg(hw, i <= 15U ? (i + 2176U) * 4U : (i <= 63U ? i * 64U + 4108U : (i + 67108800U) * 64U + 53260U));
  regval = regval & 4294926335U;  /* clear bits 13 and 15 (mask ~0xA000) */
  /* (the write's address ternary continues on the next source line) */
  ixgbe_write_reg(hw, i <= 15U ? (i + 2176U) * 4U : (i <= 63U ?
/* (continuation of ixgbe_start_hw_82598: completes the Rx register-address
 * ternary begun on the previous source line) */
i * 64U + 4108U : (i + 67108800U) * 64U + 53260U), regval);
  i = i + 1U;
  ldv_55471: ;
  if (hw->mac.max_rx_queues > i && i <= 15U) {
    goto ldv_55470;
  } else {
  }
  if (ret_val != 0) {
    return (ret_val);
  } else {
  }
  ixgbe_set_pcie_completion_timeout(hw);
  return (0);
  }
}
/* Decode the 82598 link capabilities from AUTOC (stored original settings
 * if available, otherwise the live register at 0x42A0).  Speed values:
 * 32U = 1 Gb/s, 128U = 10 Gb/s; the 0xE000 field selects the link mode. */
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw , ixgbe_link_speed *speed ,
                                             bool *autoneg )
{ u32 autoc ;
  {
  autoc = 0U;
  if ((int )hw->mac.orig_link_settings_stored) {
    autoc = hw->mac.orig_autoc;
  } else {
    autoc = ixgbe_read_reg(hw, 17056U);
  }
  switch (autoc & 57344U) {
  case 0U:      /* 1G, no autoneg */
  *speed = 32U;
  *autoneg = 0;
  goto ldv_55480;
  case 8192U:   /* 10G, no autoneg */
  *speed = 128U;
  *autoneg = 0;
  goto ldv_55480;
  case 16384U:  /* 1G with autoneg */
  *speed = 32U;
  *autoneg = 1;
  goto ldv_55480;
  case 32768U: ;
  case 49152U:  /* KX4 / combined autoneg: speeds from bits 31 and 30 */
  *speed = 0U;
  if ((int )autoc < 0) {        /* bit 31 set: 10G capable */
    *speed = *speed | 128U;
  } else {
  }
  if ((autoc & 1073741824U) != 0U) {  /* bit 30 set: 1G capable */
    *speed = *speed | 32U;
  } else {
  }
  *autoneg = 1;
  goto ldv_55480;
  default: ;
  return (-8);  /* unrecognised link-mode field */
  }
  ldv_55480: ;
  return (0);
  }
}
/* Map PHY type / device ID to an ixgbe media type.  4 = copper (matches
 * the copper test in ixgbe_init_phy_ops_82598); other enum values are
 * returned per device ID — NOTE(review): names not visible here, confirm
 * against enum ixgbe_media_type. */
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw )
{ {
  switch ((unsigned int )hw->phy.type) {
  case 7U: ;
  case 2U: ;
  return (4);  /* copper PHY types */
  default: ;
  goto ldv_55492;
  }
  ldv_55492: ;
  switch ((int )hw->device_id) {
  case 4278: ;
  case 5384: ;
  return (5);
  case 4294: ;
  case 4295: ;
  case 4337: ;
  case 4321: ;
  case 4340: ;
  case 4315: ;
  return (1);
  case 4317: ;
  case 4332: ;
  return (6);
  case 4296: ;
  case 5387: ;
  return (4);
  default: ;
  return (0);  /* unknown media */
  }
  }
}
/* Enable flow control on the 82598: validate pause time and per-TC water
 * marks, negotiate, then program FCTRL/RMCS and the water-mark/timer
 * registers (function continues on the following source lines). */
static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw )
{ u32 fctrl_reg ;
  u32 rmcs_reg ;
  u32 reg ;
  u32 fcrtl ;
  u32 fcrth ;
  u32 link_speed ;
  int i ;
  bool link_up ;
  struct _ddebug descriptor ;
  long tmp ;
  struct _ddebug descriptor___0 ;
  long tmp___0 ;
  {
  link_speed = 0U;
  if ((unsigned int )hw->fc.pause_time == 0U) {
    return (-13);  /* invalid pause time */
  } else {
  }
  /* Validate low/high water marks for all 8 traffic classes when Tx
   * pause is requested (current_mode bit 1). */
  i = 0;
  goto ldv_55520;
  ldv_55519: ;
  if (((unsigned int )hw->fc.current_mode & 2U) != 0U && hw->fc.high_water[i] != 0U) {
    if (hw->fc.low_water[i] == 0U || hw->fc.low_water[i] >= hw->fc.high_water[i]) {
      /* dynamic-debug descriptor (assignments continue on the next line) */
      descriptor.modname = "ixgbe";
/* (continuation of ixgbe_fc_enable_82598: invalid-water-mark debug path) */
descriptor.function = "ixgbe_fc_enable_82598";
      descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c";
      descriptor.format = "Invalid water mark configuration\n";
      descriptor.lineno = 343U;
      descriptor.flags = 0U;
      tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L);
      if (tmp != 0L) {
        __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Invalid water mark configuration\n");
      } else {
      }
      return (-13);  /* reject bad water-mark configuration */
    } else {
    }
  } else {
  }
  i = i + 1;
  ldv_55520: ;
  if (i <= 7) {
    goto ldv_55519;
  } else {
  }
  /* At 1 Gb/s (speed 32U) the 82598 downgrades the requested mode:
   * full (3) -> tx_pause (2), rx_pause (1) -> none (0). */
  (*(hw->mac.ops.check_link))(hw, & link_speed, & link_up, 0);
  if ((int )link_up && link_speed == 32U) {
    switch ((unsigned int )hw->fc.requested_mode) {
    case 3U:
    hw->fc.requested_mode = 2;
    goto ldv_55523;
    case 1U:
    hw->fc.requested_mode = 0;
    goto ldv_55523;
    default: ;
    goto ldv_55523;
    }
    ldv_55523: ;
  } else {
  }
  ixgbe_fc_autoneg(hw);
  /* Clear then re-set the Rx (FCTRL, 0x5080, mask ~0xC000) and Tx
   * (RMCS, 0x3D00, mask ~0x18) pause-enable bits per negotiated mode. */
  fctrl_reg = ixgbe_read_reg(hw, 20608U);
  fctrl_reg = fctrl_reg & 4294918143U;
  rmcs_reg = ixgbe_read_reg(hw, 15616U);
  rmcs_reg = rmcs_reg & 4294967271U;
  switch ((unsigned int )hw->fc.current_mode) {
  case 0U: ;  /* none: leave both disabled */
  goto ldv_55527;
  case 1U:    /* rx_pause: honour received pause frames */
  fctrl_reg = fctrl_reg | 32768U;
  goto ldv_55527;
  case 2U:    /* tx_pause: send 802.3x pause frames */
  rmcs_reg = rmcs_reg | 8U;
  goto ldv_55527;
  case 3U:    /* full: both directions */
  fctrl_reg = fctrl_reg | 32768U;
  rmcs_reg = rmcs_reg | 8U;
  goto ldv_55527;
  default:
  /* unreachable mode value: emit debug message and fail */
  descriptor___0.modname = "ixgbe";
  descriptor___0.function = "ixgbe_fc_enable_82598";
  descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c";
  descriptor___0.format = "Flow control param set incorrectly\n";
  descriptor___0.lineno = 420U;
  descriptor___0.flags = 0U;
  /* (argument list of ldv__builtin_expect continues on the next line) */
  tmp___0 = ldv__builtin_expect((long
/* (continuation of ixgbe_fc_enable_82598: completes the
 * ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L) call) */
)descriptor___0.flags & 1L, 0L);
  if (tmp___0 != 0L) {
    __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flow control param set incorrectly\n");
  } else {
  }
  return (-4);  /* invalid flow-control parameter */
  }
  ldv_55527:
  fctrl_reg = fctrl_reg | 8192U;  /* discard-pause-frames bit (0x2000) */
  ixgbe_write_reg(hw, 20608U, fctrl_reg);
  ixgbe_write_reg(hw, 15616U, rmcs_reg);
  /* Program per-TC water marks: FCRTL/FCRTH at 8*(i+1604)/8*(i+1612),
   * value = (KB << 10) with the enable bit 31 set; zero when unused. */
  i = 0;
  goto ldv_55534;
  ldv_55533: ;
  if (((unsigned int )hw->fc.current_mode & 2U) != 0U && hw->fc.high_water[i] != 0U) {
    fcrtl = (hw->fc.low_water[i] << 10) | 2147483648U;
    fcrth = (hw->fc.high_water[i] << 10) | 2147483648U;
    ixgbe_write_reg(hw, (u32 )((i + 1604) * 8), fcrtl);
    ixgbe_write_reg(hw, (u32 )((i + 1612) * 8), fcrth);
  } else {
    ixgbe_write_reg(hw, (u32 )((i + 1604) * 8), 0U);
    ixgbe_write_reg(hw, (u32 )((i + 1612) * 8), 0U);
  }
  i = i + 1;
  ldv_55534: ;
  if (i <= 7) {
    goto ldv_55533;
  } else {
  }
  /* Pause timers: pack pause_time into both 16-bit halves (x 0x10001)
   * of the four FCTTV registers at 4*(i+3200). */
  reg = (u32 )((int )hw->fc.pause_time * 65537);
  i = 0;
  goto ldv_55537;
  ldv_55536:
  ixgbe_write_reg(hw, (u32 )((i + 3200) * 4), reg);
  i = i + 1;
  ldv_55537: ;
  if (i <= 3) {
    goto ldv_55536;
  } else {
  }
  /* Refresh threshold (0x32A0) = half the pause time. */
  ixgbe_write_reg(hw, 12960U, (unsigned int )hw->fc.pause_time / 2U);
  return (0);
  }
}
/* Restart the MAC link: set the AN-restart bit (0x1000) in AUTOC (0x42A0);
 * when asked to wait and an autoneg link mode (0x8000/0xC000) is selected,
 * poll LINKS (0x42A4) bit 31 up to 45 x 100 ms for AN completion. */
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw , bool autoneg_wait_to_complete )
{ u32 autoc_reg ;
  u32 links_reg ;
  u32 i ;
  s32 status ;
  struct _ddebug descriptor ;
  long tmp ;
  {
  status = 0;
  autoc_reg = ixgbe_read_reg(hw, 17056U);
  autoc_reg = autoc_reg | 4096U;
  ixgbe_write_reg(hw, 17056U, autoc_reg);
  if ((int )autoneg_wait_to_complete) {
    if ((autoc_reg & 57344U) == 32768U || (autoc_reg & 57344U) == 49152U) {
      links_reg = 0U;
      i = 0U;
      goto ldv_55549;
      ldv_55548:
      links_reg = ixgbe_read_reg(hw, 17060U);
      if ((int )links_reg < 0) {  /* bit 31 set: autoneg completed */
        goto ldv_55547;
      } else {
      }
      msleep(100U);
      i = i + 1U;
      ldv_55549: ;
      if (i <= 44U) {
        goto ldv_55548;
      } else {
      }
      ldv_55547: ;
      if ((int )links_reg >= 0) {
        status = -14;  /* autonegotiation did not complete */
        descriptor.modname = "ixgbe";
        descriptor.function = "ixgbe_start_mac_link_82598";
        /* (filename string literal is on the next source line) */
        descriptor.filename =
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c"; descriptor.format = "Autonegotiation did not complete.\n"; descriptor.lineno = 490U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Autonegotiation did not complete.\n"); } else { } } else { } } else { } } else { } msleep(50U); return (status); } } static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw ) { u32 timeout ; u16 an_reg ; struct _ddebug descriptor ; long tmp ; { if ((unsigned int )hw->device_id != 5387U) { return (0); } else { } timeout = 0U; goto ldv_55559; ldv_55558: (*(hw->phy.ops.read_reg))(hw, 1U, 7U, & an_reg); if (((int )an_reg & 32) != 0 && ((int )an_reg & 4) != 0) { goto ldv_55557; } else { } msleep(100U); timeout = timeout + 1U; ldv_55559: ; if (timeout <= 49U) { goto ldv_55558; } else { } ldv_55557: ; if (timeout == 50U) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_validate_link_ready"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c"; descriptor.format = "Link was indicated but link is down\n"; descriptor.lineno = 528U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Link was indicated but link is down\n"); } else { } return (-8); } else { } return (0); } } static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw , ixgbe_link_speed *speed , bool *link_up , bool link_up_wait_to_complete ) 
/* Body of ixgbe_check_mac_link_82598 (signature ends on the previous line):
 * report current link state and speed.  For the NL PHY (phy.type == 10U,
 * ixgbe_phy_nl) the PHY link-status registers are polled first; otherwise
 * (and afterwards) the MAC LINKS register (17060U == 0x42A4) decides.
 * CIL has lowered the original for-loops into goto/label form (ldv_555xx). */
{ u32 links_reg ;
  u32 i ;
  u16 link_reg ;
  u16 adapt_comp_reg ;
  s32 tmp ;
  {
  if ((unsigned int )hw->phy.type == 10U) {
    /* Link-status register is read twice -- NOTE(review): presumably the
     * register latches and the first read returns stale state; confirm
     * against the 82598 NL PHY documentation. */
    (*(hw->phy.ops.read_reg))(hw, 51103U, 1U, & link_reg);
    (*(hw->phy.ops.read_reg))(hw, 51103U, 1U, & link_reg);
    (*(hw->phy.ops.read_reg))(hw, 49164U, 1U, & adapt_comp_reg);
    if ((int )link_up_wait_to_complete) {
      /* Poll up to 90 times, 100 ms apart: link bit set and "adapt
       * complete" bit clear means link is up. */
      i = 0U;
      goto ldv_55574;
      ldv_55573: ;
      if ((int )link_reg & 1 && ((int )adapt_comp_reg & 1) == 0) { *link_up = 1; goto ldv_55572; } else { *link_up = 0; }
      msleep(100U);
      (*(hw->phy.ops.read_reg))(hw, 51103U, 1U, & link_reg);
      (*(hw->phy.ops.read_reg))(hw, 49164U, 1U, & adapt_comp_reg);
      i = i + 1U;
      ldv_55574: ;
      if (i <= 89U) { goto ldv_55573; } else { }
      ldv_55572: ;
    } else if ((int )link_reg & 1 && ((int )adapt_comp_reg & 1) == 0) { *link_up = 1; } else { *link_up = 0; }
    /* PHY reports no link: done, MAC state is irrelevant. */
    if (! *link_up) { return (0); } else { }
  } else { }
  links_reg = ixgbe_read_reg(hw, 17060U);
  if ((int )link_up_wait_to_complete) {
    /* Same 90 x 100 ms poll, now on LINKS bit 30 (1073741824U == 0x40000000,
     * link-up). */
    i = 0U;
    goto ldv_55577;
    ldv_55576: ;
    if ((links_reg & 1073741824U) != 0U) { *link_up = 1; goto ldv_55575; } else { *link_up = 0; }
    msleep(100U);
    links_reg = ixgbe_read_reg(hw, 17060U);
    i = i + 1U;
    ldv_55577: ;
    if (i <= 89U) { goto ldv_55576; } else { }
    ldv_55575: ;
  } else if ((links_reg & 1073741824U) != 0U) { *link_up = 1; } else { *link_up = 0; }
  /* LINKS bit 29 (536870912U): set -> *speed = 128U (10G full),
   * clear -> *speed = 32U (1G full). */
  if ((links_reg & 536870912U) != 0U) { *speed = 128U; } else { *speed = 32U; }
  /* Device id 5387U (0x150B, 82598AT2) can indicate link before it is
   * really usable; ixgbe_validate_link_ready() double-checks and may veto. */
  if ((unsigned int )hw->device_id == 5387U && (int )*link_up) { tmp = ixgbe_validate_link_ready(hw); if (tmp != 0) { *link_up = 0; } else { } } else { }
  return (0); } }
/* Select a new MAC link speed, restricted to what the hardware advertises
 * (ixgbe_get_link_capabilities_82598), by editing AUTOC (17056U == 0x42A0);
 * the body continues on the next source line. */
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete )
{ bool autoneg ;
  ixgbe_link_speed link_capabilities ;
  u32 curr_autoc ;
  u32 tmp ;
  u32 autoc ;
  u32 link_mode ;
  s32 tmp___0 ;
  {
  autoneg = 0;
  link_capabilities = 0U;
  tmp = ixgbe_read_reg(hw, 17056U);
  curr_autoc = tmp;
  autoc = curr_autoc;
  link_mode = autoc & 57344U;   /* AUTOC link-mode-select field (0xE000) */
  ixgbe_get_link_capabilities_82598(hw, & link_capabilities, & autoneg);
  /* Mask the requested speed down to supported speeds; continued next line. */
  speed = speed &
link_capabilities; if (speed == 0U) { return (-8); } else if (link_mode == 32768U || link_mode == 49152U) { autoc = autoc & 1073741823U; if ((speed & 128U) != 0U) { autoc = autoc | 2147483648U; } else { } if ((speed & 32U) != 0U) { autoc = autoc | 1073741824U; } else { } if (autoc != curr_autoc) { ixgbe_write_reg(hw, 17056U, autoc); } else { } } else { } tmp___0 = ixgbe_start_mac_link_82598(hw, (int )autoneg_wait_to_complete); return (tmp___0); } } static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete ) { s32 status ; { status = (*(hw->phy.ops.setup_link_speed))(hw, speed, (int )autoneg_wait_to_complete); ixgbe_start_mac_link_82598(hw, (int )autoneg_wait_to_complete); return (status); } } static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw ) { s32 status ; s32 phy_status ; u32 ctrl ; u32 gheccr ; u32 i ; u32 autoc ; u8 analog_val ; u32 tmp ; struct _ddebug descriptor ; long tmp___0 ; { phy_status = 0; status = (*(hw->mac.ops.stop_adapter))(hw); if (status != 0) { return (status); } else { } (*(hw->mac.ops.read_analog_reg8))(hw, 36U, & analog_val); if (((int )analog_val & 16) != 0) { (*(hw->mac.ops.read_analog_reg8))(hw, 36U, & analog_val); analog_val = (unsigned int )analog_val & 239U; (*(hw->mac.ops.write_analog_reg8))(hw, 36U, (int )analog_val); (*(hw->mac.ops.read_analog_reg8))(hw, 11U, & analog_val); analog_val = (unsigned int )analog_val & 15U; (*(hw->mac.ops.write_analog_reg8))(hw, 11U, (int )analog_val); (*(hw->mac.ops.read_analog_reg8))(hw, 12U, & analog_val); analog_val = (unsigned int )analog_val & 15U; (*(hw->mac.ops.write_analog_reg8))(hw, 12U, (int )analog_val); (*(hw->mac.ops.read_analog_reg8))(hw, 13U, & analog_val); analog_val = (unsigned int )analog_val & 15U; (*(hw->mac.ops.write_analog_reg8))(hw, 13U, (int )analog_val); } else { } if (! 
hw->phy.reset_disable) { phy_status = (*(hw->phy.ops.init))(hw); if (phy_status == -19) { return (phy_status); } else { } if (phy_status == -20) { goto mac_reset_top; } else { } (*(hw->phy.ops.reset))(hw); } else { } mac_reset_top: tmp = ixgbe_read_reg(hw, 0U); ctrl = tmp | 67108864U; ixgbe_write_reg(hw, 0U, ctrl); ixgbe_read_reg(hw, 8U); i = 0U; goto ldv_55607; ldv_55606: __const_udelay(4295UL); ctrl = ixgbe_read_reg(hw, 0U); if ((ctrl & 67108864U) == 0U) { goto ldv_55605; } else { } i = i + 1U; ldv_55607: ; if (i <= 9U) { goto ldv_55606; } else { } ldv_55605: ; if ((ctrl & 67108864U) != 0U) { status = -15; descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_reset_hw_82598"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c"; descriptor.format = "Reset polling failed to complete.\n"; descriptor.lineno = 778U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Reset polling failed to complete.\n"); } else { } } else { } msleep(50U); if ((int )hw->mac.flags & 1) { hw->mac.flags = (unsigned int )hw->mac.flags & 254U; goto mac_reset_top; } else { } gheccr = ixgbe_read_reg(hw, 69808U); gheccr = gheccr & 4292607423U; ixgbe_write_reg(hw, 69808U, gheccr); autoc = ixgbe_read_reg(hw, 17056U); if (! 
hw->mac.orig_link_settings_stored) { hw->mac.orig_autoc = autoc; hw->mac.orig_link_settings_stored = 1; } else if (hw->mac.orig_autoc != autoc) { ixgbe_write_reg(hw, 17056U, hw->mac.orig_autoc); } else { } (*(hw->mac.ops.get_mac_addr))(hw, (u8 *)(& hw->mac.perm_addr)); (*(hw->mac.ops.init_rx_addrs))(hw); if (phy_status != 0) { status = phy_status; } else { } return (status); } } static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw , u32 rar , u32 vmdq ) { u32 rar_high ; u32 rar_entries ; struct _ddebug descriptor ; long tmp ; { rar_entries = hw->mac.num_rar_entries; if (rar >= rar_entries) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_set_vmdq_82598"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c"; descriptor.format = "RAR index %d is out of range.\n"; descriptor.lineno = 838U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "RAR index %d is out of range.\n", rar); } else { } return (-32); } else { } rar_high = ixgbe_read_reg(hw, rar <= 15U ? rar * 8U + 21508U : rar * 8U + 41476U); rar_high = rar_high & 4291035135U; rar_high = ((vmdq << 18) & 3932160U) | rar_high; ixgbe_write_reg(hw, rar <= 15U ? 
rar * 8U + 21508U : rar * 8U + 41476U, rar_high); return (0); } } static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw , u32 rar , u32 vmdq ) { u32 rar_high ; u32 rar_entries ; struct _ddebug descriptor ; long tmp ; { rar_entries = hw->mac.num_rar_entries; if (rar >= rar_entries) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_clear_vmdq_82598"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c"; descriptor.format = "RAR index %d is out of range.\n"; descriptor.lineno = 863U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "RAR index %d is out of range.\n", rar); } else { } return (-32); } else { } rar_high = ixgbe_read_reg(hw, rar <= 15U ? rar * 8U + 21508U : rar * 8U + 41476U); if ((rar_high & 3932160U) != 0U) { rar_high = rar_high & 4291035135U; ixgbe_write_reg(hw, rar <= 15U ? 
/* NOTE(review): this span opens inside ixgbe_clear_vmdq_82598 (started on
 * the previous line) -- the first tokens finish its conditional RAR-high
 * write-back (RAH base 21508U for entries <= 15, 41476U above) and return. */
rar * 8U + 21508U : rar * 8U + 41476U, rar_high); } else { } return (0); } }
/* Set the VLAN filter table entry for a 12-bit VLAN id.
 * vind is written into the per-VLAN 4-bit pool nibble of the VFTAVIND byte
 * planes (reg ((vftabyte + 81) * 128 + regindex) * 4 == 0xA200-based), and
 * vlan_on sets/clears the VLAN's presence bit in the 128-word VFTA bitmap
 * (reg (regindex + 10240) * 4 == 0xA000-based).
 * Returns -5 (IXGBE_ERR_PARAM) when vlan > 4095. */
static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw , u32 vlan , u32 vind , bool vlan_on )
{ u32 regindex ;
  u32 bitindex ;
  u32 bits ;
  u32 vftabyte ;
  {
  /* VLAN ids are 12 bits wide. */
  if (vlan > 4095U) { return (-5); } else { }
  /* regindex: which 32-bit VFTA word; vftabyte/bitindex: which 4-bit nibble
   * of the VIND byte planes holds this VLAN's pool index. */
  regindex = (vlan >> 5) & 127U;
  vftabyte = (vlan >> 3) & 3U;
  bitindex = (vlan & 7U) << 2;
  bits = ixgbe_read_reg(hw, ((vftabyte + 81U) * 128U + regindex) * 4U);
  bits = (u32 )(~ (15 << (int )bitindex)) & bits;   /* clear old 4-bit vind */
  bits = (vind << (int )bitindex) | bits;           /* install new vind */
  ixgbe_write_reg(hw, ((vftabyte + 81U) * 128U + regindex) * 4U, bits);
  /* Now set/clear the VLAN's presence bit in the VFTA bitmap. */
  bitindex = vlan & 31U;
  bits = ixgbe_read_reg(hw, (regindex + 10240U) * 4U);
  if ((int )vlan_on) { bits = (u32 )(1 << (int )bitindex) | bits; } else { bits = (u32 )(~ (1 << (int )bitindex)) & bits; }
  ixgbe_write_reg(hw, (regindex + 10240U) * 4U, bits);
  return (0); } }
/* Clear the whole VLAN filter table: zero all vft_size VFTA words, then zero
 * the four VFTAVIND byte planes.  CIL has lowered the original for-loops to
 * goto/label form (ldv_556xx). */
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw )
{ u32 offset ;
  u32 vlanbyte ;
  {
  offset = 0U;
  goto ldv_55644;
  ldv_55643: ixgbe_write_reg(hw, (offset + 10240U) * 4U, 0U); offset = offset + 1U;
  ldv_55644: ;
  if (hw->mac.vft_size > offset) { goto ldv_55643; } else { }
  vlanbyte = 0U;
  goto ldv_55650;
  ldv_55649: offset = 0U;
  goto ldv_55647;
  ldv_55646: ixgbe_write_reg(hw, ((vlanbyte + 81U) * 128U + offset) * 4U, 0U); offset = offset + 1U;
  ldv_55647: ;
  if (hw->mac.vft_size > offset) { goto ldv_55646; } else { }
  vlanbyte = vlanbyte + 1U;
  ldv_55650: ;
  if (vlanbyte <= 3U) { goto ldv_55649; } else { }
  return (0); } }
/* Read one 8-bit Atlas analog register: start the transaction via ATLASCTL
 * (18432U == 0x4800; bit 65536U == 0x10000 is presumably the read command --
 * confirm against the 82598 datasheet), flush, wait ~10 us, then latch the
 * low byte of ATLASCTL into *val.  Always returns 0. */
static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw , u32 reg , u8 *val )
{ u32 atlas_ctl ;
  {
  ixgbe_write_reg(hw, 18432U, (reg << 8) | 65536U);
  ixgbe_read_reg(hw, 8U);   /* read of STATUS (0x8) flushes the write */
  __const_udelay(42950UL);
  atlas_ctl = ixgbe_read_reg(hw, 18432U);
  *val = (unsigned char )atlas_ctl;
  return (0); } }
/* Write one 8-bit Atlas analog register: same ATLASCTL handshake as the read
 * path, without reading back a data byte.  The trailing "return" is completed
 * by "(0); } }" on the next source line. */
static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw , u32 reg , u8 val )
{ u32 atlas_ctl ;
  {
  atlas_ctl = (reg << 8) | (u32 )val;
  ixgbe_write_reg(hw, 18432U, atlas_ctl);
  ixgbe_read_reg(hw, 8U);
  __const_udelay(42950UL);
  return
(0); } } static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw , u8 dev_addr , u8 byte_offset , u8 *eeprom_data ) { s32 status ; u16 sfp_addr ; u16 sfp_data ; u16 sfp_stat ; u16 gssr ; u32 i ; u32 tmp ; s32 tmp___0 ; struct _ddebug descriptor ; long tmp___1 ; { status = 0; sfp_addr = 0U; sfp_data = 0U; sfp_stat = 0U; tmp = ixgbe_read_reg(hw, 8U); if ((tmp & 4U) != 0U) { gssr = 4U; } else { gssr = 2U; } tmp___0 = (*(hw->mac.ops.acquire_swfw_sync))(hw, (u32 )gssr); if (tmp___0 != 0) { return (-16); } else { } if ((unsigned int )hw->phy.type == 10U) { sfp_addr = ((int )((u16 )dev_addr) << 8U) + (int )((u16 )byte_offset); sfp_addr = (u16 )((unsigned int )sfp_addr | 256U); (*(hw->phy.ops.write_reg_mdi))(hw, 49930U, 1U, (int )sfp_addr); i = 0U; goto ldv_55678; ldv_55677: (*(hw->phy.ops.read_reg_mdi))(hw, 49932U, 1U, & sfp_stat); sfp_stat = (unsigned int )sfp_stat & 3U; if ((unsigned int )sfp_stat != 3U) { goto ldv_55676; } else { } usleep_range(10000UL, 20000UL); i = i + 1U; ldv_55678: ; if (i <= 99U) { goto ldv_55677; } else { } ldv_55676: ; if ((unsigned int )sfp_stat != 1U) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_read_i2c_phy_82598"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c"; descriptor.format = "EEPROM read did not pass.\n"; descriptor.lineno = 1041U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read did not pass.\n"); } else { } status = -20; goto out; } else { } (*(hw->phy.ops.read_reg_mdi))(hw, 49931U, 1U, & sfp_data); *eeprom_data = (unsigned char )((int )sfp_data >> 8); } else { status = -3; } out: (*(hw->mac.ops.release_swfw_sync))(hw, (u32 )gssr); return 
/* "(status); } }" completes ixgbe_read_i2c_phy_82598, opened on the previous
 * line. */
(status); } }
/* Read one byte of the SFP module's ID EEPROM (fixed I2C device address
 * 160 == 0xA0). */
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw , u8 byte_offset , u8 *eeprom_data )
{ s32 tmp ;
  {
  tmp = ixgbe_read_i2c_phy_82598(hw, 160, (int )byte_offset, eeprom_data);
  return (tmp); } }
/* Read one byte of the module's SFF-8472 diagnostics page (fixed I2C device
 * address 162 == 0xA2). */
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw , u8 byte_offset , u8 *sff8472_data )
{ s32 tmp ;
  {
  tmp = ixgbe_read_i2c_phy_82598(hw, 162, (int )byte_offset, sff8472_data);
  return (tmp); } }
/* Determine the LAN id via the generic helper, then force bus->func to 0
 * when the EEPROM's PCIe control word 2 (read from word pci_gen + 5, with
 * pci_gen itself read from EEPROM word 6) has bit 0x2 set ("LAN disable")
 * while bits 0x1 and 0x8 are clear.  A pci_gen of 0 or 0xFFFF means the
 * pointer is unprogrammed and is ignored. */
static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw )
{ struct ixgbe_bus_info *bus ;
  u16 pci_gen ;
  u16 pci_ctrl2 ;
  {
  bus = & hw->bus;
  pci_gen = 0U;
  pci_ctrl2 = 0U;
  ixgbe_set_lan_id_multi_port_pcie(hw);
  (*(hw->eeprom.ops.read))(hw, 6, & pci_gen);
  if ((unsigned int )pci_gen != 0U && (unsigned int )pci_gen != 65535U) {
    (*(hw->eeprom.ops.read))(hw, (int )((unsigned int )pci_gen + 5U), & pci_ctrl2);
    if ((((int )pci_ctrl2 & 2) != 0 && ((int )pci_ctrl2 & 1) == 0) && ((int )pci_ctrl2 & 8) == 0) { bus->func = 0U; } else { }
  } else { }
  return; } }
/* Partition the Rx packet buffer among num_pb pools via RXPBSIZE[i]
 * (register (i + 3840) * 4 == 0x3C00 + 4*i).  strategy 1 gives the first
 * four pools 81920U (80 KB) and deliberately FALLS THROUGH so the remaining
 * pools get 49152U (48 KB); strategy 0/default leaves rxpktsize at 65536U
 * (64 KB) for all eight.  Every TXPBSIZE[i] ((i + 13056) * 4 == 0xCC00 +
 * 4*i) is then set to 40960U.  headroom is unused here.  CIL lowered the
 * for-loops to goto/label form (ldv_557xx). */
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw , int num_pb , u32 headroom , int strategy )
{ u32 rxpktsize ;
  u8 i ;
  {
  rxpktsize = 65536U;
  i = 0U;
  if (num_pb == 0) { return; } else { }
  switch (strategy) {
  case 1: rxpktsize = 81920U;
    goto ldv_55708;
    ldv_55707: ixgbe_write_reg(hw, (u32 )(((int )i + 3840) * 4), rxpktsize); i = (u8 )((int )i + 1);
    ldv_55708: ;
    if ((unsigned int )i <= 3U) { goto ldv_55707; } else { }
    rxpktsize = 49152U;
    /* fall through: i is now 4, remaining pools get the smaller size */
  case 0: ;
  default: ;
    goto ldv_55713;
    ldv_55712: ixgbe_write_reg(hw, (u32 )(((int )i + 3840) * 4), rxpktsize); i = (u8 )((int )i + 1);
    ldv_55713: ;
    if ((unsigned int )i <= 7U) { goto ldv_55712; } else { }
    goto ldv_55715;
  }
  ldv_55715: i = 0U;
  goto ldv_55717;
  ldv_55716: ixgbe_write_reg(hw, (u32 )(((int )i + 13056) * 4), 40960U); i = (u8 )((int )i + 1);
  ldv_55717: ;
  if ((unsigned int )i <= 7U) { goto ldv_55716; } else { }
  return; } }
/* Start of the 82598 MAC ops vtable; the positional initializer continues on
 * the next source line. */
static struct ixgbe_mac_operations mac_ops_82598 = {& ixgbe_init_hw_generic, & ixgbe_reset_hw_82598, & ixgbe_start_hw_82598, &
ixgbe_clear_hw_cntrs_generic, & ixgbe_get_media_type_82598, & ixgbe_get_mac_addr_generic, 0, 0, 0, & ixgbe_stop_adapter_generic, & ixgbe_get_bus_info_generic, & ixgbe_set_lan_id_multi_port_pcie_82598, & ixgbe_read_analog_reg8_82598, & ixgbe_write_analog_reg8_82598, 0, 0, 0, & ixgbe_enable_rx_dma_generic, & ixgbe_acquire_swfw_sync, & ixgbe_release_swfw_sync, & prot_autoc_read_generic, & prot_autoc_write_generic, 0, 0, 0, 0, & ixgbe_setup_mac_link_82598, & ixgbe_check_mac_link_82598, & ixgbe_get_link_capabilities_82598, & ixgbe_set_rxpba_82598, & ixgbe_led_on_generic, & ixgbe_led_off_generic, & ixgbe_blink_led_start_generic, & ixgbe_blink_led_stop_generic, & ixgbe_set_rar_generic, & ixgbe_clear_rar_generic, & ixgbe_set_vmdq_82598, 0, & ixgbe_clear_vmdq_82598, & ixgbe_init_rx_addrs_generic, & ixgbe_update_mc_addr_list_generic, & ixgbe_enable_mc_generic, & ixgbe_disable_mc_generic, & ixgbe_clear_vfta_82598, & ixgbe_set_vfta_82598, 0, 0, 0, & ixgbe_fc_enable_82598, (s32 (*)(struct ixgbe_hw * , u8 , u8 , u8 , u8 ))0, (s32 (*)(struct ixgbe_hw * ))0, (s32 (*)(struct ixgbe_hw * ))0, & ixgbe_disable_rx_generic, & ixgbe_enable_rx_generic, 0, 0, 0, 0, 0}; static struct ixgbe_eeprom_operations eeprom_ops_82598 = {& ixgbe_init_eeprom_params_generic, & ixgbe_read_eerd_generic, & ixgbe_read_eerd_buffer_generic, & ixgbe_write_eeprom_generic, & ixgbe_write_eeprom_buffer_bit_bang_generic, & ixgbe_validate_eeprom_checksum_generic, & ixgbe_update_eeprom_checksum_generic, & ixgbe_calc_eeprom_checksum_generic}; static struct ixgbe_phy_operations phy_ops_82598 = {& ixgbe_identify_phy_generic, & ixgbe_identify_module_generic, & ixgbe_init_phy_ops_82598, & ixgbe_reset_phy_generic, & ixgbe_read_phy_reg_generic, & ixgbe_write_phy_reg_generic, & ixgbe_read_phy_reg_mdi, & ixgbe_write_phy_reg_mdi, & ixgbe_setup_phy_link_generic, 0, & ixgbe_setup_phy_link_speed_generic, 0, 0, 0, 0, & ixgbe_read_i2c_sff8472_82598, & ixgbe_read_i2c_eeprom_82598, 0, 0, 0, & ixgbe_tn_check_overtemp, 0, 0}; struct 
ixgbe_info ixgbe_82598_info = {1, & ixgbe_get_invariants_82598, & mac_ops_82598, & eeprom_ops_82598, & phy_ops_82598, 0, (u32 const *)(& ixgbe_mvals_8259X)}; extern int ldv_release_29(void) ; extern int ldv_probe_29(void) ; extern int ldv_release_30(void) ; extern int ldv_probe_30(void) ; extern int ldv_setup_28(void) ; extern int ldv_release_28(void) ; void ldv_initialize_ixgbe_phy_operations_28(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); phy_ops_82598_group0 = (struct ixgbe_hw *)tmp; return; } } void ldv_initialize_ixgbe_mac_operations_30(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); mac_ops_82598_group0 = (struct ixgbe_hw *)tmp; return; } } void ldv_initialize_ixgbe_eeprom_operations_29(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); eeprom_ops_82598_group0 = (struct ixgbe_hw *)tmp; return; } } void ldv_main_exported_27(void) { struct ixgbe_hw *ldvarg198 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1696UL); ldvarg198 = (struct ixgbe_hw *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_27 == 1) { ixgbe_get_invariants_82598(ldvarg198); ldv_state_variable_27 = 1; } else { } goto ldv_55749; default: ldv_stop(); } ldv_55749: ; return; } } void ldv_main_exported_28(void) { u32 ldvarg260 ; u8 *ldvarg255 ; void *tmp ; u8 ldvarg258 ; bool ldvarg253 ; u16 ldvarg262 ; u32 ldvarg249 ; u16 ldvarg250 ; u32 ldvarg251 ; u8 *ldvarg257 ; void *tmp___0 ; u32 ldvarg252 ; u32 ldvarg264 ; u8 ldvarg256 ; u32 ldvarg261 ; u32 ldvarg248 ; ixgbe_link_speed ldvarg254 ; u32 ldvarg263 ; u16 *ldvarg259 ; void *tmp___1 ; u16 *ldvarg247 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(1UL); ldvarg255 = (u8 *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg257 = (u8 *)tmp___0; tmp___1 = ldv_init_zalloc(2UL); ldvarg259 = (u16 *)tmp___1; tmp___2 = ldv_init_zalloc(2UL); ldvarg247 = (u16 *)tmp___2; ldv_memset((void *)(& ldvarg260), 0, 4UL); ldv_memset((void *)(& ldvarg258), 0, 1UL); ldv_memset((void *)(& ldvarg253), 0, 1UL); 
ldv_memset((void *)(& ldvarg262), 0, 2UL); ldv_memset((void *)(& ldvarg249), 0, 4UL); ldv_memset((void *)(& ldvarg250), 0, 2UL); ldv_memset((void *)(& ldvarg251), 0, 4UL); ldv_memset((void *)(& ldvarg252), 0, 4UL); ldv_memset((void *)(& ldvarg264), 0, 4UL); ldv_memset((void *)(& ldvarg256), 0, 1UL); ldv_memset((void *)(& ldvarg261), 0, 4UL); ldv_memset((void *)(& ldvarg248), 0, 4UL); ldv_memset((void *)(& ldvarg254), 0, 4UL); ldv_memset((void *)(& ldvarg263), 0, 4UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_28 == 3) { ixgbe_write_phy_reg_mdi(phy_ops_82598_group0, ldvarg264, ldvarg263, (int )ldvarg262); ldv_state_variable_28 = 3; } else { } goto ldv_55773; case 1: ; if (ldv_state_variable_28 == 1) { ixgbe_reset_phy_generic(phy_ops_82598_group0); ldv_state_variable_28 = 1; } else { } if (ldv_state_variable_28 == 3) { ixgbe_reset_phy_generic(phy_ops_82598_group0); ldv_state_variable_28 = 3; } else { } if (ldv_state_variable_28 == 2) { ixgbe_reset_phy_generic(phy_ops_82598_group0); ldv_state_variable_28 = 2; } else { } goto ldv_55773; case 2: ; if (ldv_state_variable_28 == 3) { ixgbe_read_phy_reg_mdi(phy_ops_82598_group0, ldvarg261, ldvarg260, ldvarg259); ldv_state_variable_28 = 3; } else { } goto ldv_55773; case 3: ; if (ldv_state_variable_28 == 1) { ixgbe_read_i2c_eeprom_82598(phy_ops_82598_group0, (int )ldvarg258, ldvarg257); ldv_state_variable_28 = 1; } else { } if (ldv_state_variable_28 == 3) { ixgbe_read_i2c_eeprom_82598(phy_ops_82598_group0, (int )ldvarg258, ldvarg257); ldv_state_variable_28 = 3; } else { } if (ldv_state_variable_28 == 2) { ixgbe_read_i2c_eeprom_82598(phy_ops_82598_group0, (int )ldvarg258, ldvarg257); ldv_state_variable_28 = 2; } else { } goto ldv_55773; case 4: ; if (ldv_state_variable_28 == 1) { ixgbe_read_i2c_sff8472_82598(phy_ops_82598_group0, (int )ldvarg256, ldvarg255); ldv_state_variable_28 = 1; } else { } if (ldv_state_variable_28 == 3) { ixgbe_read_i2c_sff8472_82598(phy_ops_82598_group0, 
(int )ldvarg256, ldvarg255); ldv_state_variable_28 = 3; } else { } if (ldv_state_variable_28 == 2) { ixgbe_read_i2c_sff8472_82598(phy_ops_82598_group0, (int )ldvarg256, ldvarg255); ldv_state_variable_28 = 2; } else { } goto ldv_55773; case 5: ; if (ldv_state_variable_28 == 1) { ixgbe_setup_phy_link_generic(phy_ops_82598_group0); ldv_state_variable_28 = 1; } else { } if (ldv_state_variable_28 == 3) { ixgbe_setup_phy_link_generic(phy_ops_82598_group0); ldv_state_variable_28 = 3; } else { } if (ldv_state_variable_28 == 2) { ixgbe_setup_phy_link_generic(phy_ops_82598_group0); ldv_state_variable_28 = 2; } else { } goto ldv_55773; case 6: ; if (ldv_state_variable_28 == 1) { ixgbe_identify_phy_generic(phy_ops_82598_group0); ldv_state_variable_28 = 1; } else { } if (ldv_state_variable_28 == 3) { ixgbe_identify_phy_generic(phy_ops_82598_group0); ldv_state_variable_28 = 3; } else { } if (ldv_state_variable_28 == 2) { ixgbe_identify_phy_generic(phy_ops_82598_group0); ldv_state_variable_28 = 2; } else { } goto ldv_55773; case 7: ; if (ldv_state_variable_28 == 1) { ixgbe_setup_phy_link_speed_generic(phy_ops_82598_group0, ldvarg254, (int )ldvarg253); ldv_state_variable_28 = 1; } else { } if (ldv_state_variable_28 == 3) { ixgbe_setup_phy_link_speed_generic(phy_ops_82598_group0, ldvarg254, (int )ldvarg253); ldv_state_variable_28 = 3; } else { } if (ldv_state_variable_28 == 2) { ixgbe_setup_phy_link_speed_generic(phy_ops_82598_group0, ldvarg254, (int )ldvarg253); ldv_state_variable_28 = 2; } else { } goto ldv_55773; case 8: ; if (ldv_state_variable_28 == 1) { ixgbe_write_phy_reg_generic(phy_ops_82598_group0, ldvarg252, ldvarg251, (int )ldvarg250); ldv_state_variable_28 = 1; } else { } if (ldv_state_variable_28 == 3) { ixgbe_write_phy_reg_generic(phy_ops_82598_group0, ldvarg252, ldvarg251, (int )ldvarg250); ldv_state_variable_28 = 3; } else { } if (ldv_state_variable_28 == 2) { ixgbe_write_phy_reg_generic(phy_ops_82598_group0, ldvarg252, ldvarg251, (int )ldvarg250); 
ldv_state_variable_28 = 2; } else { } goto ldv_55773; case 9: ; if (ldv_state_variable_28 == 1) { ixgbe_identify_module_generic(phy_ops_82598_group0); ldv_state_variable_28 = 1; } else { } if (ldv_state_variable_28 == 3) { ixgbe_identify_module_generic(phy_ops_82598_group0); ldv_state_variable_28 = 3; } else { } if (ldv_state_variable_28 == 2) { ixgbe_identify_module_generic(phy_ops_82598_group0); ldv_state_variable_28 = 2; } else { } goto ldv_55773; case 10: ; if (ldv_state_variable_28 == 1) { ixgbe_read_phy_reg_generic(phy_ops_82598_group0, ldvarg249, ldvarg248, ldvarg247); ldv_state_variable_28 = 1; } else { } if (ldv_state_variable_28 == 3) { ixgbe_read_phy_reg_generic(phy_ops_82598_group0, ldvarg249, ldvarg248, ldvarg247); ldv_state_variable_28 = 3; } else { } if (ldv_state_variable_28 == 2) { ixgbe_read_phy_reg_generic(phy_ops_82598_group0, ldvarg249, ldvarg248, ldvarg247); ldv_state_variable_28 = 2; } else { } goto ldv_55773; case 11: ; if (ldv_state_variable_28 == 1) { ixgbe_tn_check_overtemp(phy_ops_82598_group0); ldv_state_variable_28 = 1; } else { } if (ldv_state_variable_28 == 3) { ixgbe_tn_check_overtemp(phy_ops_82598_group0); ldv_state_variable_28 = 3; } else { } if (ldv_state_variable_28 == 2) { ixgbe_tn_check_overtemp(phy_ops_82598_group0); ldv_state_variable_28 = 2; } else { } goto ldv_55773; case 12: ; if (ldv_state_variable_28 == 2) { ixgbe_init_phy_ops_82598(phy_ops_82598_group0); ldv_state_variable_28 = 3; } else { } goto ldv_55773; case 13: ; if (ldv_state_variable_28 == 1) { ldv_setup_28(); ldv_state_variable_28 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_55773; case 14: ; if (ldv_state_variable_28 == 3) { ldv_release_28(); ldv_state_variable_28 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_28 == 2) { ldv_release_28(); ldv_state_variable_28 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_55773; default: ldv_stop(); } ldv_55773: ; return; } } void ldv_main_exported_30(void) { u8 ldvarg148 ; int ldvarg162 ; u32 ldvarg139 ; 
u32 ldvarg159 ; ixgbe_link_speed *ldvarg143 ; void *tmp ; bool *ldvarg171 ; void *tmp___0 ; u32 ldvarg150 ; u32 ldvarg173 ; u32 ldvarg164 ; u8 *ldvarg147 ; void *tmp___1 ; u32 ldvarg166 ; u32 ldvarg156 ; bool ldvarg153 ; bool ldvarg172 ; u32 ldvarg144 ; u32 ldvarg137 ; ixgbe_link_speed ldvarg154 ; u8 *ldvarg151 ; void *tmp___2 ; u32 ldvarg175 ; u32 ldvarg158 ; int ldvarg160 ; bool *ldvarg142 ; void *tmp___3 ; u32 ldvarg161 ; u32 ldvarg149 ; u32 ldvarg169 ; u32 ldvarg163 ; u8 *ldvarg138 ; void *tmp___4 ; u32 *ldvarg170 ; void *tmp___5 ; u32 ldvarg152 ; bool *ldvarg145 ; void *tmp___6 ; u32 ldvarg140 ; bool ldvarg167 ; struct net_device *ldvarg155 ; void *tmp___7 ; u32 ldvarg165 ; u32 ldvarg168 ; u32 ldvarg174 ; bool ldvarg141 ; u32 ldvarg157 ; ixgbe_link_speed *ldvarg146 ; void *tmp___8 ; int tmp___9 ; { tmp = ldv_init_zalloc(4UL); ldvarg143 = (ixgbe_link_speed *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg171 = (bool *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg147 = (u8 *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg151 = (u8 *)tmp___2; tmp___3 = ldv_init_zalloc(1UL); ldvarg142 = (bool *)tmp___3; tmp___4 = ldv_init_zalloc(1UL); ldvarg138 = (u8 *)tmp___4; tmp___5 = ldv_init_zalloc(4UL); ldvarg170 = (u32 *)tmp___5; tmp___6 = ldv_init_zalloc(1UL); ldvarg145 = (bool *)tmp___6; tmp___7 = ldv_init_zalloc(3008UL); ldvarg155 = (struct net_device *)tmp___7; tmp___8 = ldv_init_zalloc(4UL); ldvarg146 = (ixgbe_link_speed *)tmp___8; ldv_memset((void *)(& ldvarg148), 0, 1UL); ldv_memset((void *)(& ldvarg162), 0, 4UL); ldv_memset((void *)(& ldvarg139), 0, 4UL); ldv_memset((void *)(& ldvarg159), 0, 4UL); ldv_memset((void *)(& ldvarg150), 0, 4UL); ldv_memset((void *)(& ldvarg173), 0, 4UL); ldv_memset((void *)(& ldvarg164), 0, 4UL); ldv_memset((void *)(& ldvarg166), 0, 4UL); ldv_memset((void *)(& ldvarg156), 0, 4UL); ldv_memset((void *)(& ldvarg153), 0, 1UL); ldv_memset((void *)(& ldvarg172), 0, 1UL); ldv_memset((void *)(& ldvarg144), 0, 4UL); ldv_memset((void *)(& ldvarg137), 
0, 4UL); ldv_memset((void *)(& ldvarg154), 0, 4UL); ldv_memset((void *)(& ldvarg175), 0, 4UL); ldv_memset((void *)(& ldvarg158), 0, 4UL); ldv_memset((void *)(& ldvarg160), 0, 4UL); ldv_memset((void *)(& ldvarg161), 0, 4UL); ldv_memset((void *)(& ldvarg149), 0, 4UL); ldv_memset((void *)(& ldvarg169), 0, 4UL); ldv_memset((void *)(& ldvarg163), 0, 4UL); ldv_memset((void *)(& ldvarg152), 0, 4UL); ldv_memset((void *)(& ldvarg140), 0, 4UL); ldv_memset((void *)(& ldvarg167), 0, 1UL); ldv_memset((void *)(& ldvarg165), 0, 4UL); ldv_memset((void *)(& ldvarg168), 0, 4UL); ldv_memset((void *)(& ldvarg174), 0, 4UL); ldv_memset((void *)(& ldvarg141), 0, 1UL); ldv_memset((void *)(& ldvarg157), 0, 4UL); tmp___9 = __VERIFIER_nondet_int(); switch (tmp___9) { case 0: ; if (ldv_state_variable_30 == 1) { ixgbe_stop_adapter_generic(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_stop_adapter_generic(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 1: ; if (ldv_state_variable_30 == 1) { ixgbe_led_off_generic(mac_ops_82598_group0, ldvarg175); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_led_off_generic(mac_ops_82598_group0, ldvarg175); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 2: ; if (ldv_state_variable_30 == 1) { ixgbe_set_vfta_82598(mac_ops_82598_group0, ldvarg174, ldvarg173, (int )ldvarg172); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_set_vfta_82598(mac_ops_82598_group0, ldvarg174, ldvarg173, (int )ldvarg172); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 3: ; if (ldv_state_variable_30 == 2) { prot_autoc_read_generic(mac_ops_82598_group0, ldvarg171, ldvarg170); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 4: ; if (ldv_state_variable_30 == 1) { ixgbe_enable_rx_dma_generic(mac_ops_82598_group0, ldvarg169); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { 
ixgbe_enable_rx_dma_generic(mac_ops_82598_group0, ldvarg169); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 5: ; if (ldv_state_variable_30 == 2) { prot_autoc_write_generic(mac_ops_82598_group0, ldvarg168, (int )ldvarg167); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 6: ; if (ldv_state_variable_30 == 1) { ixgbe_led_on_generic(mac_ops_82598_group0, ldvarg166); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_led_on_generic(mac_ops_82598_group0, ldvarg166); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 7: ; if (ldv_state_variable_30 == 1) { ixgbe_blink_led_stop_generic(mac_ops_82598_group0, ldvarg165); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_blink_led_stop_generic(mac_ops_82598_group0, ldvarg165); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 8: ; if (ldv_state_variable_30 == 1) { ixgbe_clear_rar_generic(mac_ops_82598_group0, ldvarg164); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_clear_rar_generic(mac_ops_82598_group0, ldvarg164); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 9: ; if (ldv_state_variable_30 == 1) { ixgbe_enable_rx_generic(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_enable_rx_generic(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 10: ; if (ldv_state_variable_30 == 1) { ixgbe_get_bus_info_generic(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_get_bus_info_generic(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 11: ; if (ldv_state_variable_30 == 1) { ixgbe_blink_led_start_generic(mac_ops_82598_group0, ldvarg163); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_blink_led_start_generic(mac_ops_82598_group0, ldvarg163); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 12: ; if 
(ldv_state_variable_30 == 1) { ixgbe_disable_mc_generic(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_disable_mc_generic(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 13: ; if (ldv_state_variable_30 == 1) { ixgbe_set_rxpba_82598(mac_ops_82598_group0, ldvarg162, ldvarg161, ldvarg160); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_set_rxpba_82598(mac_ops_82598_group0, ldvarg162, ldvarg161, ldvarg160); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 14: ; if (ldv_state_variable_30 == 1) { ixgbe_set_vmdq_82598(mac_ops_82598_group0, ldvarg159, ldvarg158); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_set_vmdq_82598(mac_ops_82598_group0, ldvarg159, ldvarg158); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 15: ; if (ldv_state_variable_30 == 1) { ixgbe_clear_vmdq_82598(mac_ops_82598_group0, ldvarg157, ldvarg156); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_clear_vmdq_82598(mac_ops_82598_group0, ldvarg157, ldvarg156); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 16: ; if (ldv_state_variable_30 == 1) { ixgbe_clear_vfta_82598(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_clear_vfta_82598(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 17: ; if (ldv_state_variable_30 == 1) { ixgbe_get_media_type_82598(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_get_media_type_82598(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 18: ; if (ldv_state_variable_30 == 1) { ixgbe_update_mc_addr_list_generic(mac_ops_82598_group0, ldvarg155); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_update_mc_addr_list_generic(mac_ops_82598_group0, ldvarg155); ldv_state_variable_30 
= 2; } else { } goto ldv_55832; case 19: ; if (ldv_state_variable_30 == 1) { ixgbe_init_rx_addrs_generic(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_init_rx_addrs_generic(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 20: ; if (ldv_state_variable_30 == 1) { ixgbe_fc_enable_82598(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_fc_enable_82598(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 21: ; if (ldv_state_variable_30 == 1) { ixgbe_clear_hw_cntrs_generic(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_clear_hw_cntrs_generic(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 22: ; if (ldv_state_variable_30 == 1) { ixgbe_setup_mac_link_82598(mac_ops_82598_group0, ldvarg154, (int )ldvarg153); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_setup_mac_link_82598(mac_ops_82598_group0, ldvarg154, (int )ldvarg153); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 23: ; if (ldv_state_variable_30 == 1) { ixgbe_read_analog_reg8_82598(mac_ops_82598_group0, ldvarg152, ldvarg151); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_read_analog_reg8_82598(mac_ops_82598_group0, ldvarg152, ldvarg151); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 24: ; if (ldv_state_variable_30 == 1) { ixgbe_disable_rx_generic(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_disable_rx_generic(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 25: ; if (ldv_state_variable_30 == 1) { ixgbe_set_lan_id_multi_port_pcie_82598(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_set_lan_id_multi_port_pcie_82598(mac_ops_82598_group0); 
ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 26: ; if (ldv_state_variable_30 == 1) { ixgbe_acquire_swfw_sync(mac_ops_82598_group0, ldvarg150); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_acquire_swfw_sync(mac_ops_82598_group0, ldvarg150); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 27: ; if (ldv_state_variable_30 == 1) { ixgbe_start_hw_82598(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_start_hw_82598(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 28: ; if (ldv_state_variable_30 == 1) { ixgbe_write_analog_reg8_82598(mac_ops_82598_group0, ldvarg149, (int )ldvarg148); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_write_analog_reg8_82598(mac_ops_82598_group0, ldvarg149, (int )ldvarg148); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 29: ; if (ldv_state_variable_30 == 1) { ixgbe_enable_mc_generic(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_enable_mc_generic(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 30: ; if (ldv_state_variable_30 == 1) { ixgbe_get_mac_addr_generic(mac_ops_82598_group0, ldvarg147); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_get_mac_addr_generic(mac_ops_82598_group0, ldvarg147); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 31: ; if (ldv_state_variable_30 == 1) { ixgbe_get_link_capabilities_82598(mac_ops_82598_group0, ldvarg146, ldvarg145); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_get_link_capabilities_82598(mac_ops_82598_group0, ldvarg146, ldvarg145); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 32: ; if (ldv_state_variable_30 == 1) { ixgbe_init_hw_generic(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { 
ixgbe_init_hw_generic(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 33: ; if (ldv_state_variable_30 == 1) { ixgbe_reset_hw_82598(mac_ops_82598_group0); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_reset_hw_82598(mac_ops_82598_group0); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 34: ; if (ldv_state_variable_30 == 1) { ixgbe_release_swfw_sync(mac_ops_82598_group0, ldvarg144); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_release_swfw_sync(mac_ops_82598_group0, ldvarg144); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 35: ; if (ldv_state_variable_30 == 1) { ixgbe_check_mac_link_82598(mac_ops_82598_group0, ldvarg143, ldvarg142, (int )ldvarg141); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_check_mac_link_82598(mac_ops_82598_group0, ldvarg143, ldvarg142, (int )ldvarg141); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 36: ; if (ldv_state_variable_30 == 1) { ixgbe_set_rar_generic(mac_ops_82598_group0, ldvarg139, ldvarg138, ldvarg137, ldvarg140); ldv_state_variable_30 = 1; } else { } if (ldv_state_variable_30 == 2) { ixgbe_set_rar_generic(mac_ops_82598_group0, ldvarg139, ldvarg138, ldvarg137, ldvarg140); ldv_state_variable_30 = 2; } else { } goto ldv_55832; case 37: ; if (ldv_state_variable_30 == 2) { ldv_release_30(); ldv_state_variable_30 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_55832; case 38: ; if (ldv_state_variable_30 == 1) { ldv_probe_30(); ldv_state_variable_30 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_55832; default: ldv_stop(); } ldv_55832: ; return; } } void ldv_main_exported_29(void) { u16 ldvarg423 ; u16 *ldvarg420 ; void *tmp ; u16 *ldvarg419 ; void *tmp___0 ; u16 *ldvarg427 ; void *tmp___1 ; u16 ldvarg425 ; u16 *ldvarg424 ; void *tmp___2 ; u16 ldvarg426 ; u16 ldvarg429 ; u16 ldvarg428 ; u16 ldvarg421 ; u16 ldvarg422 ; int tmp___3 ; { tmp = ldv_init_zalloc(2UL); ldvarg420 = 
(u16 *)tmp; tmp___0 = ldv_init_zalloc(2UL); ldvarg419 = (u16 *)tmp___0; tmp___1 = ldv_init_zalloc(2UL); ldvarg427 = (u16 *)tmp___1; tmp___2 = ldv_init_zalloc(2UL); ldvarg424 = (u16 *)tmp___2; ldv_memset((void *)(& ldvarg423), 0, 2UL); ldv_memset((void *)(& ldvarg425), 0, 2UL); ldv_memset((void *)(& ldvarg426), 0, 2UL); ldv_memset((void *)(& ldvarg429), 0, 2UL); ldv_memset((void *)(& ldvarg428), 0, 2UL); ldv_memset((void *)(& ldvarg421), 0, 2UL); ldv_memset((void *)(& ldvarg422), 0, 2UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_29 == 2) { ixgbe_write_eeprom_buffer_bit_bang_generic(eeprom_ops_82598_group0, (int )ldvarg429, (int )ldvarg428, ldvarg427); ldv_state_variable_29 = 2; } else { } goto ldv_55887; case 1: ; if (ldv_state_variable_29 == 2) { ixgbe_read_eerd_buffer_generic(eeprom_ops_82598_group0, (int )ldvarg426, (int )ldvarg425, ldvarg424); ldv_state_variable_29 = 2; } else { } goto ldv_55887; case 2: ; if (ldv_state_variable_29 == 1) { ixgbe_update_eeprom_checksum_generic(eeprom_ops_82598_group0); ldv_state_variable_29 = 1; } else { } if (ldv_state_variable_29 == 2) { ixgbe_update_eeprom_checksum_generic(eeprom_ops_82598_group0); ldv_state_variable_29 = 2; } else { } goto ldv_55887; case 3: ; if (ldv_state_variable_29 == 1) { ixgbe_calc_eeprom_checksum_generic(eeprom_ops_82598_group0); ldv_state_variable_29 = 1; } else { } if (ldv_state_variable_29 == 2) { ixgbe_calc_eeprom_checksum_generic(eeprom_ops_82598_group0); ldv_state_variable_29 = 2; } else { } goto ldv_55887; case 4: ; if (ldv_state_variable_29 == 1) { ixgbe_write_eeprom_generic(eeprom_ops_82598_group0, (int )ldvarg423, (int )ldvarg422); ldv_state_variable_29 = 1; } else { } if (ldv_state_variable_29 == 2) { ixgbe_write_eeprom_generic(eeprom_ops_82598_group0, (int )ldvarg423, (int )ldvarg422); ldv_state_variable_29 = 2; } else { } goto ldv_55887; case 5: ; if (ldv_state_variable_29 == 1) { ixgbe_read_eerd_generic(eeprom_ops_82598_group0, (int 
)ldvarg421, ldvarg420); ldv_state_variable_29 = 1; } else { } if (ldv_state_variable_29 == 2) { ixgbe_read_eerd_generic(eeprom_ops_82598_group0, (int )ldvarg421, ldvarg420); ldv_state_variable_29 = 2; } else { } goto ldv_55887; case 6: ; if (ldv_state_variable_29 == 1) { ixgbe_init_eeprom_params_generic(eeprom_ops_82598_group0); ldv_state_variable_29 = 1; } else { } if (ldv_state_variable_29 == 2) { ixgbe_init_eeprom_params_generic(eeprom_ops_82598_group0); ldv_state_variable_29 = 2; } else { } goto ldv_55887; case 7: ; if (ldv_state_variable_29 == 1) { ixgbe_validate_eeprom_checksum_generic(eeprom_ops_82598_group0, ldvarg419); ldv_state_variable_29 = 1; } else { } if (ldv_state_variable_29 == 2) { ixgbe_validate_eeprom_checksum_generic(eeprom_ops_82598_group0, ldvarg419); ldv_state_variable_29 = 2; } else { } goto ldv_55887; case 8: ; if (ldv_state_variable_29 == 2) { ldv_release_29(); ldv_state_variable_29 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_55887; case 9: ; if (ldv_state_variable_29 == 1) { ldv_probe_29(); ldv_state_variable_29 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_55887; default: ldv_stop(); } ldv_55887: ; return; } } bool ldv_queue_work_on_254(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_255(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_256(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct 
/* ---------------------------------------------------------------------------
 * LDV (Linux Driver Verification) environment models, CIL-generated.
 * Each wrapper forwards to the real kernel API and additionally updates the
 * verifier's model state via activate_work_9() / call_and_disable_all_9() /
 * ldv_check_alloc_flags() / ldv_undef_ptr().  Token stream preserved; only
 * comments and line breaks added — do not hand-edit the generated code.
 * ------------------------------------------------------------------------- */
/* Tail of ldv_queue_work_on_256() (signature begins on the previous line):
 * queue_work_on() model that also marks the work item active (state 2). */
*ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; {
tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
ldv_func_res = tmp;
activate_work_9(ldv_func_arg3, 2); /* LDV model: record work as pending */
return (ldv_func_res); } }
/* flush_workqueue() model: call_and_disable_all_9() runs and disables every
 * work item currently tracked by the LDV work model. */
void ldv_flush_workqueue_257(struct workqueue_struct *ldv_func_arg1 ) { {
flush_workqueue(ldv_func_arg1);
call_and_disable_all_9(2);
return; } }
/* queue_delayed_work_on() model: activates the embedded work_struct. */
bool ldv_queue_delayed_work_on_258(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; {
tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4);
ldv_func_res = tmp;
activate_work_9(& ldv_func_arg3->work, 2);
return (ldv_func_res); } }
/* kmem_cache_alloc() model: checks the GFP flags against the modelled
 * context, then returns an unconstrained (possibly NULL) pointer. */
void *ldv_kmem_cache_alloc_264(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; {
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return (tmp); } }
/* pskb_expand_head() model: GFP-flag check + undefined int result. */
int ldv_pskb_expand_head_270(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; {
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((int )((long )tmp)); } }
/* skb_clone() model: GFP-flag check + undefined sk_buff pointer. */
struct sk_buff *ldv_skb_clone_272(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; {
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((struct sk_buff *)tmp); } }
/* skb_copy() model: GFP-flag check + undefined sk_buff pointer. */
struct sk_buff *ldv_skb_copy_274(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; {
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((struct sk_buff *)tmp); } }
/* __netdev_alloc_skb() models — one stub per call site, identical bodies. */
struct sk_buff *ldv___netdev_alloc_skb_275(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; {
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv___netdev_alloc_skb_276(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; {
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((struct sk_buff *)tmp); } }
/* Return type of ldv___netdev_alloc_skb_277(), continued on the next line. */
struct sk_buff 
/* Third __netdev_alloc_skb() model (return type on the previous line). */
*ldv___netdev_alloc_skb_277(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; {
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((struct sk_buff *)tmp); } }
/* pskb_expand_head() models — GFP-flag check + undefined int result. */
int ldv_pskb_expand_head_278(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; {
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((int )((long )tmp)); } }
int ldv_pskb_expand_head_279(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; {
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((int )((long )tmp)); } }
/* skb_clone() model: GFP-flag check + undefined sk_buff pointer. */
struct sk_buff *ldv_skb_clone_280(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; {
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return ((struct sk_buff *)tmp); } }
/* kmem_cache_alloc() model: GFP-flag check + undefined pointer. */
void *ldv_kmem_cache_alloc_281(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; {
ldv_check_alloc_flags(flags);
tmp = ldv_undef_ptr();
return (tmp); } }
/* CIL re-declaration of the __builtin_expect() model. */
__inline static long ldv__builtin_expect(long exp , long c ) ;
/* Prototypes for further numbered LDV stubs (defined elsewhere in the
 * generated file); one stub exists per original kernel-API call site. */
bool ldv_queue_work_on_301(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ;
bool ldv_queue_work_on_303(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ;
bool ldv_queue_delayed_work_on_302(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ;
bool ldv_queue_delayed_work_on_305(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ;
void ldv_flush_workqueue_304(struct workqueue_struct *ldv_func_arg1 ) ;
void *ldv_kmem_cache_alloc_311(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ;
void *ldv_kmem_cache_alloc_328(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ;
struct sk_buff *ldv_skb_clone_319(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ;
/* Return type of the ldv_skb_clone_327 prototype, continued next line. */
struct sk_buff 
/* Remaining LDV stub prototypes (continued from the previous line). */
*ldv_skb_clone_327(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ;
struct sk_buff *ldv_skb_copy_321(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ;
int ldv_pskb_expand_head_317(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ;
int ldv_pskb_expand_head_325(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ;
int ldv_pskb_expand_head_326(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ;
struct sk_buff *ldv___netdev_alloc_skb_322(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ;
struct sk_buff *ldv___netdev_alloc_skb_323(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ;
struct sk_buff *ldv___netdev_alloc_skb_324(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ;
/* --- Start of the inlined ixgbe_phy.c translation unit ------------------- */
/* External MDIO clause-45 probe helper from the mdio library. */
extern int mdio45_probe(struct mdio_if_info * , int ) ;
/* Public ixgbe PHY API prototypes. */
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw , u16 *firmware_version ) ;
s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw , bool on ) ;
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw ) ;
s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw , u8 addr , u16 reg , u16 *val ) ;
s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw , u8 addr , u16 reg , u16 val ) ;
/* Forward declarations of the static bit-banged I2C helpers defined later
 * in this translation unit. */
static void ixgbe_i2c_start(struct ixgbe_hw *hw ) ;
static void ixgbe_i2c_stop(struct ixgbe_hw *hw ) ;
static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw , u8 *data ) ;
static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw , u8 data ) ;
static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw ) ;
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw , bool *data ) ;
static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw , bool data ) ;
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw , u32 *i2cctl ) ;
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw , u32 *i2cctl ) ;
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw , u32 *i2cctl , bool data ) ;
/* Return type of the ixgbe_get_i2c_data prototype, continued next line. */
static bool 
/* Forward declarations (continued from the previous line). */
ixgbe_get_i2c_data(struct ixgbe_hw *hw , u32 *i2cctl ) ;
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw ) ;
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id ) ;
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw ) ;
static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw ) ;
/* Clock one byte out on the bit-banged I2C bus, then read back the slave's
 * ACK.  Returns 0 on success, or the first failing step's status. */
static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw , u8 byte ) { s32 status ; s32 tmp ; {
status = ixgbe_clock_out_i2c_byte(hw, (int )byte);
if (status != 0) { return (status); } else { }
tmp = ixgbe_get_i2c_ack(hw);
return (tmp); } }
/* Clock one byte in from the I2C bus, then send an ACK (a 0 bit) back. */
static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw , u8 *byte ) { s32 status ; s32 tmp ; {
status = ixgbe_clock_in_i2c_byte(hw, byte);
if (status != 0) { return (status); } else { }
tmp = ixgbe_clock_out_i2c_bit(hw, 0);
return (tmp); } }
/* Ones'-complement addition of two bytes: add in 16 bits, then fold the
 * carry out of bit 8 back into the low byte.  Used to build the checksum
 * for the combined-format I2C transfers below. */
static u8 ixgbe_ones_comp_byte_add(u8 add1 , u8 add2 ) { u16 sum ; {
sum = (int )((u16 )add1) + (int )((u16 )add2);
sum = ((unsigned int )sum & 255U) + (unsigned int )((u16 )((int )sum >> 8));
return ((u8 )sum); } }
/* Combined-format I2C register read: sends addr / reg_high / reg_low /
 * checksum, restarts, then clocks in two data bytes plus a checksum byte.
 * Retries up to max_retry (10) times; the loop head is label ldv_55503
 * (CIL's rendering of the original for/do loop).  Body continues on the
 * following lines. */
s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw , u8 addr , u16 reg , u16 *val ) { u32 swfw_mask ; int max_retry ; int retry ; u8 csum_byte ; u8 high_bits ; u8 low_bits ; u8 reg_high ; u8 csum ; s32 tmp ; s32 tmp___0 ; s32 tmp___1 ; s32 tmp___2 ; s32 tmp___3 ; s32 tmp___4 ; s32 tmp___5 ; s32 tmp___6 ; s32 tmp___7 ; s32 tmp___8 ; struct _ddebug descriptor ; long tmp___9 ; struct _ddebug descriptor___0 ; long tmp___10 ; {
swfw_mask = hw->phy.phy_semaphore_mask;
max_retry = 10;
retry = 0;
/* High register byte with bit 0 set — read indicator (the write variant
 * below clears bit 0 with & 254U instead). */
reg_high = (u8 )((int )((signed char )((int )reg >> 7)) | 1);
csum = ixgbe_ones_comp_byte_add((int )reg_high, (int )((u8 )reg));
csum = ~ ((int )csum);
/* Retry loop entry point. */
ldv_55503: tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, swfw_mask);
if (tmp != 0) { return (-16); } else { } /* could not get SW/FW semaphore */
ixgbe_i2c_start(hw);
tmp___0 = ixgbe_out_i2c_byte_ack(hw, (int )addr);
if (tmp___0 != 0) { goto fail; } else { }
tmp___1 = ixgbe_out_i2c_byte_ack(hw, (int )reg_high);
if (tmp___1 != 0) { goto fail; } else { }
tmp___2 = 
ixgbe_out_i2c_byte_ack(hw, (int )((u8 )reg)); if (tmp___2 != 0) { goto fail; } else { } tmp___3 = ixgbe_out_i2c_byte_ack(hw, (int )csum); if (tmp___3 != 0) { goto fail; } else { } ixgbe_i2c_start(hw); tmp___4 = ixgbe_out_i2c_byte_ack(hw, (int )((unsigned int )addr | 1U)); if (tmp___4 != 0) { goto fail; } else { } tmp___5 = ixgbe_in_i2c_byte_ack(hw, & high_bits); if (tmp___5 != 0) { goto fail; } else { } tmp___6 = ixgbe_in_i2c_byte_ack(hw, & low_bits); if (tmp___6 != 0) { goto fail; } else { } tmp___7 = ixgbe_clock_in_i2c_byte(hw, & csum_byte); if (tmp___7 != 0) { goto fail; } else { } tmp___8 = ixgbe_clock_out_i2c_bit(hw, 0); if (tmp___8 != 0) { goto fail; } else { } ixgbe_i2c_stop(hw); (*(hw->mac.ops.release_swfw_sync))(hw, swfw_mask); *val = (u16 )((int )((short )((int )high_bits << 8)) | (int )((short )low_bits)); return (0); fail: ixgbe_i2c_bus_clear(hw); (*(hw->mac.ops.release_swfw_sync))(hw, swfw_mask); retry = retry + 1; if (retry < max_retry) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_read_i2c_combined_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor.format = "I2C byte read combined error - Retry.\n"; descriptor.lineno = 169U; descriptor.flags = 0U; tmp___9 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___9 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "I2C byte read combined error - Retry.\n"); } else { } } else { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_read_i2c_combined_generic"; descriptor___0.filename = 
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor___0.format = "I2C byte read combined error.\n"; descriptor___0.lineno = 171U; descriptor___0.flags = 0U; tmp___10 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___10 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "I2C byte read combined error.\n"); } else { } } if (retry < max_retry) { goto ldv_55503; } else { } return (-18); } } s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw , u8 addr , u16 reg , u16 val ) { int max_retry ; int retry ; u8 reg_high ; u8 csum ; s32 tmp ; s32 tmp___0 ; s32 tmp___1 ; s32 tmp___2 ; s32 tmp___3 ; s32 tmp___4 ; struct _ddebug descriptor ; long tmp___5 ; struct _ddebug descriptor___0 ; long tmp___6 ; { max_retry = 1; retry = 0; reg_high = (unsigned int )((u8 )((int )reg >> 7)) & 254U; csum = ixgbe_ones_comp_byte_add((int )reg_high, (int )((u8 )reg)); csum = ixgbe_ones_comp_byte_add((int )csum, (int )((u8 )((int )val >> 8))); csum = ixgbe_ones_comp_byte_add((int )csum, (int )((u8 )val)); csum = ~ ((int )csum); ldv_55519: ixgbe_i2c_start(hw); tmp = ixgbe_out_i2c_byte_ack(hw, (int )addr); if (tmp != 0) { goto fail; } else { } tmp___0 = ixgbe_out_i2c_byte_ack(hw, (int )reg_high); if (tmp___0 != 0) { goto fail; } else { } tmp___1 = ixgbe_out_i2c_byte_ack(hw, (int )((u8 )reg)); if (tmp___1 != 0) { goto fail; } else { } tmp___2 = ixgbe_out_i2c_byte_ack(hw, (int )((u8 )((int )val >> 8))); if (tmp___2 != 0) { goto fail; } else { } tmp___3 = ixgbe_out_i2c_byte_ack(hw, (int )((u8 )val)); if (tmp___3 != 0) { goto fail; } else { } tmp___4 = ixgbe_out_i2c_byte_ack(hw, (int )csum); if (tmp___4 != 0) { goto fail; } else { } ixgbe_i2c_stop(hw); return (0); fail: ixgbe_i2c_bus_clear(hw); retry = retry + 1; if (retry < 
max_retry) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_write_i2c_combined_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor.format = "I2C byte write combined error - Retry.\n"; descriptor.lineno = 226U; descriptor.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___5 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "I2C byte write combined error - Retry.\n"); } else { } } else { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_write_i2c_combined_generic"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor___0.format = "I2C byte write combined error.\n"; descriptor___0.lineno = 228U; descriptor___0.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___6 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "I2C byte write combined error.\n"); } else { } } if (retry < max_retry) { goto ldv_55519; } else { } return (-18); } } s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw ) { u32 phy_addr ; u16 ext_ability ; u32 tmp ; int tmp___0 ; { ext_ability = 0U; if (hw->phy.phy_semaphore_mask == 0U) { tmp = ixgbe_read_reg(hw, 8U); hw->phy.lan_id = (unsigned int )((u8 )tmp) & 4U; if ((unsigned int )hw->phy.lan_id != 0U) { hw->phy.phy_semaphore_mask = 4U; } else { hw->phy.phy_semaphore_mask = 2U; } } else { } if ((unsigned int )hw->phy.type == 0U) { phy_addr = 0U; goto ldv_55527; ldv_55526: hw->phy.mdio.prtad = (int 
/* Tail of ixgbe_identify_phy_generic(): probe MDIO addresses 0..31; on the
 * first responding address read the PHY id and classify it.  If the id is
 * unknown, fall back on the PMA/PMD extended-ability register to decide
 * copper vs. generic (0x24 mask — presumably the 10GBASE-T/1000BASE-T
 * extended-ability bits; TODO confirm against the MDIO defines). */
)phy_addr; tmp___0 = mdio45_probe(& hw->phy.mdio, (int )phy_addr);
if (tmp___0 == 0) { ixgbe_get_phy_id(hw);
hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
if ((unsigned int )hw->phy.type == 0U) { (*(hw->phy.ops.read_reg))(hw, 11U, 1U, & ext_ability);
/* 7 / 24: copper vs. generic phy-type enum values — confirm enum names. */
if (((int )ext_ability & 36) != 0) { hw->phy.type = 7; } else { hw->phy.type = 24; } } else { }
return (0); } else { }
phy_addr = phy_addr + 1U;
ldv_55527: ; if (phy_addr <= 31U) { goto ldv_55526; } else { }
/* No PHY found on any address: clear prtad and report failure (-17). */
hw->phy.mdio.prtad = 0;
return (-17); } else { }
return (0); } }
/* Return true when firmware/manageability has vetoed a PHY reset.  Reads
 * register 0x42D0 and tests bit 0 (MNG_VETO, per the debug message).  The
 * first MAC type (== 1U, presumably 82598 — confirm) never blocks. */
bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw ) { u32 mmngc ; struct _ddebug descriptor ; long tmp ; {
if ((unsigned int )hw->mac.type == 1U) { return (0); } else { }
mmngc = ixgbe_read_reg(hw, 17104U);
if ((int )mmngc & 1) {
/* Dynamic-debug descriptor for the "MNG_VETO bit detected." message. */
descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_check_reset_blocked"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor.format = "MNG_VETO bit detected.\n"; descriptor.lineno = 306U; descriptor.flags = 0U;
tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L);
if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "MNG_VETO bit detected.\n"); } else { }
return (1); } else { }
return (0); } }
/* Assemble the 32-bit PHY identifier from the two 16-bit MDIO id registers
 * (registers 2 and 3 of device 1): high word << 16, low word with the low
 * nibble masked off (0xFFFFFFF0); that low nibble is kept as the revision. */
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw ) { s32 status ; u16 phy_id_high ; u16 phy_id_low ; {
phy_id_high = 0U;
phy_id_low = 0U;
status = (*(hw->phy.ops.read_reg))(hw, 2U, 1U, & phy_id_high);
if (status == 0) { hw->phy.id = (unsigned int )((int )phy_id_high << 16);
status = (*(hw->phy.ops.read_reg))(hw, 3U, 1U, & phy_id_low);
hw->phy.id = hw->phy.id | ((u32 )phy_id_low & 4294967280U);
hw->phy.revision = (unsigned int )phy_id_low & 15U; } else { }
return (status); } }
/* Signature of ixgbe_get_phy_type_from_id(), continued on the next line. */
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id 
/* Body of ixgbe_get_phy_type_from_id(): map known raw PHY ids to phy-type
 * enum values; unknown ids map to 0.  Case constants in hex: 0xA19410,
 * 0x1540220 / 0x1540200, 0x43A400, 0x3429050, 0x1540240.  ldv_55546 is
 * CIL's rendering of the switch's break target. */
) { enum ixgbe_phy_type phy_type ; {
switch (phy_id) {
case 10589200U: phy_type = 2; goto ldv_55546;
case 22282784U: ; /* fallthrough */
case 22282752U: phy_type = 3; goto ldv_55546;
case 4432896U: phy_type = 8; goto ldv_55546;
case 54693968U: phy_type = 10; goto ldv_55546;
case 22282816U: phy_type = 6; goto ldv_55546;
default: phy_type = 0; goto ldv_55546;
} ldv_55546: ;
return (phy_type); } }
/* Reset the PHY: identify it if needed, skip the reset when overtemp
 * handling or a firmware veto forbids it, then write 0x8000 (reset bit,
 * presumably MDIO_CTRL1 — confirm) to register 0 of device 4 and poll up to
 * 30 times (100 ms sleep each) for the self-clearing bit 15 to drop. */
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw ) { u32 i ; u16 ctrl ; s32 status ; s32 tmp ; bool tmp___0 ; struct _ddebug descriptor ; long tmp___1 ; {
ctrl = 0U;
status = 0;
if ((unsigned int )hw->phy.type == 0U) { status = ixgbe_identify_phy_generic(hw); } else { }
if (status != 0 || (unsigned int )hw->phy.type == 1U) { return (status); } else { }
/* -26 from check_overtemp means over-temperature: silently skip reset. */
if (! hw->phy.reset_if_overtemp) { tmp = (*(hw->phy.ops.check_overtemp))(hw);
if (tmp == -26) { return (0); } else { } } else { }
/* Firmware manageability veto: do not touch the PHY. */
tmp___0 = ixgbe_check_reset_blocked(hw);
if ((int )tmp___0) { return (0); } else { }
(*(hw->phy.ops.write_reg))(hw, 0U, 4U, 32768);
i = 0U;
goto ldv_55561;
/* Poll loop: sign bit of ctrl set means reset still in progress. */
ldv_55560: msleep(100U);
(*(hw->phy.ops.read_reg))(hw, 0U, 4U, & ctrl);
if ((int )((short )ctrl) >= 0) { __const_udelay(8590UL); goto ldv_55559; } else { }
i = i + 1U;
ldv_55561: ; if (i <= 29U) { goto ldv_55560; } else { }
ldv_55559: ;
if ((int )((short )ctrl) < 0) {
/* Reset never completed: emit debug message and return -15. */
descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_reset_phy_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor.format = "PHY reset polling failed to complete.\n"; descriptor.lineno = 420U; descriptor.flags = 0U;
tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L);
if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "PHY reset polling failed to complete.\n"); } else { }
return (-15); } else { 
} return (0); } } s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw , u32 reg_addr , u32 device_type , u16 *phy_data ) { u32 i ; u32 data ; u32 command ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; { command = (((device_type << 16) | reg_addr) | (u32 )(hw->phy.mdio.prtad << 21)) | 1073741824U; ixgbe_write_reg(hw, 16988U, command); i = 0U; goto ldv_55575; ldv_55574: __const_udelay(42950UL); command = ixgbe_read_reg(hw, 16988U); if ((command & 1073741824U) == 0U) { goto ldv_55573; } else { } i = i + 1U; ldv_55575: ; if (i <= 99U) { goto ldv_55574; } else { } ldv_55573: ; if ((command & 1073741824U) != 0U) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_read_phy_reg_mdi"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor.format = "PHY address command did not complete.\n"; descriptor.lineno = 461U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "PHY address command did not complete.\n"); } else { } return (-3); } else { } command = (((device_type << 16) | reg_addr) | (u32 )(hw->phy.mdio.prtad << 21)) | 1275068416U; ixgbe_write_reg(hw, 16988U, command); i = 0U; goto ldv_55580; ldv_55579: __const_udelay(42950UL); command = ixgbe_read_reg(hw, 16988U); if ((command & 1073741824U) == 0U) { goto ldv_55578; } else { } i = i + 1U; ldv_55580: ; if (i <= 99U) { goto ldv_55579; } else { } ldv_55578: ; if ((command & 1073741824U) != 0U) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_read_phy_reg_mdi"; descriptor___0.filename = 
/* Tail of ixgbe_read_phy_reg_mdi(): the debug descriptor for a timed-out
 * read command, then the successful path — the read data sits in the upper
 * 16 bits of register 0x4260 (presumably MSRWD — confirm vs. datasheet). */
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor___0.format = "PHY read command didn\'t complete\n"; descriptor___0.lineno = 488U; descriptor___0.flags = 0U;
tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L);
if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "PHY read command didn\'t complete\n"); } else { }
return (-3); } else { }
data = ixgbe_read_reg(hw, 16992U);
data = data >> 16;
*phy_data = (unsigned short )data;
return (0); } }
/* Semaphore-protected PHY register read: take the SW/FW sync mask for this
 * PHY, do the raw MDI read, release.  -16 when the semaphore can't be
 * acquired; otherwise the MDI read's status. */
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw , u32 reg_addr , u32 device_type , u16 *phy_data ) { s32 status ; u32 gssr ; s32 tmp ; {
gssr = hw->phy.phy_semaphore_mask;
tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, gssr);
if (tmp == 0) { status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
(*(hw->mac.ops.release_swfw_sync))(hw, gssr); } else { return (-16); }
return (status); } }
/* Raw MDI clause-45 register write: place the data in register 0x4260,
 * issue the address command (bit 30 = 0x40000000, command-busy/start) on
 * command register 0x425C, and poll up to 100 times for bit 30 to clear.
 * ldv_55599/ldv_55600 are CIL's rendering of the poll loop.  Body continues
 * on the next line. */
s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw , u32 reg_addr , u32 device_type , u16 phy_data ) { u32 i ; u32 command ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; {
ixgbe_write_reg(hw, 16992U, (unsigned int )phy_data);
command = (((device_type << 16) | reg_addr) | (u32 )(hw->phy.mdio.prtad << 21)) | 1073741824U;
ixgbe_write_reg(hw, 16988U, command);
i = 0U;
goto ldv_55600;
ldv_55599: __const_udelay(42950UL);
command = ixgbe_read_reg(hw, 16988U);
if ((command & 1073741824U) == 0U) { goto ldv_55598; } else { }
i = i + 1U;
ldv_55600: ; if (i <= 99U) { goto ldv_55599; } else { }
ldv_55598: ;
if ((command & 1073741824U) != 0U) {
/* Address phase timed out: emit debug message, fail with -3. */
descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_write_phy_reg_mdi"; descriptor.filename = 
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor.format = "PHY address cmd didn\'t complete\n"; descriptor.lineno = 564U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "PHY address cmd didn\'t complete\n"); } else { } return (-3); } else { } command = (((device_type << 16) | reg_addr) | (u32 )(hw->phy.mdio.prtad << 21)) | 1140850688U; ixgbe_write_reg(hw, 16988U, command); i = 0U; goto ldv_55605; ldv_55604: __const_udelay(42950UL); command = ixgbe_read_reg(hw, 16988U); if ((command & 1073741824U) == 0U) { goto ldv_55603; } else { } i = i + 1U; ldv_55605: ; if (i <= 99U) { goto ldv_55604; } else { } ldv_55603: ; if ((command & 1073741824U) != 0U) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_write_phy_reg_mdi"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor___0.format = "PHY write cmd didn\'t complete\n"; descriptor___0.lineno = 592U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "PHY write cmd didn\'t complete\n"); } else { } return (-3); } else { } return (0); } } s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw , u32 reg_addr , u32 device_type , u16 phy_data ) { s32 status ; u32 gssr ; u32 tmp ; s32 tmp___0 ; { tmp = ixgbe_read_reg(hw, 8U); if ((tmp & 4U) != 0U) { gssr = 4U; } else { gssr = 2U; } tmp___0 
/* Continuation of ixgbe_write_phy_reg_generic: acquire semaphore, do MDI write. */
= (*(hw->mac.ops.acquire_swfw_sync))(hw, gssr);
  if (tmp___0 == 0) {
    status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, (int )phy_data);
    (*(hw->mac.ops.release_swfw_sync))(hw, gssr);
  } else {
    return (-16); /* semaphore not acquired */
  }
  return (status);
} }
/*
 * ixgbe_setup_phy_link_generic - restart PHY autonegotiation, advertising only
 * the speeds enabled in hw->phy.autoneg_advertised that the PHY supports.
 * Speed bits 128U/32U/8U are CIL-expanded link-speed flags (presumably
 * 10G/1G/100M — TODO confirm against ixgbe_type.h). For each supported speed,
 * read the MMD 7 (autoneg) advertisement register, clear the relevant bit,
 * set it if advertised, and write it back. Skips the restart when a manage-
 * ment/firmware reset block is active. Always returns 0 (status is never
 * reassigned after initialization).
 */
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw )
{ s32 status ;
  u16 autoneg_reg ;
  bool autoneg ;
  ixgbe_link_speed speed ;
  bool tmp ;
  {
  status = 0;
  autoneg_reg = 0U;
  autoneg = 0;
  ixgbe_get_copper_link_capabilities_generic(hw, & speed, & autoneg);
  if ((speed & 128U) != 0U) {
    (*(hw->phy.ops.read_reg))(hw, 32U, 7U, & autoneg_reg);
    autoneg_reg = (unsigned int )autoneg_reg & 61439U; /* clear bit 12 */
    if ((hw->phy.autoneg_advertised & 128U) != 0U) {
      autoneg_reg = (u16 )((unsigned int )autoneg_reg | 4096U);
    } else { }
    (*(hw->phy.ops.write_reg))(hw, 32U, 7U, (int )autoneg_reg);
  } else { }
  if ((speed & 32U) != 0U) {
    (*(hw->phy.ops.read_reg))(hw, 50176U, 7U, & autoneg_reg);
    autoneg_reg = (unsigned int )autoneg_reg & 32767U; /* clear bit 15 */
    if ((hw->phy.autoneg_advertised & 32U) != 0U) {
      autoneg_reg = (u16 )((unsigned int )autoneg_reg | 32768U);
    } else { }
    (*(hw->phy.ops.write_reg))(hw, 50176U, 7U, (int )autoneg_reg);
  } else { }
  if ((speed & 8U) != 0U) {
    (*(hw->phy.ops.read_reg))(hw, 16U, 7U, & autoneg_reg);
    autoneg_reg = (unsigned int )autoneg_reg & 65151U; /* clear bits 8-9 region */
    if ((hw->phy.autoneg_advertised & 8U) != 0U) {
      autoneg_reg = (u16 )((unsigned int )autoneg_reg | 256U);
    } else { }
    (*(hw->phy.ops.write_reg))(hw, 16U, 7U, (int )autoneg_reg);
  } else { }
  /* Do not kick autoneg while a reset is blocked by firmware/manageability. */
  tmp = ixgbe_check_reset_blocked(hw);
  if ((int )tmp) { return (0); } else { }
  /* Set the autoneg-restart bit (512U) in the MMD 7 control register. */
  (*(hw->phy.ops.read_reg))(hw, 0U, 7U, & autoneg_reg);
  autoneg_reg = (u16 )((unsigned int )autoneg_reg | 512U);
  (*(hw->phy.ops.write_reg))(hw, 0U, 7U, (int )autoneg_reg);
  return (status);
} }
/*
 * ixgbe_setup_phy_link_speed_generic - record the requested advertisement mask
 * (intersection expressed bit-by-bit) and delegate to phy.ops.setup_link.
 * @autoneg_wait_to_complete is accepted but unused here. Always returns 0.
 */
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete )
{ {
  hw->phy.autoneg_advertised = 0U;
  if ((speed & 128U) != 0U) { hw->phy.autoneg_advertised = hw->phy.autoneg_advertised | 128U; } else { }
  if ((speed & 32U) != 0U) { hw->phy.autoneg_advertised = hw->phy.autoneg_advertised | 32U; } else { }
  if ((speed & 8U) != 0U) { hw->phy.autoneg_advertised = hw->phy.autoneg_advertised | 8U; } else { }
  (*(hw->phy.ops.setup_link))(hw);
  return (0);
} }
/*
 * ixgbe_get_copper_link_capabilities_generic - derive the supported link-speed
 * mask from the PHY speed-ability register (MMD 1, register 4U). Always reports
 * autoneg capable (*autoneg = 1). For mac.type == 5U one speed bit (8U) is
 * masked off. Returns the read_reg status.
 */
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw , ixgbe_link_speed *speed , bool *autoneg )
{ s32 status ;
  u16 speed_ability ;
  {
  *speed = 0U;
  *autoneg = 1;
  status = (*(hw->phy.ops.read_reg))(hw, 4U, 1U, & speed_ability);
  if (status == 0) {
    if ((int )speed_ability & 1) { *speed = *speed | 128U; } else { }
    if (((int )speed_ability & 16) != 0) { *speed = *speed | 32U; } else { }
    if (((int )speed_ability & 32) != 0) { *speed = *speed | 8U; } else { }
  } else { }
  if ((unsigned int )hw->mac.type == 5U) {
    *speed = *speed & 4294967287U; /* strip the 8U speed bit for this MAC type */
  } else { }
  return (status);
} }
/*
 * ixgbe_check_phy_link_tnx - poll a TNX PHY (vendor register 1U, MMD 30U) up to
 * 10 times for link. Defaults *speed to 128U, downgrading to 32U when the
 * speed status bit (16U) is set. Returns the last read_reg status.
 */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw , ixgbe_link_speed *speed , bool *link_up )
{ s32 status ;
  u32 time_out ;
  u32 max_time_out ;
  u16 phy_link ;
  u16 phy_speed ;
  u16 phy_data ;
  {
  max_time_out = 10U;
  phy_link = 0U;
  phy_speed = 0U;
  phy_data = 0U;
  *link_up = 0;
  *speed = 128U;
  time_out = 0U;
  goto ldv_55647;
  ldv_55646: __const_udelay(42950UL);
  status = (*(hw->phy.ops.read_reg))(hw, 1U, 30U, & phy_data);
  phy_link = (unsigned int )phy_data & 8U;   /* link-up status bit */
  phy_speed = (unsigned int )phy_data & 16U; /* speed status bit */
  if ((unsigned int )phy_link == 8U) {
    *link_up = 1;
    if ((unsigned int )phy_speed == 16U) { *speed = 32U; } else { }
    goto ldv_55645;
  } else { }
  time_out = time_out + 1U;
  ldv_55647: ;
  if (time_out < max_time_out) { goto ldv_55646; } else { }
  ldv_55645: ;
  return (status);
} }
/*
 * ixgbe_setup_phy_link_tnx - TNX variant of the autoneg setup. Identical shape
 * to ixgbe_setup_phy_link_generic except the 1G advertisement lives in
 * MMD 7 register 23U (bit 16384U) instead of 50176U, and it returns 0
 * unconditionally.
 */
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw )
{ u16 autoneg_reg ;
  bool autoneg ;
  ixgbe_link_speed speed ;
  bool tmp ;
  {
  autoneg_reg = 0U;
  autoneg = 0;
  ixgbe_get_copper_link_capabilities_generic(hw, & speed, & autoneg);
  if ((speed & 128U) != 0U) {
    (*(hw->phy.ops.read_reg))(hw, 32U, 7U, & autoneg_reg);
    autoneg_reg = (unsigned int )autoneg_reg & 61439U;
    if ((hw->phy.autoneg_advertised & 128U) != 0U) {
      autoneg_reg = (u16 )((unsigned int )autoneg_reg | 4096U);
    } else { }
    (*(hw->phy.ops.write_reg))(hw, 32U, 7U, (int )autoneg_reg);
  } else { }
  if ((speed & 32U) != 0U) {
    (*(hw->phy.ops.read_reg))(hw, 23U, 7U, & autoneg_reg);
    autoneg_reg = (unsigned int )autoneg_reg & 49151U;
    if ((hw->phy.autoneg_advertised & 32U) != 0U) {
      autoneg_reg = (u16 )((unsigned int )autoneg_reg | 16384U);
    } else { }
    (*(hw->phy.ops.write_reg))(hw, 23U, 7U, (int )autoneg_reg);
  } else { }
  if ((speed & 8U) != 0U) {
    (*(hw->phy.ops.read_reg))(hw, 16U, 7U, & autoneg_reg);
    autoneg_reg = (unsigned int )autoneg_reg & 65151U;
    if ((hw->phy.autoneg_advertised & 8U) != 0U) {
      autoneg_reg = (u16 )((unsigned int )autoneg_reg | 256U);
    } else { }
    (*(hw->phy.ops.write_reg))(hw, 16U, 7U, (int )autoneg_reg);
  } else { }
  tmp = ixgbe_check_reset_blocked(hw);
  if ((int )tmp) { return (0); } else { }
  /* Restart autonegotiation. */
  (*(hw->phy.ops.read_reg))(hw, 0U, 7U, & autoneg_reg);
  autoneg_reg = (u16 )((unsigned int )autoneg_reg | 512U);
  (*(hw->phy.ops.write_reg))(hw, 0U, 7U, (int )autoneg_reg);
  return (0);
} }
/* Read the TNX PHY firmware version (vendor register 11U, MMD 30U). */
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw , u16 *firmware_version )
{ s32 status ;
  {
  status = (*(hw->phy.ops.read_reg))(hw, 11U, 30U, firmware_version);
  return (status);
} }
/* Generic PHY firmware version read (vendor register 32U, MMD 30U). */
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw , u16 *firmware_version )
{ s32 status ;
  {
  status = (*(hw->phy.ops.read_reg))(hw, 32U, 30U, firmware_version);
  return (status);
} }
/*
 * ixgbe_reset_phy_nl - reset the PHY and replay the EEPROM-scripted init
 * sequence (delay / data / control records). Declarations only begin here;
 * the body continues on the following chunk lines.
 */
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw )
{ u16 phy_offset ;
  u16 control ;
  u16 eword ;
  u16 edata ;
  u16 block_crc ;
  bool end_data ;
  u16 list_offset ;
  u16 data_offset ;
  u16 phy_data ;
  s32 ret_val ;
  u32 i ;
  bool tmp ;
  struct _ddebug descriptor ;
  long tmp___0 ;
  struct _ddebug descriptor___0 ;
  long tmp___1 ;
  struct _ddebug descriptor___1 ;
  long tmp___2 ;
  u16 tmp___3 ;
  struct _ddebug descriptor___2 ;
  long tmp___4 ;
  struct _ddebug descriptor___3 ;
  long tmp___5 ;
  struct _ddebug descriptor___4 ;
  long tmp___6 ;
  struct _ddebug descriptor___5 ;
  long tmp___7 ;
  struct /* declaration continues in the next chunk line */
/* Continuation of ixgbe_reset_phy_nl's declaration list and body. */
_ddebug descriptor___6 ;
  long tmp___8 ;
  struct _ddebug descriptor___7 ;
  long tmp___9 ;
  {
  end_data = 0;
  phy_data = 0U;
  /* Skip the reset entirely when firmware blocks PHY resets. */
  tmp = ixgbe_check_reset_blocked(hw);
  if ((int )tmp) { return (0); } else { }
  /* Set the PHY reset bit (32768U) in MMD 4 control and poll it clearing. */
  (*(hw->phy.ops.read_reg))(hw, 0U, 4U, & phy_data);
  (*(hw->phy.ops.write_reg))(hw, 0U, 4U, (int )((unsigned int )phy_data | 32768U));
  i = 0U;
  goto ldv_55680;
  ldv_55679: (*(hw->phy.ops.read_reg))(hw, 0U, 4U, & phy_data);
  /* Top bit clear (value non-negative as short) means reset finished. */
  if ((int )((short )phy_data) >= 0) { goto ldv_55678; } else { }
  usleep_range(10000UL, 20000UL);
  i = i + 1U;
  ldv_55680: ;
  if (i <= 99U) { goto ldv_55679; } else { }
  ldv_55678: ;
  if ((int )((short )phy_data) < 0) {
    descriptor.modname = "ixgbe";
    descriptor.function = "ixgbe_reset_phy_nl";
    descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
    descriptor.format = "PHY reset did not complete.\n";
    descriptor.lineno = 968U;
    descriptor.flags = 0U;
    tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L);
    if (tmp___0 != 0L) {
      __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "PHY reset did not complete.\n");
    } else { }
    return (-3);
  } else { }
  /* Locate the per-SFP init script in EEPROM, then interpret its records. */
  ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, & list_offset, & data_offset);
  if (ret_val != 0) { return (ret_val); } else { }
  ret_val = (*(hw->eeprom.ops.read))(hw, (int )data_offset, & block_crc);
  data_offset = (u16 )((int )data_offset + 1);
  goto ldv_55701;
  ldv_55700: ret_val = (*(hw->eeprom.ops.read))(hw, (int )data_offset, & eword);
  if (ret_val != 0) { goto err_eeprom; } else { }
  /* Each record word: top 4 bits = opcode, low 12 bits = payload. */
  control = (int )eword >> 12;
  edata = (unsigned int )eword & 4095U;
  switch ((int )control) {
  case 0: /* DELAY record: sleep edata milliseconds. */
  data_offset = (u16 )((int )data_offset + 1);
  descriptor___0.modname = "ixgbe";
  descriptor___0.function = "ixgbe_reset_phy_nl";
  descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
  descriptor___0.format = "DELAY: %d MS\n";
  descriptor___0.lineno = 993U;
  descriptor___0.flags = 0U;
  tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L);
  if (tmp___1 != 0L) {
    __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "DELAY: %d MS\n", (int )edata);
  } else { }
  usleep_range((unsigned long )((int )edata * 1000), (unsigned long )((int )edata * 2000));
  goto ldv_55686;
  case 1: /* DATA record: write edata words from EEPROM to consecutive PHY regs. */
  descriptor___1.modname = "ixgbe";
  descriptor___1.function = "ixgbe_reset_phy_nl";
  descriptor___1.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
  descriptor___1.format = "DATA:\n";
  descriptor___1.lineno = 997U;
  descriptor___1.flags = 0U;
  tmp___2 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L);
  if (tmp___2 != 0L) {
    __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "DATA:\n");
  } else { }
  data_offset = (u16 )((int )data_offset + 1);
  tmp___3 = data_offset;
  data_offset = (u16 )((int )data_offset + 1);
  ret_val = (*(hw->eeprom.ops.read))(hw, (int )tmp___3, & phy_offset);
  if (ret_val != 0) { goto err_eeprom; } else { }
  i = 0U;
  goto ldv_55691;
  ldv_55690: ret_val = (*(hw->eeprom.ops.read))(hw, (int )data_offset, & eword);
  if (ret_val != 0) { goto err_eeprom; } else { }
  (*(hw->phy.ops.write_reg))(hw, (u32 )phy_offset, 1U, (int )eword);
  descriptor___2.modname = "ixgbe";
  descriptor___2.function = "ixgbe_reset_phy_nl";
  descriptor___2.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
  descriptor___2.format = "Wrote %4.4x to %4.4x\n";
  descriptor___2.lineno = 1011U;
  descriptor___2.flags = 0U;
  tmp___4 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L);
  if (tmp___4 != 0L) {
    __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Wrote %4.4x to %4.4x\n", (int )eword, (int )phy_offset);
  } else { }
  data_offset = (u16 )((int )data_offset + 1);
  phy_offset = (u16 )((int )phy_offset + 1);
  i = i + 1U;
  ldv_55691: ;
  if ((u32 )edata > i) { goto ldv_55690; } else { }
  goto ldv_55686;
  case 15: /* CONTROL record: 4095U = end-of-list, 0U = start-of-list. */
  data_offset = (u16 )((int )data_offset + 1);
  descriptor___3.modname = "ixgbe";
  descriptor___3.function = "ixgbe_reset_phy_nl";
  descriptor___3.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
  descriptor___3.format = "CONTROL:\n";
  descriptor___3.lineno = 1018U;
  descriptor___3.flags = 0U;
  tmp___5 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L);
  if (tmp___5 != 0L) {
    __dynamic_netdev_dbg(& descriptor___3, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "CONTROL:\n");
  } else { }
  if ((unsigned int )edata == 4095U) {
    descriptor___4.modname = "ixgbe";
    descriptor___4.function = "ixgbe_reset_phy_nl";
    descriptor___4.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
    descriptor___4.format = "EOL\n";
    descriptor___4.lineno = 1020U;
    descriptor___4.flags = 0U;
    tmp___6 = ldv__builtin_expect((long )descriptor___4.flags & 1L, 0L);
    if (tmp___6 != 0L) {
      __dynamic_netdev_dbg(& descriptor___4, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EOL\n");
    } else { }
    end_data = 1;
  } else if ((unsigned int )edata == 0U) {
    descriptor___5.modname = "ixgbe";
    descriptor___5.function = "ixgbe_reset_phy_nl";
    descriptor___5.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
    descriptor___5.format = "SOL\n";
    descriptor___5.lineno = 1023U;
    descriptor___5.flags = 0U;
    tmp___7 = ldv__builtin_expect((long )descriptor___5.flags & 1L, 0L);
    if (tmp___7 != 0L) {
      __dynamic_netdev_dbg(& descriptor___5, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "SOL\n");
    } else { }
  } else {
    descriptor___6.modname = "ixgbe";
    descriptor___6.function = "ixgbe_reset_phy_nl";
    descriptor___6.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
    descriptor___6.format = "Bad control value\n";
    descriptor___6.lineno = 1025U;
    descriptor___6.flags = 0U;
    tmp___8 = ldv__builtin_expect((long )descriptor___6.flags & 1L, 0L);
    if (tmp___8 != 0L) {
      __dynamic_netdev_dbg(& descriptor___6, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Bad control value\n");
    } else { }
    return (-3);
  }
  goto ldv_55686;
  default: /* Unknown opcode: abort the init sequence. */
  descriptor___7.modname = "ixgbe";
  descriptor___7.function = "ixgbe_reset_phy_nl";
  descriptor___7.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
  descriptor___7.format = "Bad control type\n";
  descriptor___7.lineno = 1030U;
  descriptor___7.flags = 0U;
  tmp___9 = ldv__builtin_expect((long )descriptor___7.flags & 1L, 0L);
  if (tmp___9 != 0L) {
    __dynamic_netdev_dbg(& descriptor___7, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Bad control type\n");
  } else { }
  return (-3);
  }
  ldv_55686: ;
  ldv_55701: ;
  if (! end_data) { goto ldv_55700; } else { }
  return (ret_val);
  err_eeprom:
  netdev_err((struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "eeprom read at offset %d failed\n", (int )data_offset);
  return (-3);
} }
/*
 * ixgbe_identify_module_generic - dispatch module identification by media type
 * (1U = fiber/SFP path, 2U = QSFP path). Returns -20 for unknown media after
 * marking sfp_type as not-present (65534).
 */
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw )
{ enum ixgbe_media_type tmp ;
  s32 tmp___0 ;
  s32 tmp___1 ;
  {
  tmp = (*(hw->mac.ops.get_media_type))(hw);
  switch ((unsigned int )tmp) {
  case 1U: tmp___0 = ixgbe_identify_sfp_module_generic(hw); return (tmp___0);
  case 2U: tmp___1 = ixgbe_identify_qsfp_module_generic(hw); return (tmp___1);
  default: hw->phy.sfp_type = 65534; return (-20);
  }
  return (-20); /* unreachable; kept for the flattened CFG */
} }
/*
 * ixgbe_identify_sfp_module_generic - classify an installed SFP+ module from
 * its I2C EEPROM (identifier, 1G/10G compliance codes, cable technology,
 * vendor OUI) and set hw->phy.sfp_type / hw->phy.type accordingly.
 * Body continues on the following chunk lines.
 */
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw )
{ struct ixgbe_adapter *adapter ;
  s32 status ;
  u32 vendor_oui ;
  enum ixgbe_sfp_type stored_sfp_type ;
  u8 identifier ;
  u8 comp_codes_1g ;
  u8 comp_codes_10g ;
  u8 oui_bytes[3U] ;
  u8 cable_tech ;
  u8 cable_spec ;
  u16 enforce_sfp ;
  enum ixgbe_media_type tmp ;
  struct _ddebug descriptor ;
  long tmp___0 ;
  {
  adapter = (struct ixgbe_adapter *)hw->back;
  vendor_oui = 0U;
  stored_sfp_type = hw->phy.sfp_type;
  identifier = 0U;
  comp_codes_1g = 0U;
  comp_codes_10g = 0U;
  oui_bytes[0] = 0U;
  oui_bytes[1] = 0U;
  oui_bytes[2] = 0U;
  cable_tech = 0U;
  cable_spec = 0U;
  enforce_sfp = 0U;
  tmp = (*(hw->mac.ops.get_media_type))(hw);
  if ((unsigned int )tmp != 1U) {
    hw->phy.sfp_type = 65534;
    return (-20);
  } else { }
  /* EEPROM byte 0: module identifier; 3U means SFP/SFP+. */
  status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 0, & identifier);
  if (status != 0) { goto err_read_i2c_eeprom; } else { }
  (*(hw->mac.ops.set_lan_id))(hw);
  if ((unsigned int )identifier != 3U) {
    hw->phy.type = 23;
    return (-19);
  } else { }
  status = /* statement continues in the next chunk line */
/* Continuation of ixgbe_identify_sfp_module_generic: read compliance codes. */
(*(hw->phy.ops.read_i2c_eeprom))(hw, 6, & comp_codes_1g);
  if (status != 0) { goto err_read_i2c_eeprom; } else { }
  status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 3, & comp_codes_10g);
  if (status != 0) { goto err_read_i2c_eeprom; } else { }
  status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 8, & cable_tech);
  if (status != 0) { goto err_read_i2c_eeprom; } else { }
  /* Map compliance/cable bits to an sfp_type; the mapping differs per MAC
   * generation (mac.type 1U vs 2U), and 2U variants are split by LAN port. */
  if ((unsigned int )hw->mac.type == 1U) {
    if (((int )cable_tech & 4) != 0) { hw->phy.sfp_type = 0; }
    else if (((int )comp_codes_10g & 16) != 0) { hw->phy.sfp_type = 1; }
    else if (((int )comp_codes_10g & 32) != 0) { hw->phy.sfp_type = 2; }
    else { hw->phy.sfp_type = 65535; }
  } else if ((unsigned int )hw->mac.type == 2U) {
    if (((int )cable_tech & 4) != 0) {
      /* Passive DA copper. */
      if ((unsigned int )hw->bus.lan_id == 0U) { hw->phy.sfp_type = 3; } else { hw->phy.sfp_type = 4; }
    } else if (((int )cable_tech & 8) != 0) {
      /* Active DA: check cable spec byte 60 for active-limiting support. */
      (*(hw->phy.ops.read_i2c_eeprom))(hw, 60, & cable_spec);
      if (((int )cable_spec & 4) != 0) {
        if ((unsigned int )hw->bus.lan_id == 0U) { hw->phy.sfp_type = 7; } else { hw->phy.sfp_type = 8; }
      } else {
        hw->phy.sfp_type = 65535;
      }
    } else if (((int )comp_codes_10g & 48) != 0) {
      if ((unsigned int )hw->bus.lan_id == 0U) { hw->phy.sfp_type = 5; } else { hw->phy.sfp_type = 6; }
    } else if (((int )comp_codes_1g & 8) != 0) {
      if ((unsigned int )hw->bus.lan_id == 0U) { hw->phy.sfp_type = 9; } else { hw->phy.sfp_type = 10; }
    } else if ((int )comp_codes_1g & 1) {
      if ((unsigned int )hw->bus.lan_id == 0U) { hw->phy.sfp_type = 11; } else { hw->phy.sfp_type = 12; }
    } else if (((int )comp_codes_1g & 2) != 0) {
      if ((unsigned int )hw->bus.lan_id == 0U) { hw->phy.sfp_type = 13; } else { hw->phy.sfp_type = 14; }
    } else {
      hw->phy.sfp_type = 65535;
    }
  } else { }
  if ((unsigned int )hw->phy.sfp_type != (unsigned int )stored_sfp_type) {
    hw->phy.sfp_setup_needed = 1;
  } else { }
  /* Multispeed fiber: both a 1G and the matching 10G compliance bit set. */
  hw->phy.multispeed_fiber = 0;
  if (((int )comp_codes_1g & 1 && ((int )comp_codes_10g & 16) != 0) || (((int )comp_codes_1g & 2) != 0 && ((int )comp_codes_10g & 32) != 0)) {
    hw->phy.multispeed_fiber = 1;
  } else { }
  /* Unless already identified (type 10U), read the vendor OUI (bytes 37-39)
   * and refine hw->phy.type per known vendors. */
  if ((unsigned int )hw->phy.type != 10U) {
    hw->phy.id = (u32 )identifier;
    status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 37, (u8 *)(& oui_bytes));
    if (status != 0) { goto err_read_i2c_eeprom; } else { }
    status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 38, (u8 *)(& oui_bytes) + 1UL);
    if (status != 0) { goto err_read_i2c_eeprom; } else { }
    status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 39, (u8 *)(& oui_bytes) + 2UL);
    if (status != 0) { goto err_read_i2c_eeprom; } else { }
    vendor_oui = (u32 )((((int )oui_bytes[0] << 24) | ((int )oui_bytes[1] << 16)) | ((int )oui_bytes[2] << 8));
    switch (vendor_oui) {
    case 4224512U: ;
    if (((int )cable_tech & 4) != 0) { hw->phy.type = 11; } else { }
    goto ldv_55725;
    case 9463040U: ;
    if (((int )cable_tech & 8) != 0) { hw->phy.type = 16; } else { hw->phy.type = 15; }
    goto ldv_55725;
    case 1534464U: hw->phy.type = 14; goto ldv_55725;
    case 1777920U: hw->phy.type = 18; goto ldv_55725;
    default: ;
    if (((int )cable_tech & 4) != 0) { hw->phy.type = 12; }
    else if (((int )cable_tech & 8) != 0) { hw->phy.type = 13; }
    else { hw->phy.type = 17; }
    goto ldv_55725;
    }
    ldv_55725: ;
  } else { }
  /* Any DA copper (bits 4|8) is always allowed. */
  if (((int )cable_tech & 12) != 0) { return (0); } else { }
  /* Reject modules with no 10G capability unless they are a known 1G type. */
  if ((unsigned int )comp_codes_10g == 0U && ((((((unsigned int )hw->phy.sfp_type != 10U && (unsigned int )hw->phy.sfp_type != 9U) && (unsigned int )hw->phy.sfp_type != 13U) && (unsigned int )hw->phy.sfp_type != 14U) && (unsigned int )hw->phy.sfp_type != 11U) && (unsigned int )hw->phy.sfp_type != 12U)) {
    hw->phy.type = 23;
    return (-19);
  } else { }
  /* First-generation MACs do not enforce supported-module checks. */
  if ((unsigned int )hw->mac.type == 1U) { return (0); } else { }
  (*(hw->mac.ops.get_device_caps))(hw, & enforce_sfp);
  if (((int )enforce_sfp & 1) == 0 && ((((((unsigned int )hw->phy.sfp_type != 9U && (unsigned int )hw->phy.sfp_type != 10U) && (unsigned int )hw->phy.sfp_type != 13U) && (unsigned int )hw->phy.sfp_type != 14U) && (unsigned int )hw->phy.sfp_type != 11U) && (unsigned int )hw->phy.sfp_type != 12U)) {
    if ((unsigned int )hw->phy.type == 18U) { return (0); } else { }
    if ((int )hw->allow_unsupported_sfp) {
      /* User opted in to unsupported modules: warn once via msg_enable bit 0. */
      if ((int )adapter->msg_enable & 1) {
        netdev_warn((struct net_device const *)adapter->netdev, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
      } else { }
      return (0);
    } else { }
    descriptor.modname = "ixgbe";
    descriptor.function = "ixgbe_identify_sfp_module_generic";
    descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
    descriptor.format = "SFP+ module not supported\n";
    descriptor.lineno = 1313U;
    descriptor.flags = 0U;
    tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L);
    if (tmp___0 != 0L) {
      __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "SFP+ module not supported\n");
    } else { }
    hw->phy.type = 23;
    return (-19);
  } else { }
  return (0);
  err_read_i2c_eeprom:
  /* I2C failure: mark module not present; drop identity unless type 10U. */
  hw->phy.sfp_type = 65534;
  if ((unsigned int )hw->phy.type != 10U) {
    hw->phy.id = 0U;
    hw->phy.type = 0;
  } else { }
  return (-20);
} }
/*
 * ixgbe_identify_qsfp_module_generic - QSFP+ counterpart of the SFP
 * identification above; reads identifier/compliance bytes from the QSFP
 * EEPROM map. Declarations and zero-initialization begin here; the body
 * continues on the following chunk lines.
 */
static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw )
{ struct ixgbe_adapter *adapter ;
  s32 status ;
  u32 vendor_oui ;
  enum ixgbe_sfp_type stored_sfp_type ;
  u8 identifier ;
  u8 comp_codes_1g ;
  u8 comp_codes_10g ;
  u8 oui_bytes[3U] ;
  u16 enforce_sfp ;
  u8 connector ;
  u8 cable_length ;
  u8 device_tech ;
  bool active_cable ;
  enum ixgbe_media_type tmp ;
  struct _ddebug descriptor ;
  long tmp___0 ;
  {
  adapter = (struct ixgbe_adapter *)hw->back;
  vendor_oui = 0U;
  stored_sfp_type = hw->phy.sfp_type;
  identifier = 0U;
  comp_codes_1g = 0U;
  comp_codes_10g = 0U;
  oui_bytes[0] = 0U;
  oui_bytes[1] = 0U; /* initialization continues in the next chunk line */
/* Continuation of ixgbe_identify_qsfp_module_generic's initialization/body. */
oui_bytes[2] = 0U;
  enforce_sfp = 0U;
  connector = 0U;
  cable_length = 0U;
  device_tech = 0U;
  active_cable = 0;
  tmp = (*(hw->mac.ops.get_media_type))(hw);
  if ((unsigned int )tmp != 2U) {
    hw->phy.sfp_type = 65534;
    return (-20);
  } else { }
  (*(hw->mac.ops.set_lan_id))(hw);
  /* EEPROM byte 0: identifier; 13U means QSFP+. */
  status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 0, & identifier);
  if (status != 0) { goto err_read_i2c_eeprom; } else { }
  if ((unsigned int )identifier != 13U) {
    hw->phy.type = 23;
    return (-19);
  } else { }
  hw->phy.id = (u32 )identifier;
  /* QSFP compliance codes live at bytes 131 (10G) and 134 (1G). */
  status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 131, & comp_codes_10g);
  if (status != 0) { goto err_read_i2c_eeprom; } else { }
  status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 134, & comp_codes_1g);
  if (status != 0) { goto err_read_i2c_eeprom; } else { }
  if (((int )comp_codes_10g & 8) != 0) {
    /* Passive DA copper. */
    hw->phy.type = 19;
    if ((unsigned int )hw->bus.lan_id == 0U) { hw->phy.sfp_type = 3; } else { hw->phy.sfp_type = 4; }
  } else if (((int )comp_codes_10g & 48) != 0) {
    /* SR/LR optics. */
    if ((unsigned int )hw->bus.lan_id == 0U) { hw->phy.sfp_type = 5; } else { hw->phy.sfp_type = 6; }
  } else {
    if ((int )comp_codes_10g & 1) { active_cable = 1; } else { }
    if (! active_cable) {
      /* Some active DA cables omit the compliance bit: infer from the
       * connector (35U), a non-zero cable length, and transmitter tech. */
      (*(hw->phy.ops.read_i2c_eeprom))(hw, 130, & connector);
      (*(hw->phy.ops.read_i2c_eeprom))(hw, 146, & cable_length);
      (*(hw->phy.ops.read_i2c_eeprom))(hw, 147, & device_tech);
      if (((unsigned int )connector == 35U && (unsigned int )cable_length != 0U) && (unsigned int )((int )device_tech >> 4) == 0U) {
        active_cable = 1;
      } else { }
    } else { }
    if ((int )active_cable) {
      hw->phy.type = 20;
      if ((unsigned int )hw->bus.lan_id == 0U) { hw->phy.sfp_type = 7; } else { hw->phy.sfp_type = 8; }
    } else {
      hw->phy.type = 23;
      return (-19);
    }
  }
  if ((unsigned int )hw->phy.sfp_type != (unsigned int )stored_sfp_type) {
    hw->phy.sfp_setup_needed = 1;
  } else { }
  hw->phy.multispeed_fiber = 0;
  if (((int )comp_codes_1g & 1 && ((int )comp_codes_10g & 16) != 0) || (((int )comp_codes_1g & 2) != 0 && ((int )comp_codes_10g & 32) != 0)) {
    hw->phy.multispeed_fiber = 1;
  } else { }
  if (((int )comp_codes_10g & 48) != 0) {
    /* Optical module: read vendor OUI (bytes 165-167); 1777920U = Intel. */
    status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 165, (u8 *)(& oui_bytes));
    if (status != 0) { goto err_read_i2c_eeprom; } else { }
    status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 166, (u8 *)(& oui_bytes) + 1UL);
    if (status != 0) { goto err_read_i2c_eeprom; } else { }
    status = (*(hw->phy.ops.read_i2c_eeprom))(hw, 167, (u8 *)(& oui_bytes) + 2UL);
    if (status != 0) { goto err_read_i2c_eeprom; } else { }
    vendor_oui = (u32 )((((int )oui_bytes[0] << 24) | ((int )oui_bytes[1] << 16)) | ((int )oui_bytes[2] << 8));
    if (vendor_oui == 1777920U) { hw->phy.type = 21; } else { hw->phy.type = 22; }
    (*(hw->mac.ops.get_device_caps))(hw, & enforce_sfp);
    if (((int )enforce_sfp & 1) == 0) {
      if ((unsigned int )hw->phy.type == 21U) { return (0); } else { }
      if ((int )hw->allow_unsupported_sfp) {
        if ((int )adapter->msg_enable & 1) {
          netdev_warn((struct net_device const *)adapter->netdev, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
        } else { }
        return (0);
      } else { }
      descriptor.modname = "ixgbe";
      descriptor.function = "ixgbe_identify_qsfp_module_generic";
      descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
      descriptor.format = "QSFP module not supported\n";
      descriptor.lineno = 1492U;
      descriptor.flags = 0U;
      tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L);
      if (tmp___0 != 0L) {
        __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "QSFP module not supported\n");
      } else { }
      hw->phy.type = 23;
      return (-19);
    } else { }
    return (0);
  } else { }
  return (0);
  err_read_i2c_eeprom:
  hw->phy.sfp_type = 65534;
  hw->phy.id = 0U;
  hw->phy.type = 0;
  return (-20);
} }
/*
 * ixgbe_get_sfp_init_sequence_offsets - walk the EEPROM's per-SFP-type list
 * (root pointer at word 43) to find the init-sequence offsets for the current
 * hw->phy.sfp_type. Errors: -19 unknown/unsupported type, -20 not present,
 * -21 bad list pointer, -3 EEPROM read failure. Body continues on the
 * following chunk lines.
 */
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw , u16 *list_offset , u16 *data_offset )
{ u16 sfp_id ;
  u16 sfp_type ;
  s32 tmp ;
  s32 tmp___0 ;
  s32 tmp___1 ;
  struct _ddebug descriptor ;
  long tmp___2 ;
  s32 tmp___3 ;
  struct _ddebug descriptor___0 ;
  long tmp___4 ;
  {
  sfp_type = (u16 )hw->phy.sfp_type;
  if ((unsigned int )hw->phy.sfp_type == 65535U) { return (-19); } else { }
  if ((unsigned int )hw->phy.sfp_type == 65534U) { return (-20); } else { }
  /* Device 4321U with sfp_type 0 is explicitly unsupported. */
  if ((unsigned int )hw->device_id == 4321U && (unsigned int )hw->phy.sfp_type == 0U) { return (-19); } else { }
  /* 1G module types reuse the SR/LR (5U/6U) init sequences per LAN port. */
  if ((((unsigned int )sfp_type == 7U || (unsigned int )sfp_type == 13U) || (unsigned int )sfp_type == 9U) || (unsigned int )sfp_type == 11U) {
    sfp_type = 5U;
  } else if ((((unsigned int )sfp_type == 8U || (unsigned int )sfp_type == 14U) || (unsigned int )sfp_type == 10U) || (unsigned int )sfp_type == 12U) {
    sfp_type = 6U;
  } else { }
  tmp = (*(hw->eeprom.ops.read))(hw, 43, list_offset);
  if (tmp != 0) {
    netdev_err((struct net_device const *)((struct /* expression continues in the next chunk line */
/* Continuation of ixgbe_get_sfp_init_sequence_offsets: list walk. */
ixgbe_adapter *)hw->back)->netdev, "eeprom read at %d failed\n", 43);
    return (-21);
  } else { }
  if ((unsigned int )*list_offset == 0U || (unsigned int )*list_offset == 65535U) { return (-21); } else { }
  /* Skip the list header word, then scan (id, data_offset) pairs until the
   * matching sfp_type or the 65535U terminator is found. */
  *list_offset = (u16 )((int )*list_offset + 1);
  tmp___0 = (*(hw->eeprom.ops.read))(hw, (int )*list_offset, & sfp_id);
  if (tmp___0 != 0) { goto err_phy; } else { }
  goto ldv_55763;
  ldv_55762: ;
  if ((int )sfp_id == (int )sfp_type) {
    *list_offset = (u16 )((int )*list_offset + 1);
    tmp___1 = (*(hw->eeprom.ops.read))(hw, (int )*list_offset, data_offset);
    if (tmp___1 != 0) { goto err_phy; } else { }
    if ((unsigned int )*data_offset == 0U || (unsigned int )*data_offset == 65535U) {
      descriptor.modname = "ixgbe";
      descriptor.function = "ixgbe_get_sfp_init_sequence_offsets";
      descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
      descriptor.format = "SFP+ module not supported\n";
      descriptor.lineno = 1575U;
      descriptor.flags = 0U;
      tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L);
      if (tmp___2 != 0L) {
        __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "SFP+ module not supported\n");
      } else { }
      return (-19);
    } else {
      goto ldv_55761; /* found a valid entry */
    }
  } else {
    /* Not a match: step over this entry (id + offset = 2 words). */
    *list_offset = (unsigned int )*list_offset + 2U;
    tmp___3 = (*(hw->eeprom.ops.read))(hw, (int )*list_offset, & sfp_id);
    if (tmp___3 != 0) { goto err_phy; } else { }
  }
  ldv_55763: ;
  if ((unsigned int )sfp_id != 65535U) { goto ldv_55762; } else { }
  ldv_55761: ;
  if ((unsigned int )sfp_id == 65535U) {
    descriptor___0.modname = "ixgbe";
    descriptor___0.function = "ixgbe_get_sfp_init_sequence_offsets";
    descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
    descriptor___0.format = "No matching SFP+ module found\n";
    descriptor___0.lineno = 1588U;
    descriptor___0.flags = 0U;
    tmp___4 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L);
    if (tmp___4 != 0L) {
      __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "No matching SFP+ module found\n");
    } else { }
    return (-19);
  } else { }
  return (0);
  err_phy:
  netdev_err((struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "eeprom read at offset %d failed\n", (int )*list_offset);
  return (-3);
} }
/* Read one SFP EEPROM byte at I2C address 160 (0xA0). */
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw , u8 byte_offset , u8 *eeprom_data )
{ s32 tmp ;
  {
  tmp = (*(hw->phy.ops.read_i2c_byte))(hw, (int )byte_offset, 160, eeprom_data);
  return (tmp);
} }
/* Read one SFF-8472 diagnostics byte at I2C address 162 (0xA2). */
s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw , u8 byte_offset , u8 *sff8472_data )
{ s32 tmp ;
  {
  tmp = (*(hw->phy.ops.read_i2c_byte))(hw, (int )byte_offset, 162, sff8472_data);
  return (tmp);
} }
/* Write one SFP EEPROM byte at I2C address 160 (0xA0). */
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw , u8 byte_offset , u8 eeprom_data )
{ s32 tmp ;
  {
  tmp = (*(hw->phy.ops.write_i2c_byte))(hw, (int )byte_offset, 160, (int )eeprom_data);
  return (tmp);
} }
/*
 * ixgbe_read_i2c_byte_generic - bit-banged I2C combined write/read of a single
 * byte under the SW/FW semaphore, with up to 10 retries (bus is cleared and
 * the semaphore dropped/reacquired between attempts). Body continues on the
 * following chunk lines.
 */
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw , u8 byte_offset , u8 dev_addr , u8 *data )
{ s32 status ;
  u32 max_retry ;
  u32 retry ;
  u32 swfw_mask ;
  bool nack ;
  s32 tmp ;
  struct _ddebug descriptor ;
  long tmp___0 ;
  struct _ddebug descriptor___0 ;
  long tmp___1 ;
  {
  max_retry = 10U;
  retry = 0U;
  swfw_mask = hw->phy.phy_semaphore_mask;
  nack = 1;
  *data = 0U;
  ldv_55796: tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, swfw_mask);
  if (tmp != 0) { return (-16); } else { }
  /* Address phase: START, device address (write), register offset. */
  ixgbe_i2c_start(hw);
  status = ixgbe_clock_out_i2c_byte(hw, (int )dev_addr);
  if (status != 0) { goto fail; } else { }
status = ixgbe_get_i2c_ack(hw); if (status != 0) { goto fail; } else { } status = ixgbe_clock_out_i2c_byte(hw, (int )byte_offset); if (status != 0) { goto fail; } else { } status = ixgbe_get_i2c_ack(hw); if (status != 0) { goto fail; } else { } ixgbe_i2c_start(hw); status = ixgbe_clock_out_i2c_byte(hw, (int )((unsigned int )dev_addr | 1U)); if (status != 0) { goto fail; } else { } status = ixgbe_get_i2c_ack(hw); if (status != 0) { goto fail; } else { } status = ixgbe_clock_in_i2c_byte(hw, data); if (status != 0) { goto fail; } else { } status = ixgbe_clock_out_i2c_bit(hw, (int )nack); if (status != 0) { goto fail; } else { } ixgbe_i2c_stop(hw); goto ldv_55792; fail: ixgbe_i2c_bus_clear(hw); (*(hw->mac.ops.release_swfw_sync))(hw, swfw_mask); msleep(100U); retry = retry + 1U; if (retry < max_retry) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_read_i2c_byte_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor.format = "I2C byte read error - Retrying.\n"; descriptor.lineno = 1717U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "I2C byte read error - Retrying.\n"); } else { } } else { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_read_i2c_byte_generic"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor___0.format = "I2C byte read error.\n"; descriptor___0.lineno = 1719U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long 
)descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "I2C byte read error.\n"); } else { } } if (retry < max_retry) { goto ldv_55796; } else { } ldv_55792: (*(hw->mac.ops.release_swfw_sync))(hw, swfw_mask); return (status); } } s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw , u8 byte_offset , u8 dev_addr , u8 data ) { s32 status ; u32 max_retry ; u32 retry ; u32 swfw_mask ; s32 tmp ; struct _ddebug descriptor ; long tmp___0 ; struct _ddebug descriptor___0 ; long tmp___1 ; { max_retry = 1U; retry = 0U; swfw_mask = hw->phy.phy_semaphore_mask; tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, swfw_mask); if (tmp != 0) { return (-16); } else { } ldv_55812: ixgbe_i2c_start(hw); status = ixgbe_clock_out_i2c_byte(hw, (int )dev_addr); if (status != 0) { goto fail; } else { } status = ixgbe_get_i2c_ack(hw); if (status != 0) { goto fail; } else { } status = ixgbe_clock_out_i2c_byte(hw, (int )byte_offset); if (status != 0) { goto fail; } else { } status = ixgbe_get_i2c_ack(hw); if (status != 0) { goto fail; } else { } status = ixgbe_clock_out_i2c_byte(hw, (int )data); if (status != 0) { goto fail; } else { } status = ixgbe_get_i2c_ack(hw); if (status != 0) { goto fail; } else { } ixgbe_i2c_stop(hw); goto ldv_55808; fail: ixgbe_i2c_bus_clear(hw); retry = retry + 1U; if (retry < max_retry) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_write_i2c_byte_generic"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor.format = "I2C byte write error - Retrying.\n"; descriptor.lineno = 1782U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const 
*)((struct ixgbe_adapter *)hw->back)->netdev, "I2C byte write error - Retrying.\n"); } else { } } else { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_write_i2c_byte_generic"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor___0.format = "I2C byte write error.\n"; descriptor___0.lineno = 1784U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "I2C byte write error.\n"); } else { } } if (retry < max_retry) { goto ldv_55812; } else { } ldv_55808: (*(hw->mac.ops.release_swfw_sync))(hw, swfw_mask); return (status); } } static void ixgbe_i2c_start(struct ixgbe_hw *hw ) { u32 i2cctl ; u32 tmp ; { tmp = ixgbe_read_reg(hw, *(hw->mvals + 23UL)); i2cctl = tmp; ixgbe_set_i2c_data(hw, & i2cctl, 1); ixgbe_raise_i2c_clk(hw, & i2cctl); __const_udelay(21475UL); ixgbe_set_i2c_data(hw, & i2cctl, 0); __const_udelay(17180UL); ixgbe_lower_i2c_clk(hw, & i2cctl); __const_udelay(21475UL); return; } } static void ixgbe_i2c_stop(struct ixgbe_hw *hw ) { u32 i2cctl ; u32 tmp ; { tmp = ixgbe_read_reg(hw, *(hw->mvals + 23UL)); i2cctl = tmp; ixgbe_set_i2c_data(hw, & i2cctl, 0); ixgbe_raise_i2c_clk(hw, & i2cctl); __const_udelay(17180UL); ixgbe_set_i2c_data(hw, & i2cctl, 1); __const_udelay(21475UL); return; } } static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw , u8 *data ) { s32 i ; bool bit ; { bit = 0; i = 7; goto ldv_55828; ldv_55827: ixgbe_clock_in_i2c_bit(hw, & bit); *data = (u8 )((int )((signed char )*data) | (int )((signed char )((int )bit << i))); i = i - 1; ldv_55828: ; if (i >= 0) { goto ldv_55827; } else { } return (0); } } static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw 
*hw , u8 data ) { s32 status ; s32 i ; u32 i2cctl ; bool bit ; { bit = 0; i = 7; goto ldv_55840; ldv_55839: bit = (((int )data >> i) & 1) != 0; status = ixgbe_clock_out_i2c_bit(hw, (int )bit); if (status != 0) { goto ldv_55838; } else { } i = i - 1; ldv_55840: ; if (i >= 0) { goto ldv_55839; } else { } ldv_55838: i2cctl = ixgbe_read_reg(hw, *(hw->mvals + 23UL)); i2cctl = (u32 )*(hw->mvals + 19UL) | i2cctl; ixgbe_write_reg(hw, *(hw->mvals + 23UL), i2cctl); ixgbe_read_reg(hw, 8U); return (status); } } static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw ) { s32 status ; u32 i ; u32 i2cctl ; u32 tmp ; u32 timeout ; bool ack ; struct _ddebug descriptor ; long tmp___0 ; { status = 0; i = 0U; tmp = ixgbe_read_reg(hw, *(hw->mvals + 23UL)); i2cctl = tmp; timeout = 10U; ack = 1; ixgbe_raise_i2c_clk(hw, & i2cctl); __const_udelay(17180UL); i = 0U; goto ldv_55851; ldv_55850: i2cctl = ixgbe_read_reg(hw, *(hw->mvals + 23UL)); ack = ixgbe_get_i2c_data(hw, & i2cctl); __const_udelay(4295UL); if (! ack) { goto ldv_55849; } else { } i = i + 1U; ldv_55851: ; if (i < timeout) { goto ldv_55850; } else { } ldv_55849: ; if ((int )ack) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_get_i2c_ack"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c"; descriptor.format = "I2C ack was not received.\n"; descriptor.lineno = 1927U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "I2C ack was not received.\n"); } else { } status = -18; } else { } ixgbe_lower_i2c_clk(hw, & i2cctl); __const_udelay(21475UL); return (status); } } static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw , bool *data ) { u32 i2cctl ; u32 tmp ; { tmp = 
/* NOTE(review): CIL-generated verifier-harness (LDV) text for
 * drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c.  Doc-only pass: all tokens
 * below are unchanged; only comments and line breaks were added.  This span
 * carries the tail of ixgbe_clock_in_i2c_bit() (its "tmp =" assignment opens
 * on the previous generated line), all of ixgbe_clock_out_i2c_bit() and
 * ixgbe_raise_i2c_clk(), and the dangling "static void" that begins
 * ixgbe_lower_i2c_clk() on the next generated line. */
/* *(hw->mvals + 23UL) indexes the per-MAC register-offset table; presumably
 * the I2CCTL register -- TODO confirm against the ixgbe_mvals[] tables. */
ixgbe_read_reg(hw, *(hw->mvals + 23UL));
i2cctl = tmp;
/* Clock one bit in: raise SCL, wait, re-read I2CCTL and sample SDA. */
ixgbe_raise_i2c_clk(hw, & i2cctl);
__const_udelay(17180UL);
i2cctl = ixgbe_read_reg(hw, *(hw->mvals + 23UL));
*data = ixgbe_get_i2c_data(hw, & i2cctl);
/* Lower SCL to finish the bit period; this path always reports success. */
ixgbe_lower_i2c_clk(hw, & i2cctl);
__const_udelay(21475UL);
return (0);
}
}
/* Clock one bit (data) out on SDA with a single SCL pulse.
 * Returns 0 on success, or -18 (presumably IXGBE_ERR_I2C, numeric value
 * inlined by CIL -- verify against ixgbe_type.h) when SDA could not be
 * driven to the requested level. */
static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw , u8 data )
/* Completes "static void" from the previous generated line: drive SCL low. */
ixgbe_lower_i2c_clk(struct ixgbe_hw *hw , u32 *i2cctl )
{
  {
  /* Clear the clock-output bit (*(hw->mvals + 17UL)) and flush the posted
   * write with a read of register 8 (presumably IXGBE_STATUS -- TODO
   * confirm). */
  *i2cctl = *i2cctl & (u32 )(~ *(hw->mvals + 17UL));
  ixgbe_write_reg(hw, *(hw->mvals + 23UL), *i2cctl);
  ixgbe_read_reg(hw, 8U);
  __const_udelay(4295UL);
  return;
}
}
/* Drive SDA to 'data', then read it back; returns 0 on success or -18
 * (presumably IXGBE_ERR_I2C) if the line did not reach the requested level. */
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw , u32 *i2cctl , bool data )
{
  struct _ddebug descriptor ;
  long tmp ;
  bool tmp___0 ;
  {
  if ((int )data) {
    *i2cctl = *i2cctl | (u32 )*(hw->mvals + 19UL);
  } else {
    *i2cctl = *i2cctl & (u32 )(~ *(hw->mvals + 19UL));
  }
  ixgbe_write_reg(hw, *(hw->mvals + 23UL), *i2cctl);
  ixgbe_read_reg(hw, 8U);
  __const_udelay(12885UL);
  /* Read back and verify the bus actually took the value. */
  *i2cctl = ixgbe_read_reg(hw, *(hw->mvals + 23UL));
  tmp___0 = ixgbe_get_i2c_data(hw, i2cctl);
  if ((int )tmp___0 != (int )data) {
    /* CIL-expanded netdev_dbg(): the _ddebug descriptor is built inline. */
    descriptor.modname = "ixgbe";
    descriptor.function = "ixgbe_set_i2c_data";
    descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c";
    descriptor.format = "Error - I2C data was not set to %X.\n";
    descriptor.lineno = 2067U;
    descriptor.flags = 0U;
    tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L);
    if (tmp != 0L) {
      __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Error - I2C data was not set to %X.\n", (int )data);
    } else {
    }
    return (-18);
  } else {
  }
  return (0);
}
}
/* Sample the SDA input bit (*(hw->mvals + 18UL)) from a cached I2CCTL value. */
static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw , u32 *i2cctl )
{
  {
  if ((*i2cctl & (u32 )*(hw->mvals + 18UL)) != 0U) {
    return (1);
  } else {
  }
  return (0);
}
}
/* Recover a hung bus: send START, release SDA, then pulse SCL nine times so
 * a stuck slave can finish any in-flight byte.  CIL lowered the loop to
 * goto labels (ldv_55897/ldv_55898); the condition "if (i <= 8U)" is split
 * across generated lines and its body continues on the next one. */
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw )
{
  u32 i2cctl ;
  u32 tmp ;
  u32 i ;
  {
  tmp = ixgbe_read_reg(hw, *(hw->mvals + 23UL));
  i2cctl = tmp;
  ixgbe_i2c_start(hw);
  ixgbe_set_i2c_data(hw, & i2cctl, 1);
  i = 0U;
  goto ldv_55898;
  ldv_55897:
  ixgbe_raise_i2c_clk(hw, & i2cctl);
  __const_udelay(17180UL);
  ixgbe_lower_i2c_clk(hw, & i2cctl);
  __const_udelay(21475UL);
  i = i + 1U;
  ldv_55898: ;
  if (i <= 8U)
/* Tail of ixgbe_i2c_bus_clear(): loop body taken while the "if (i <= 8U)"
 * condition ending the previous generated line holds. */
{ goto ldv_55897;
} else {
}
/* Re-issue START then STOP to leave the bus in a known idle state. */
ixgbe_i2c_start(hw);
ixgbe_i2c_stop(hw);
return;
}
}
/* Over-temperature check; only applies when hw->device_id == 5404U (0x151C,
 * presumably IXGBE_DEV_ID_82599_T3_LOM -- TODO confirm).  Returns 0 when not
 * applicable or when the alarm bit (0x8) in PHY reg 0x9005 is clear,
 * otherwise -26 (presumably IXGBE_ERR_OVERTEMP). */
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw )
{
  u16 phy_data ;
  {
  phy_data = 0U;
  if ((unsigned int )hw->device_id != 5404U) {
    return (0);
  } else {
  }
  (*(hw->phy.ops.read_reg))(hw, 36869U, 1U, & phy_data);
  if (((int )phy_data & 8) == 0) {
    return (0);
  } else {
  }
  return (-26);
}
}
/* Power a copper PHY up (on) or down; no-op unless media type == 4U
 * (presumably copper).  Clears/sets bit 0x800 in PHY register 0, device 30. */
s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw , bool on )
{
  u32 status ;
  u16 reg ;
  enum ixgbe_media_type tmp ;
  s32 tmp___0 ;
  bool tmp___1 ;
  s32 tmp___2 ;
  {
  tmp = (*(hw->mac.ops.get_media_type))(hw);
  if ((unsigned int )tmp != 4U) {
    return (0);
  } else {
  }
  tmp___0 = (*(hw->phy.ops.read_reg))(hw, 0U, 30U, & reg);
  status = (u32 )tmp___0;
  if (status != 0U) {
    return ((s32 )status);
  } else {
  }
  if ((int )on) {
    reg = (unsigned int )reg & 63487U;
  } else {
    /* Refuse to power down while a PHY reset is blocked. */
    tmp___1 = ixgbe_check_reset_blocked(hw);
    if ((int )tmp___1) {
      return (0);
    } else {
    }
    reg = (u16 )((unsigned int )reg | 2048U);
  }
  tmp___2 = (*(hw->phy.ops.write_reg))(hw, 0U, 30U, (int )reg);
  status = (u32 )tmp___2;
  return ((s32 )status);
}
}
/* LDV instrumentation stub: forwards to queue_work_on() and records the
 * queued work in the verifier model via activate_work_9(). */
bool ldv_queue_work_on_301(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 )
{
  ldv_func_ret_type___2 ldv_func_res ;
  bool tmp ;
  {
  tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  ldv_func_res = tmp;
  activate_work_9(ldv_func_arg3, 2);
  return (ldv_func_res);
}
}
/* LDV instrumentation stub for queue_delayed_work_on(). */
bool ldv_queue_delayed_work_on_302(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 )
{
  ldv_func_ret_type___3 ldv_func_res ;
  bool tmp ;
  {
  tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4);
  ldv_func_res = tmp;
  activate_work_9(& ldv_func_arg3->work, 2);
  return (ldv_func_res);
}
}
/* LDV stub; its queue_work_on() argument list is split across generated
 * lines and continues on the next one. */
bool ldv_queue_work_on_303(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 )
{
  ldv_func_ret_type___4 ldv_func_res ;
  bool tmp ;
  {
  tmp = queue_work_on(ldv_func_arg1,
/* Tail of ldv_queue_work_on_303(): remaining arguments of the
 * queue_work_on() call opened on the previous generated line. */
ldv_func_arg2, ldv_func_arg3);
ldv_func_res = tmp;
activate_work_9(ldv_func_arg3, 2);
return (ldv_func_res);
}
}
/* LDV stub: flush_workqueue() plus verifier-model drain of pending works. */
void ldv_flush_workqueue_304(struct workqueue_struct *ldv_func_arg1 )
{
  {
  flush_workqueue(ldv_func_arg1);
  call_and_disable_all_9(2);
  return;
}
}
/* LDV stub for queue_delayed_work_on(). */
bool ldv_queue_delayed_work_on_305(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 )
{
  ldv_func_ret_type___5 ldv_func_res ;
  bool tmp ;
  {
  tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4);
  ldv_func_res = tmp;
  activate_work_9(& ldv_func_arg3->work, 2);
  return (ldv_func_res);
}
}
/* LDV allocation model: validate the gfp flags for the current context,
 * then return an unconstrained (nondeterministic) pointer -- no real
 * allocation is performed. */
void *ldv_kmem_cache_alloc_311(struct kmem_cache *ldv_func_arg1 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return (tmp);
}
}
/* LDV model of pskb_expand_head(): returns an unconstrained int. */
int ldv_pskb_expand_head_317(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((int )((long )tmp));
}
}
/* LDV model of skb_clone(). */
struct sk_buff *ldv_skb_clone_319(struct sk_buff *ldv_func_arg1 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
}
}
/* LDV model of skb_copy(). */
struct sk_buff *ldv_skb_copy_321(struct sk_buff const *ldv_func_arg1 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
}
}
/* LDV models of __netdev_alloc_skb(): call-site-specialized copies. */
struct sk_buff *ldv___netdev_alloc_skb_322(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
}
}
struct sk_buff *ldv___netdev_alloc_skb_323(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
}
}
/* Body continues on the next generated line. */
struct sk_buff *ldv___netdev_alloc_skb_324(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags )
{
  void *tmp ;
  {
ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_325(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_326(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_327(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_328(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern void __list_add(struct list_head * , struct list_head * , struct list_head * ) ; __inline static void list_add(struct list_head *new , struct list_head *head ) { { __list_add(new, head, head->next); return; } } bool ldv_queue_work_on_348(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_350(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_349(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_352(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_351(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_358(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_375(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; __inline static void 
*kcalloc(size_t n , size_t size , gfp_t flags ) ; extern int pci_enable_sriov(struct pci_dev * , int ) ; extern void pci_disable_sriov(struct pci_dev * ) ; extern int pci_num_vf(struct pci_dev * ) ; extern int pci_vfs_assigned(struct pci_dev * ) ; struct sk_buff *ldv_skb_clone_366(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_374(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_368(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_364(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_372(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_373(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_369(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_370(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_371(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; s32 ixgbe_read_mbx(struct ixgbe_hw *hw , u32 *msg , u16 size , u16 mbx_id ) ; s32 ixgbe_write_mbx(struct ixgbe_hw *hw , u32 *msg , u16 size , u16 mbx_id ) ; s32 ixgbe_check_for_msg(struct ixgbe_hw *hw , u16 mbx_id ) ; s32 ixgbe_check_for_ack(struct ixgbe_hw *hw , u16 mbx_id ) ; s32 ixgbe_check_for_rst(struct ixgbe_hw *hw , u16 mbx_id ) ; __inline static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter , u16 vid , u16 qos , u32 vf ) { struct ixgbe_hw *hw ; u32 vmvir ; { hw = & adapter->hw; vmvir = (u32 )(((int )vid | ((int )qos << 13)) | 1073741824); ixgbe_write_reg(hw, (vf + 8192U) * 4U, vmvir); return; } } static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int num_vf_macvlans ; int i ; struct vf_macvlans *mv_list ; void *tmp ; 
void *tmp___0 ; { hw = & adapter->hw; adapter->flags = adapter->flags | 8388608U; if (((int )adapter->msg_enable & 2) != 0) { netdev_info((struct net_device const *)adapter->netdev, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); } else { } adapter->flags = adapter->flags | 16384U; if ((unsigned int )adapter->ring_feature[1].limit == 0U) { adapter->ring_feature[1].limit = 1U; } else { } adapter->ring_feature[1].offset = (u16 )adapter->num_vfs; num_vf_macvlans = (int )((hw->mac.num_rar_entries - adapter->num_vfs) - 16U); tmp = kcalloc((size_t )num_vf_macvlans, 32UL, 208U); mv_list = (struct vf_macvlans *)tmp; adapter->mv_list = mv_list; if ((unsigned long )mv_list != (unsigned long )((struct vf_macvlans *)0)) { INIT_LIST_HEAD(& adapter->vf_mvs.l); i = 0; goto ldv_55662; ldv_55661: mv_list->vf = -1; mv_list->free = 1; list_add(& mv_list->l, & adapter->vf_mvs.l); mv_list = mv_list + 1; i = i + 1; ldv_55662: ; if (i < num_vf_macvlans) { goto ldv_55661; } else { } } else { } ixgbe_write_reg(hw, 33312U, 1U); adapter->bridge_mode = 0U; tmp___0 = kcalloc((size_t )adapter->num_vfs, 88UL, 208U); adapter->vfinfo = (struct vf_data_storage *)tmp___0; if ((unsigned long )adapter->vfinfo != (unsigned long )((struct vf_data_storage *)0)) { if ((unsigned int )adapter->hw.mac.type == 2U && adapter->num_vfs <= 15U) { adapter->dcb_cfg.num_tcs.pg_tcs = 8U; adapter->dcb_cfg.num_tcs.pfc_tcs = 8U; } else if (adapter->num_vfs <= 31U) { adapter->dcb_cfg.num_tcs.pg_tcs = 4U; adapter->dcb_cfg.num_tcs.pfc_tcs = 4U; } else { adapter->dcb_cfg.num_tcs.pg_tcs = 1U; adapter->dcb_cfg.num_tcs.pfc_tcs = 1U; } adapter->flags2 = adapter->flags2 & 4294967292U; i = 0; goto ldv_55665; ldv_55664: (adapter->vfinfo + (unsigned long )i)->spoofchk_enabled = 1U; (adapter->vfinfo + (unsigned long )i)->rss_query_enabled = 0; i = i + 1; ldv_55665: ; if ((unsigned int )i < adapter->num_vfs) { goto ldv_55664; } else { } return (0); } else { } return (-12); } } void ixgbe_enable_sriov(struct ixgbe_adapter *adapter 
) { int pre_existing_vfs ; int err ; unsigned int __min1 ; unsigned int __min2 ; int tmp ; { pre_existing_vfs = 0; pre_existing_vfs = pci_num_vf(adapter->pdev); if (pre_existing_vfs == 0 && adapter->num_vfs == 0U) { return; } else { } if (pre_existing_vfs != 0) { adapter->num_vfs = (unsigned int )pre_existing_vfs; dev_warn((struct device const *)(& (adapter->pdev)->dev), "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n"); } else { __min1 = adapter->num_vfs; __min2 = 63U; adapter->num_vfs = __min1 < __min2 ? __min1 : __min2; err = pci_enable_sriov(adapter->pdev, (int )adapter->num_vfs); if (err != 0) { if (((int )adapter->msg_enable & 2) != 0) { netdev_err((struct net_device const *)adapter->netdev, "Failed to enable PCI sriov: %d\n", err); } else { } adapter->num_vfs = 0U; return; } else { } } tmp = __ixgbe_enable_sriov(adapter); if (tmp == 0) { return; } else { } if (((int )adapter->msg_enable & 2) != 0) { netdev_err((struct net_device const *)adapter->netdev, "Unable to allocate memory for VF Data Storage - SRIOV disabled\n"); } else { } ixgbe_disable_sriov(adapter); return; } } int ixgbe_disable_sriov(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 gpie ; u32 vmdctl ; int rss ; int tmp ; int __min1 ; u8 tmp___0 ; int __min2 ; unsigned int tmp___1 ; int __min1___0 ; int __min2___0 ; unsigned int tmp___2 ; { hw = & adapter->hw; adapter->num_vfs = 0U; kfree((void const *)adapter->vfinfo); adapter->vfinfo = (struct vf_data_storage *)0; kfree((void const *)adapter->mv_list); adapter->mv_list = (struct vf_macvlans *)0; if ((adapter->flags & 8388608U) == 0U) { return (0); } else { } tmp = pci_vfs_assigned(adapter->pdev); if (tmp != 0) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); return (-1); } else { } pci_disable_sriov(adapter->pdev); ixgbe_write_reg(hw, 69712U, 0U); gpie = ixgbe_read_reg(hw, 2200U); 
gpie = gpie & 4294918143U; ixgbe_write_reg(hw, 2200U, gpie); vmdctl = ixgbe_read_reg(hw, 20912U); vmdctl = vmdctl & 4294959231U; ixgbe_write_reg(hw, 20912U, vmdctl); ixgbe_read_reg(hw, 8U); if ((unsigned int )adapter->ring_feature[1].limit == 1U) { adapter->flags = adapter->flags & 4294950911U; adapter->flags = adapter->flags & 4286578687U; tmp___0 = ixgbe_max_rss_indices(adapter); __min1 = (int )tmp___0; tmp___1 = cpumask_weight(cpu_online_mask); __min2 = (int )tmp___1; rss = __min1 < __min2 ? __min1 : __min2; } else { __min1___0 = 4; tmp___2 = cpumask_weight(cpu_online_mask); __min2___0 = (int )tmp___2; rss = __min1___0 < __min2___0 ? __min1___0 : __min2___0; } adapter->ring_feature[1].offset = 0U; adapter->ring_feature[2].limit = (u16 )rss; msleep(100U); return (0); } } static int ixgbe_pci_sriov_enable(struct pci_dev *dev , int num_vfs ) { struct ixgbe_adapter *adapter ; void *tmp ; int err ; int i ; int pre_existing_vfs ; int tmp___0 ; { tmp = pci_get_drvdata(dev); adapter = (struct ixgbe_adapter *)tmp; err = 0; tmp___0 = pci_num_vf(dev); pre_existing_vfs = tmp___0; if (pre_existing_vfs != 0 && pre_existing_vfs != num_vfs) { err = ixgbe_disable_sriov(adapter); } else if (pre_existing_vfs != 0 && pre_existing_vfs == num_vfs) { return (num_vfs); } else { } if (err != 0) { return (err); } else { } if (adapter->num_rx_pools + num_vfs > 64) { return (-1); } else { } adapter->num_vfs = (unsigned int )num_vfs; err = __ixgbe_enable_sriov(adapter); if (err != 0) { return (err); } else { } i = 0; goto ldv_55697; ldv_55696: ixgbe_vf_configuration(dev, (unsigned int )(i | 268435456)); i = i + 1; ldv_55697: ; if ((unsigned int )i < adapter->num_vfs) { goto ldv_55696; } else { } err = pci_enable_sriov(dev, num_vfs); if (err != 0) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "Failed to enable PCI sriov: %d\n", err); return (err); } else { } ixgbe_sriov_reinit(adapter); return (num_vfs); } } static int ixgbe_pci_sriov_disable(struct pci_dev *dev ) { struct 
ixgbe_adapter *adapter ; void *tmp ; int err ; u32 current_flags ; { tmp = pci_get_drvdata(dev); adapter = (struct ixgbe_adapter *)tmp; current_flags = adapter->flags; err = ixgbe_disable_sriov(adapter); if (err == 0 && adapter->flags != current_flags) { ixgbe_sriov_reinit(adapter); } else { } return (err); } } int ixgbe_pci_sriov_configure(struct pci_dev *dev , int num_vfs ) { int tmp ; int tmp___0 ; { if (num_vfs == 0) { tmp = ixgbe_pci_sriov_disable(dev); return (tmp); } else { tmp___0 = ixgbe_pci_sriov_enable(dev, num_vfs); return (tmp___0); } } } static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter , u32 *msgbuf , u32 vf ) { int entries ; u16 *hash_list ; struct vf_data_storage *vfinfo ; struct ixgbe_hw *hw ; int i ; u32 vector_bit ; u32 vector_reg ; u32 mta_reg ; u32 vmolr ; u32 tmp ; int _min1 ; int _min2 ; { entries = (int )((*msgbuf & 16711680U) >> 16); hash_list = (u16 *)msgbuf + 1U; vfinfo = adapter->vfinfo + (unsigned long )vf; hw = & adapter->hw; tmp = ixgbe_read_reg(hw, (vf + 15360U) * 4U); vmolr = tmp; _min1 = entries; _min2 = 30; entries = _min1 < _min2 ? 
_min1 : _min2; vfinfo->num_vf_mc_hashes = (u16 )entries; i = 0; goto ldv_55727; ldv_55726: vfinfo->vf_mc_hashes[i] = *(hash_list + (unsigned long )i); i = i + 1; ldv_55727: ; if (i < entries) { goto ldv_55726; } else { } i = 0; goto ldv_55730; ldv_55729: vector_reg = (u32 )((int )vfinfo->vf_mc_hashes[i] >> 5) & 127U; vector_bit = (u32 )vfinfo->vf_mc_hashes[i] & 31U; mta_reg = ixgbe_read_reg(hw, (vector_reg + 5248U) * 4U); mta_reg = (u32 )(1 << (int )vector_bit) | mta_reg; ixgbe_write_reg(hw, (vector_reg + 5248U) * 4U, mta_reg); i = i + 1; ldv_55730: ; if ((int )vfinfo->num_vf_mc_hashes > i) { goto ldv_55729; } else { } vmolr = vmolr | 33554432U; ixgbe_write_reg(hw, (vf + 15360U) * 4U, vmolr); return (0); } } void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; struct vf_data_storage *vfinfo ; int i ; int j ; u32 vector_bit ; u32 vector_reg ; u32 mta_reg ; u32 vmolr ; u32 tmp ; { hw = & adapter->hw; i = 0; goto ldv_55747; ldv_55746: tmp = ixgbe_read_reg(hw, (u32 )((i + 15360) * 4)); vmolr = tmp; vfinfo = adapter->vfinfo + (unsigned long )i; j = 0; goto ldv_55744; ldv_55743: hw->addr_ctrl.mta_in_use = hw->addr_ctrl.mta_in_use + 1U; vector_reg = (u32 )((int )vfinfo->vf_mc_hashes[j] >> 5) & 127U; vector_bit = (u32 )vfinfo->vf_mc_hashes[j] & 31U; mta_reg = ixgbe_read_reg(hw, (vector_reg + 5248U) * 4U); mta_reg = (u32 )(1 << (int )vector_bit) | mta_reg; ixgbe_write_reg(hw, (vector_reg + 5248U) * 4U, mta_reg); j = j + 1; ldv_55744: ; if ((int )vfinfo->num_vf_mc_hashes > j) { goto ldv_55743; } else { } if ((unsigned int )vfinfo->num_vf_mc_hashes != 0U) { vmolr = vmolr | 33554432U; } else { vmolr = vmolr & 4261412863U; } ixgbe_write_reg(hw, (u32 )((i + 15360) * 4), vmolr); i = i + 1; ldv_55747: ; if ((unsigned int )i < adapter->num_vfs) { goto ldv_55746; } else { } ixgbe_full_sync_mac_table(adapter); return; } } static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter , int add , int vid , u32 vf ) { s32 tmp ; { if (vid == 0 && add == 
0) { return (0); } else { } tmp = (*(adapter->hw.mac.ops.set_vfta))(& adapter->hw, (u32 )vid, vf, add != 0); return (tmp); } } static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter , u32 *msgbuf , u32 vf ) { struct ixgbe_hw *hw ; int max_frame ; u32 max_frs ; struct net_device *dev ; int pf_max_frame ; u32 reg_offset ; u32 vf_shift ; u32 vfre ; s32 err ; { hw = & adapter->hw; max_frame = (int )*(msgbuf + 1UL); if ((unsigned int )adapter->hw.mac.type == 2U) { dev = adapter->netdev; pf_max_frame = (int )(dev->mtu + 14U); err = 0; switch ((adapter->vfinfo + (unsigned long )vf)->vf_api) { case 2U: ; case 3U: ; if (pf_max_frame > 1514) { goto ldv_55771; } else { } default: ; if (pf_max_frame > 1514 || max_frame > 1518) { err = -22; } else { } goto ldv_55771; } ldv_55771: vf_shift = vf & 31U; reg_offset = vf / 32U; vfre = ixgbe_read_reg(hw, (reg_offset + 5240U) * 4U); if (err != 0) { vfre = (u32 )(~ (1 << (int )vf_shift)) & vfre; } else { vfre = (u32 )(1 << (int )vf_shift) | vfre; } ixgbe_write_reg(hw, (reg_offset + 5240U) * 4U, vfre); if (err != 0) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "VF max_frame %d out of range\n", max_frame); } else { } return (err); } else { } } else { } if (max_frame > 9728) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "VF max_frame %d out of range\n", max_frame); } else { } return (-22); } else { } max_frs = ixgbe_read_reg(hw, 17000U); max_frs = max_frs & 4294901760U; max_frs = max_frs >> 16; if ((u32 )max_frame > max_frs) { max_frs = (u32 )(max_frame << 16); ixgbe_write_reg(hw, 17000U, max_frs); } else { } if (((int )adapter->msg_enable & 8192) != 0) { netdev_info((struct net_device const *)adapter->netdev, "VF requests change max MTU to %d\n", max_frame); } else { } return (0); } } static void ixgbe_set_vmolr(struct ixgbe_hw *hw , u32 vf , bool aupe ) { u32 vmolr ; u32 tmp ; { tmp = ixgbe_read_reg(hw, (vf + 15360U) * 4U); vmolr = tmp; 
vmolr = vmolr | 134217728U; if ((int )aupe) { vmolr = vmolr | 16777216U; } else { vmolr = vmolr & 4278190079U; } ixgbe_write_reg(hw, (vf + 15360U) * 4U, vmolr); return; } } static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter , u32 vf ) { struct ixgbe_hw *hw ; { hw = & adapter->hw; ixgbe_write_reg(hw, (vf + 8192U) * 4U, 0U); return; } } __inline static void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter , u32 vf ) { struct ixgbe_hw *hw ; struct vf_data_storage *vfinfo ; u8 num_tcs ; int tmp ; { hw = & adapter->hw; vfinfo = adapter->vfinfo + (unsigned long )vf; tmp = netdev_get_num_tc(adapter->netdev); num_tcs = (u8 )tmp; ixgbe_set_vf_vlan(adapter, 1, (int )vfinfo->pf_vlan, vf); ixgbe_set_vmolr(hw, vf, (unsigned int )vfinfo->pf_vlan == 0U); if (((unsigned int )vfinfo->pf_vlan == 0U && (unsigned int )vfinfo->pf_qos == 0U) && (unsigned int )num_tcs == 0U) { ixgbe_clear_vmvir(adapter, vf); } else { if ((unsigned int )vfinfo->pf_qos != 0U || (unsigned int )num_tcs == 0U) { ixgbe_set_vmvir(adapter, (int )vfinfo->pf_vlan, (int )vfinfo->pf_qos, vf); } else { ixgbe_set_vmvir(adapter, (int )vfinfo->pf_vlan, (int )adapter->default_up, vf); } if ((unsigned int )vfinfo->spoofchk_enabled != 0U) { (*(hw->mac.ops.set_vlan_anti_spoofing))(hw, 1, (int )vf); } else { } } (adapter->vfinfo + (unsigned long )vf)->num_vf_mc_hashes = 0U; ixgbe_set_rx_mode(adapter->netdev); ixgbe_del_mac_filter(adapter, (u8 *)(& (adapter->vfinfo + (unsigned long )vf)->vf_mac_addresses), (int )((u16 )vf)); (adapter->vfinfo + (unsigned long )vf)->vf_api = 0U; return; } } static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter , int vf , unsigned char *mac_addr ) { { ixgbe_del_mac_filter(adapter, (u8 *)(& (adapter->vfinfo + (unsigned long )vf)->vf_mac_addresses), (int )((u16 )vf)); memcpy((void *)(& (adapter->vfinfo + (unsigned long )vf)->vf_mac_addresses), (void const *)mac_addr, 6UL); ixgbe_add_mac_filter(adapter, (u8 *)(& (adapter->vfinfo + (unsigned long )vf)->vf_mac_addresses), (int )((u16 
)vf)); return (0); } }
/*
 * ixgbe_set_vf_macvlan - manage a VF's extra MACVLAN filter entries on the
 * adapter->vf_mvs.l free list.  index <= 1: release all entries owned by this
 * vf (mark free, drop hw filter).  index == 0: stop there.  Otherwise claim a
 * free list entry for mac_addr; returns -28 (-ENOSPC) when none is free.
 * The list walk is an open-coded list_for_each / container_of (CIL output).
 */
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter , int vf , int index , unsigned char *mac_addr ) { struct list_head *pos ; struct vf_macvlans *entry ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { if (index <= 1) { pos = adapter->vf_mvs.l.next; goto ldv_55807; ldv_55806: __mptr = (struct list_head const *)pos; entry = (struct vf_macvlans *)__mptr; if (entry->vf == vf) { entry->vf = -1; entry->free = 1; entry->is_macvlan = 0; ixgbe_del_mac_filter(adapter, (u8 *)(& entry->vf_macvlan), (int )((u16 )vf)); } else { } pos = pos->next; ldv_55807: ; if ((unsigned long )(& adapter->vf_mvs.l) != (unsigned long )pos) { goto ldv_55806; } else { } } else { } if (index == 0) { return (0); } else { } entry = (struct vf_macvlans *)0; pos = adapter->vf_mvs.l.next; goto ldv_55813; ldv_55812: __mptr___0 = (struct list_head const *)pos; entry = (struct vf_macvlans *)__mptr___0; if ((int )entry->free) { goto ldv_55811; } else { } pos = pos->next; ldv_55813: ; if ((unsigned long )(& adapter->vf_mvs.l) != (unsigned long )pos) { goto ldv_55812; } else { } ldv_55811: ; if ((unsigned long )entry == (unsigned long )((struct vf_macvlans *)0) || ! entry->free) { return (-28); } else { } entry->free = 0; entry->is_macvlan = 1; entry->vf = vf; memcpy((void *)(& entry->vf_macvlan), (void const *)mac_addr, 6UL); ixgbe_add_mac_filter(adapter, mac_addr, (int )((u16 )vf)); return (0); } }
/*
 * ixgbe_vf_configuration - PCI-layer VF enable/disable callback.  vfn is the
 * low 6 bits of event_mask; on the "enable" bit (1 << 28) the VF's stored MAC
 * is zeroed so a fresh one must be assigned.  Always returns 0.
 */
int ixgbe_vf_configuration(struct pci_dev *pdev , unsigned int event_mask ) { struct ixgbe_adapter *adapter ; void *tmp ; unsigned int vfn ; bool enable ; { tmp = pci_get_drvdata(pdev); adapter = (struct ixgbe_adapter *)tmp; vfn = event_mask & 63U; enable = (event_mask & 268435456U) != 0U; if ((int )enable) { eth_zero_addr((u8 *)(& (adapter->vfinfo + (unsigned long )vfn)->vf_mac_addresses)); } else { } return (0); } }
/*
 * ixgbe_write_qde - write the qde value for every queue in the VF's pool via
 * the indirect register at 12036U, with the queue index in bits 15:8.
 * q_per_pool = lowest set bit of vmdq->mask (queues per VMDq pool).
 * NOTE(review): the loop writes reg = 65537U | (i << 8) and never uses the
 * qde parameter — looks like the verifier model dropped it; confirm upstream.
 */
__inline static void ixgbe_write_qde(struct ixgbe_adapter *adapter , u32 vf , u32 qde ) { struct ixgbe_hw *hw ; struct ixgbe_ring_feature *vmdq ; u32 q_per_pool ; int i ; u32 reg ; { hw = & adapter->hw; vmdq = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 1UL; q_per_pool = (u32 )(- ((int )vmdq->mask) & (int )vmdq->mask); i = (int )(vf * q_per_pool); goto ldv_55832; ldv_55831:
/* Read of register 8U acts as a flush/ordering read before each write. */
ixgbe_read_reg(hw, 8U); reg = 65537U; reg = (u32 )(i << 8) | reg; ixgbe_write_reg(hw, 12036U, reg); i = i + 1; ldv_55832: ; if ((u32 )i < (vf + 1U) * q_per_pool) { goto ldv_55831; } else { } return; } }
/*
 * ixgbe_vf_reset_msg - handle the VF "reset" mailbox message: replay the VF's
 * administrative state, re-enable its tx/rx/mailbox bits, clear its per-queue
 * stats registers, then reply with the VF MAC (or a NACK when none is set)
 * and the multicast filter type.  (Body continues on the next chunk line.)
 * NOTE(review): addr = (u8 *)(& msgbuf) + 1U is a one-BYTE offset into the
 * u32 reply buffer — upstream uses &msgbuf[1]; likely a CIL printing artifact,
 * confirm against the original driver source.
 */
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter , u32 vf ) { struct ixgbe_ring_feature *vmdq ; struct ixgbe_hw *hw ; unsigned char *vf_mac ; u32 reg ; u32 reg_offset ; u32 vf_shift ; u32 msgbuf[4U] ; u8 *addr ; u32 q_per_pool ; int i ; bool tmp ; int tmp___0 ; struct net_device *dev ; int pf_max_frame ; bool tmp___1 ; int tmp___2 ; { vmdq = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 1UL; hw = & adapter->hw; vf_mac = (unsigned char *)(& (adapter->vfinfo + (unsigned long )vf)->vf_mac_addresses); msgbuf[0] = 0U; msgbuf[1] = 0U; msgbuf[2] = 0U; msgbuf[3] = 0U; addr = (u8 *)(& msgbuf) + 1U; q_per_pool = (u32 )(- ((int )vmdq->mask) & (int )vmdq->mask); if (((int )adapter->msg_enable & 2) != 0) { netdev_info((struct 
net_device const *)adapter->netdev, "VF Reset msg received from vf %d\n", vf); } else { } ixgbe_vf_reset_event(adapter, vf);
/* Re-install the stored MAC filter only when one is actually assigned. */
tmp = is_zero_ether_addr((u8 const *)vf_mac); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { ixgbe_set_vf_mac(adapter, (int )vf, vf_mac); } else { }
/* Per-VF enable bits: 32 VFs per register word. */
vf_shift = vf & 31U; reg_offset = vf / 32U; reg = ixgbe_read_reg(hw, (reg_offset + 8260U) * 4U); reg = (u32 )(1 << (int )vf_shift) | reg; ixgbe_write_reg(hw, (reg_offset + 8260U) * 4U, reg); ixgbe_write_qde(adapter, vf, 1U); reg = ixgbe_read_reg(hw, (reg_offset + 5240U) * 4U); reg = (u32 )(1 << (int )vf_shift) | reg;
/* mac.type 2U with a jumbo PF frame: keep the VF's rx-enable bit cleared. */
if ((unsigned int )adapter->hw.mac.type == 2U) { dev = adapter->netdev; pf_max_frame = (int )(dev->mtu + 14U); if (pf_max_frame > 1514) { reg = (u32 )(~ (1 << (int )vf_shift)) & reg; } else { } } else { } ixgbe_write_reg(hw, (reg_offset + 5240U) * 4U, reg); (adapter->vfinfo + (unsigned long )vf)->clear_to_send = 1; reg = ixgbe_read_reg(hw, (reg_offset + 8676U) * 4U); reg = (u32 )(1 << (int )vf_shift) | reg; ixgbe_write_reg(hw, (reg_offset + 8676U) * 4U, reg);
/* Zero the two per-queue stats words for every queue in this VF's pool. */
i = 0; goto ldv_55851; ldv_55850: ixgbe_write_reg(hw, (q_per_pool * vf + (u32 )i) * 64U + 24636U, 0U); ixgbe_write_reg(hw, (q_per_pool * vf + (u32 )i) * 64U + 24632U, 0U); i = i + 1; ldv_55851: ; if ((u32 )i < q_per_pool) { goto ldv_55850; } else { }
/* Reply: opcode 1U plus ACK (bit 31) with the MAC, or NACK (bit 30). */
msgbuf[0] = 1U; tmp___1 = is_zero_ether_addr((u8 const *)vf_mac); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { msgbuf[0] = msgbuf[0] | 2147483648U; memcpy((void *)addr, (void const *)vf_mac, 6UL); } else { msgbuf[0] = msgbuf[0] | 1073741824U; dev_warn((struct device const *)(& (adapter->pdev)->dev), "VF %d has no MAC address assigned, you may have to assign one manually\n", vf); } msgbuf[3] = (u32 )hw->mac.mc_filter_type; ixgbe_write_mbx(hw, (u32 *)(& msgbuf), 4, (int )((u16 )vf)); return (0); } }
/*
 * ixgbe_set_vf_mac_addr - handle a VF "set MAC" mailbox message.  Rejects an
 * invalid address and any change to a MAC the PF set administratively (unless
 * it matches the stored one).  Returns 0 on success, 1 (true) on failure of
 * the underlying set, -1 on policy rejection.
 * NOTE(review): new_mac = (u8 *)msgbuf + 1U is a one-BYTE offset into the u32
 * message — upstream reads msgbuf[1]; likely a CIL artifact, confirm.
 */
static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter , u32 *msgbuf , u32 vf ) { u8 *new_mac ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; int tmp___3 ; { new_mac = (u8 *)msgbuf + 1U; tmp = is_valid_ether_addr((u8 const *)new_mac); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "VF %d attempted to set invalid mac\n", vf); } else { } return (-1); } else { } if ((int )(adapter->vfinfo + (unsigned long )vf)->pf_set_mac) { tmp___1 = ether_addr_equal((u8 const *)(& (adapter->vfinfo + (unsigned long )vf)->vf_mac_addresses), (u8 const *)new_mac); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", vf); } else { } return (-1); } else { } } else { } tmp___3 = ixgbe_set_vf_mac(adapter, (int )vf, new_mac); return (tmp___3 < 0); } }
/*
 * ixgbe_find_vlvf_entry - scan VLVF slots 1..63 (register (regindex+15424)*4,
 * low 12 bits hold the VLAN id) for vlan.  Returns the slot index, 0 for
 * vlan 0, or -1 when not found.
 */
static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw , u32 vlan ) { u32 vlvf ; s32 regindex ; { if (vlan == 0U) { return (0); } else { } regindex = 1; goto ldv_55867; ldv_55866: vlvf = ixgbe_read_reg(hw, (u32 )((regindex + 15424) * 4)); if ((vlvf & 4095U) == vlan) { goto ldv_55865; } else { } regindex = regindex + 1; ldv_55867: ; if (regindex <= 63) { goto ldv_55866; } else { } ldv_55865: ; if (regindex > 63) { regindex = -1; } else { } return (regindex); } }
/*
 * ixgbe_set_vf_vlan_msg - handle a VF "set VLAN" mailbox message.  add is in
 * msgbuf[0] bits 23:16, vid in the low 12 bits of msgbuf[1].  Rejected (-1)
 * when the PF administratively set a VLAN or TCs are configured.  Tracks the
 * per-VF vlan_count, applies the filter, re-arms anti-spoofing, and on a
 * remove while the netdev is promiscuous ((flags & 256U) != 0U) prunes the
 * PF pool's VLVF membership when no other pool uses the VLAN.
 * NOTE(review): on add-while-promiscuous the result of the PF-pool
 * ixgbe_set_vf_vlan call is immediately overwritten by the VF-pool call.
 */
static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter , u32 *msgbuf , u32 vf ) { struct ixgbe_hw *hw ; int add ; int vid ; int err ; s32 reg_ndx ; u32 vlvf ; u32 bits ; u8 tcs ; int tmp ; u32 tmp___0 ; u32 tmp___1 ; int tmp___2 ; { hw = & adapter->hw; add = (int )((*msgbuf & 16711680U) >> 16); vid = (int )*(msgbuf + 1UL) & 4095; tmp = netdev_get_num_tc(adapter->netdev); tcs = (u8 )tmp; if ((unsigned int )(adapter->vfinfo + (unsigned long )vf)->pf_vlan != 0U || (unsigned int )tcs != 0U) { if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "VF %d attempted to override administratively set VLAN configuration\nReload the VF driver to resume operations\n", vf); } else { } return (-1); } else { } if (add != 0) { (adapter->vfinfo + (unsigned long )vf)->vlan_count = (u16 )((int )(adapter->vfinfo + (unsigned long )vf)->vlan_count + 1); } else if ((unsigned int )(adapter->vfinfo + (unsigned long )vf)->vlan_count != 0U) { (adapter->vfinfo + (unsigned long )vf)->vlan_count = (u16 )((int )(adapter->vfinfo + (unsigned long )vf)->vlan_count - 1); } else { } if (add != 0 && ((adapter->netdev)->flags & 256U) != 0U) { err = ixgbe_set_vf_vlan(adapter, add, vid, (u32 )adapter->ring_feature[1].offset); } else { } err = ixgbe_set_vf_vlan(adapter, add, vid, vf); if (err == 0 && (unsigned int )(adapter->vfinfo + (unsigned long )vf)->spoofchk_enabled != 0U) { (*(hw->mac.ops.set_vlan_anti_spoofing))(hw, 1, (int )vf); } else { } if (add == 0 && ((adapter->netdev)->flags & 256U) != 0U) { reg_ndx = ixgbe_find_vlvf_entry(hw, (u32 )vid); if (reg_ndx < 0) { return (err); } else { } vlvf = ixgbe_read_reg(hw, (u32 )((reg_ndx + 15424) * 4));
/* Gather the VLVFB pool bits excluding the PF pool (offset <= 31 selects
 * the low word, otherwise the high word). */
if ((int )adapter->ring_feature[1].offset <= 31) { bits = ixgbe_read_reg(hw, (u32 )((reg_ndx + 7744) * 8)); bits = (u32 )(~ (1 << (int )adapter->ring_feature[1].offset)) & bits; tmp___0 = ixgbe_read_reg(hw, (u32 )((reg_ndx + 7744) * 8 + 1)); bits = tmp___0 | bits; } else { bits = ixgbe_read_reg(hw, (u32 )((reg_ndx + 7744) * 8 + 1)); bits = (u32 )(~ (1 << ((int )adapter->ring_feature[1].offset + -32))) & bits; tmp___1 = ixgbe_read_reg(hw, (u32 )((reg_ndx + 7744) * 8)); bits = tmp___1 | bits; } if ((vlvf & 4095U) == (u32 )vid) { tmp___2 = variable_test_bit((long )vid, (unsigned long const volatile *)(& adapter->active_vlans)); if (tmp___2 == 0) { if (bits == 0U) { ixgbe_set_vf_vlan(adapter, add, vid, (u32 )adapter->ring_feature[1].offset); } else { } } else { } } else { } } else { } return (err); } }
/*
 * ixgbe_set_vf_macvlan_msg - handle a VF MACVLAN mailbox message; index comes
 * from msgbuf[0] bits 23:16, the MAC from msgbuf + 1 byte.  (Declarations
 * continue on the next chunk line.)
 */
static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter , u32 *msgbuf , u32 vf ) { u8 *new_mac 
; int index ; int err ; bool tmp ; int tmp___0 ; { new_mac = (u8 *)msgbuf + 1U; index = (int )((*msgbuf & 16711680U) >> 16);
/* A PF-set MAC forbids VF-requested MACVLAN filters (index > 0). */
if ((int )(adapter->vfinfo + (unsigned long )vf)->pf_set_mac && index > 0) { if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "VF %d requested MACVLAN filter but is administratively denied\n", vf); } else { } return (-1); } else { } if (index != 0) { tmp = is_valid_ether_addr((u8 const *)new_mac); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "VF %d attempted to set invalid mac\n", vf); } else { } return (-1); } else { }
/* Extra unicast addresses defeat MAC anti-spoofing; disable it. */
if ((unsigned int )(adapter->vfinfo + (unsigned long )vf)->spoofchk_enabled != 0U) { ixgbe_ndo_set_vf_spoofchk(adapter->netdev, (int )vf, 0); } else { } } else { } err = ixgbe_set_vf_macvlan(adapter, (int )vf, index, new_mac); if (err == -28) { if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "VF %d has requested a MACVLAN filter but there is no space for it\n", vf); } else { } } else { } return (err < 0); } }
/*
 * ixgbe_negotiate_vf_api - record the VF's requested mailbox API version from
 * msgbuf[1].  Versions 0, 2 and 3 are accepted (return 0); anything else is
 * logged and rejected with -1.
 */
static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter , u32 *msgbuf , u32 vf ) { int api ; { api = (int )*(msgbuf + 1UL); switch (api) { case 0: ; case 2: ; case 3: (adapter->vfinfo + (unsigned long )vf)->vf_api = (unsigned int )api; return (0); default: ; goto ldv_55899; } ldv_55899: ; if ((int )adapter->msg_enable & 1) { netdev_info((struct net_device const *)adapter->netdev, "VF %d requested invalid api version %u\n", vf, api); } else { } return (-1); } }
/*
 * ixgbe_get_vf_queues - answer a VF query for its queue/TC layout (only for
 * negotiated API 1..3).  msgbuf[1]/[2] get the queues-per-pool count (lowest
 * set bit of vmdq->mask), msgbuf[3] the TC count, msgbuf[4] the default TC.
 * (Body continues on the next chunk line.)
 */
static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter , u32 *msgbuf , u32 vf ) { struct net_device *dev ; struct ixgbe_ring_feature *vmdq ; unsigned int default_tc ; u8 num_tcs ; int tmp ; int tmp___0 ; { dev = adapter->netdev; vmdq = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 1UL; default_tc = 0U; tmp = netdev_get_num_tc(dev); num_tcs = (u8 
)tmp; switch ((adapter->vfinfo + (unsigned long )vf)->vf_api) { case 1U: ; case 2U: ; case 3U: ; goto ldv_55912; default: ; return (-1); } ldv_55912: *(msgbuf + 1UL) = (u32 )(- ((int )vmdq->mask) & (int )vmdq->mask); *(msgbuf + 2UL) = (u32 )(- ((int )vmdq->mask) & (int )vmdq->mask); if ((unsigned int )num_tcs > 1U) { tmp___0 = netdev_get_prio_tc_map((struct net_device const *)dev, (u32 )adapter->default_up); default_tc = (unsigned int )tmp___0; } else { } if ((unsigned int )num_tcs != 0U) { *(msgbuf + 3UL) = (u32 )num_tcs; } else if ((unsigned int )(adapter->vfinfo + (unsigned long )vf)->pf_vlan != 0U || (unsigned int )(adapter->vfinfo + (unsigned long )vf)->pf_qos != 0U) { *(msgbuf + 3UL) = 1U; } else { *(msgbuf + 3UL) = 0U; } *(msgbuf + 4UL) = default_tc; return (0); } }
/*
 * ixgbe_get_vf_reta - copy the RSS redirection table to the VF reply buffer,
 * packing 16 two-bit entries (each table byte masked with 3U) per u32 word.
 * Requires rss_query_enabled and negotiated API 3 (-1 / -95 otherwise).
 */
static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter , u32 *msgbuf , u32 vf ) { u32 i ; u32 j ; u32 *out_buf ; u8 const *reta ; u32 reta_size ; u32 tmp ; { out_buf = msgbuf + 1UL; reta = (u8 const *)(& adapter->rss_indir_tbl); tmp = ixgbe_rss_indir_tbl_entries(adapter); reta_size = tmp; if (! (adapter->vfinfo + (unsigned long )vf)->rss_query_enabled) { return (-1); } else { } if ((adapter->vfinfo + (unsigned long )vf)->vf_api != 3U) { return (-95); } else { } i = 0U; goto ldv_55928; ldv_55927: *(out_buf + (unsigned long )i) = 0U; j = 0U; goto ldv_55925; ldv_55924: *(out_buf + (unsigned long )i) = *(out_buf + (unsigned long )i) | (((unsigned int )*(reta + (unsigned long )(i * 16U + j)) & 3U) << (int )(j * 2U)); j = j + 1U; ldv_55925: ; if (j <= 15U) { goto ldv_55924; } else { } i = i + 1U; ldv_55928: ; if (reta_size / 16U > i) { goto ldv_55927; } else { } return (0); } }
/*
 * ixgbe_get_vf_rss_key - copy the 40-byte RSS hash key into the VF reply.
 * Same gating as the RETA query: rss_query_enabled and API 3.
 */
static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter , u32 *msgbuf , u32 vf ) { u32 *rss_key ; { rss_key = msgbuf + 1UL; if (! (adapter->vfinfo + (unsigned long )vf)->rss_query_enabled) { return (-1); } else { } if ((adapter->vfinfo + (unsigned long )vf)->vf_api != 3U) { return (-95); } else { } memcpy((void *)rss_key, (void const *)(& adapter->rss_key), 40UL); return (0); } }
/*
 * ixgbe_rcv_msg_from_vf - read one mailbox message from a VF and dispatch on
 * the low 16 bits of word 0.  Messages already flagged ACK/NACK (bits 31/30)
 * are ignored; a reset message (1U) is handled first; any other message from
 * a VF without clear_to_send is NACKed.  After dispatch the handler result is
 * folded back into word 0 as ACK or NACK plus CTS (bit 29) and the reply is
 * written back.  Unknown opcodes yield -100.  (Continues on the next line.)
 */
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter , u32 vf ) { u32 mbx_size ; u32 msgbuf[16U] ; struct ixgbe_hw *hw ; s32 retval ; int tmp ; { mbx_size = 16U; hw = & adapter->hw; retval = ixgbe_read_mbx(hw, (u32 *)(& msgbuf), (int )((u16 )mbx_size), (int )((u16 )vf)); if (retval != 0) { printk("\vixgbe: Error receiving message from VF\n"); return (retval); } else { } if ((msgbuf[0] & 3221225472U) != 0U) { return (0); } else { } ixgbe_read_reg(hw, 8U); if (msgbuf[0] == 1U) { tmp = ixgbe_vf_reset_msg(adapter, vf); return (tmp); } else { } if (! (adapter->vfinfo + (unsigned long )vf)->clear_to_send) { msgbuf[0] = msgbuf[0] | 1073741824U; ixgbe_write_mbx(hw, (u32 *)(& msgbuf), 1, (int )((u16 )vf)); return (0); } else { } switch (msgbuf[0] & 65535U) { case 2U: retval = ixgbe_set_vf_mac_addr(adapter, (u32 *)(& msgbuf), vf); goto ldv_55945; case 3U: retval = ixgbe_set_vf_multicasts(adapter, (u32 *)(& msgbuf), vf); goto ldv_55945; case 4U: retval = ixgbe_set_vf_vlan_msg(adapter, (u32 *)(& msgbuf), vf); goto ldv_55945; case 5U: retval = ixgbe_set_vf_lpe(adapter, (u32 *)(& msgbuf), vf); goto ldv_55945; case 6U: retval = ixgbe_set_vf_macvlan_msg(adapter, (u32 *)(& msgbuf), vf); goto ldv_55945; case 8U: retval = ixgbe_negotiate_vf_api(adapter, (u32 *)(& msgbuf), vf); goto ldv_55945; case 9U: retval = ixgbe_get_vf_queues(adapter, (u32 *)(& msgbuf), vf); goto ldv_55945; case 10U: retval = ixgbe_get_vf_reta(adapter, (u32 *)(& msgbuf), vf); goto ldv_55945; case 11U: retval = ixgbe_get_vf_rss_key(adapter, (u32 *)(& msgbuf), vf); goto ldv_55945; default: ; if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "Unhandled Msg %8.8x\n", msgbuf[0]); } else { } retval = -100; goto 
ldv_55945; } ldv_55945: ;
/* Fold the handler result into the reply: NACK (bit 30) on error, ACK
 * (bit 31) otherwise, and always set CTS (bit 29). */
if (retval != 0) { msgbuf[0] = msgbuf[0] | 1073741824U; } else { msgbuf[0] = msgbuf[0] | 2147483648U; } msgbuf[0] = msgbuf[0] | 536870912U; ixgbe_write_mbx(hw, (u32 *)(& msgbuf), (int )((u16 )mbx_size), (int )((u16 )vf)); return (retval); } }
/*
 * ixgbe_rcv_ack_from_vf - on a VF ack, NACK (bit 30) any VF that is not yet
 * clear_to_send so it knows the PF is not ready.
 */
static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter , u32 vf ) { struct ixgbe_hw *hw ; u32 msg ; { hw = & adapter->hw; msg = 1073741824U; if (! (adapter->vfinfo + (unsigned long )vf)->clear_to_send) { ixgbe_write_mbx(hw, & msg, 1, (int )((u16 )vf)); } else { } return; } }
/*
 * ixgbe_msg_task - poll every VF's mailbox: handle pending reset events,
 * incoming messages, and acks, in that order.
 */
void ixgbe_msg_task(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 vf ; s32 tmp ; s32 tmp___0 ; s32 tmp___1 ; { hw = & adapter->hw; vf = 0U; goto ldv_55967; ldv_55966: tmp = ixgbe_check_for_rst(hw, (int )((u16 )vf)); if (tmp == 0) { ixgbe_vf_reset_event(adapter, vf); } else { } tmp___0 = ixgbe_check_for_msg(hw, (int )((u16 )vf)); if (tmp___0 == 0) { ixgbe_rcv_msg_from_vf(adapter, vf); } else { } tmp___1 = ixgbe_check_for_ack(hw, (int )((u16 )vf)); if (tmp___1 == 0) { ixgbe_rcv_ack_from_vf(adapter, vf); } else { } vf = vf + 1U; ldv_55967: ; if (adapter->num_vfs > vf) { goto ldv_55966; } else { } return; } }
/*
 * ixgbe_disable_tx_rx - zero the two tx-enable and two rx-enable register
 * pairs, stopping all transmit and receive on the device.
 */
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; { hw = & adapter->hw; ixgbe_write_reg(hw, 33040U, 0U); ixgbe_write_reg(hw, 33044U, 0U); ixgbe_write_reg(hw, 20960U, 0U); ixgbe_write_reg(hw, 20964U, 0U); return; } }
/*
 * ixgbe_ping_all_vfs - send a control message (256U) to every VF, adding the
 * CTS bit (bit 29) for VFs that are clear_to_send.
 */
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 ping ; int i ; { hw = & adapter->hw; i = 0; goto ldv_55980; ldv_55979: ping = 256U; if ((int )(adapter->vfinfo + (unsigned long )i)->clear_to_send) { ping = ping | 536870912U; } else { } ixgbe_write_mbx(hw, & ping, 1, (int )((u16 )i)); i = i + 1; ldv_55980: ; if ((unsigned int )i < adapter->num_vfs) { goto ldv_55979; } else { } return; } }
/*
 * ixgbe_ndo_set_vf_mac - .ndo_set_vf_mac callback: administratively set a
 * VF's MAC.  Validates the address and vf index, marks pf_set_mac, warns when
 * the PF is down, then applies it.  (Declarations continue on the next line.)
 */
int ixgbe_ndo_set_vf_mac(struct net_device *netdev , int vf , u8 *mac ) { struct ixgbe_adapter *adapter ; void *tmp ; bool tmp___0 ; int tmp___1 
; int tmp___2 ; int tmp___3 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; tmp___0 = is_valid_ether_addr((u8 const *)mac); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1 || (unsigned int )vf >= adapter->num_vfs) { return (-22); } else { } (adapter->vfinfo + (unsigned long )vf)->pf_set_mac = 1; _dev_info((struct device const *)(& (adapter->pdev)->dev), "setting MAC %pM on VF %d\n", mac, vf); _dev_info((struct device const *)(& (adapter->pdev)->dev), "Reload the VF driver to make this change effective."); tmp___2 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___2 != 0) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "The VF MAC address has been set, but the PF device is not up.\n"); dev_warn((struct device const *)(& (adapter->pdev)->dev), "Bring the PF device up before attempting to use the VF device.\n"); } else { } tmp___3 = ixgbe_set_vf_mac(adapter, vf, mac); return (tmp___3); } }
/*
 * ixgbe_enable_port_vlan - apply a PF-administered port VLAN + QoS to a VF:
 * add the VLAN filter, program VMVIR, disable AUPE, re-arm anti-spoofing,
 * bump vlan_count, enable drop-on-queue-disable on newer MACs (type > 3U),
 * and record pf_vlan/pf_qos.  Returns 0 or the VLAN-filter error.
 */
static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter , int vf , u16 vlan , u8 qos ) { struct ixgbe_hw *hw ; int err ; int tmp ; { hw = & adapter->hw; err = ixgbe_set_vf_vlan(adapter, 1, (int )vlan, (u32 )vf); if (err != 0) { goto out; } else { } ixgbe_set_vmvir(adapter, (int )vlan, (int )qos, (u32 )vf); ixgbe_set_vmolr(hw, (u32 )vf, 0); if ((unsigned int )(adapter->vfinfo + (unsigned long )vf)->spoofchk_enabled != 0U) { (*(hw->mac.ops.set_vlan_anti_spoofing))(hw, 1, vf); } else { } (adapter->vfinfo + (unsigned long )vf)->vlan_count = (u16 )((int )(adapter->vfinfo + (unsigned long )vf)->vlan_count + 1); if ((unsigned int )hw->mac.type > 3U) { ixgbe_write_qde(adapter, (u32 )vf, 3U); } else { } (adapter->vfinfo + (unsigned long )vf)->pf_vlan = vlan; (adapter->vfinfo + (unsigned long )vf)->pf_qos = (u16 )qos; _dev_info((struct device const *)(& (adapter->pdev)->dev), "Setting VLAN %d, QOS 0x%x on VF %d\n", (int )vlan, (int )qos, vf); tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp != 0) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "The VF VLAN has been set, but the PF device is not up.\n"); dev_warn((struct device const *)(& (adapter->pdev)->dev), "Bring the PF device up before attempting to use the VF device.\n"); } else { } out: ; return (err); } }
/*
 * ixgbe_disable_port_vlan - undo ixgbe_enable_port_vlan: drop the VLAN
 * filter, clear VMVIR, restore AUPE, turn off anti-spoofing, decrement
 * vlan_count, and zero pf_vlan/pf_qos.
 */
static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter , int vf ) { struct ixgbe_hw *hw ; int err ; { hw = & adapter->hw; err = ixgbe_set_vf_vlan(adapter, 0, (int )(adapter->vfinfo + (unsigned long )vf)->pf_vlan, (u32 )vf); ixgbe_clear_vmvir(adapter, (u32 )vf); ixgbe_set_vmolr(hw, (u32 )vf, 1); (*(hw->mac.ops.set_vlan_anti_spoofing))(hw, 0, vf); if ((unsigned int )(adapter->vfinfo + (unsigned long )vf)->vlan_count != 0U) { (adapter->vfinfo + (unsigned long )vf)->vlan_count = (u16 )((int )(adapter->vfinfo + (unsigned long )vf)->vlan_count - 1); } else { } if ((unsigned int )hw->mac.type > 3U) { ixgbe_write_qde(adapter, (u32 )vf, 1U); } else { } (adapter->vfinfo + (unsigned long )vf)->pf_vlan = 0U; (adapter->vfinfo + (unsigned long )vf)->pf_qos = 0U; return (err); } }
/*
 * ixgbe_ndo_set_vf_vlan - .ndo_set_vf_vlan callback.  Validates vf, vlan
 * (<= 4095) and qos (<= 7); a nonzero vlan/qos first removes any existing
 * port VLAN then enables the new one, vlan == 0 && qos == 0 removes it.
 */
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev , int vf , u16 vlan , u8 qos ) { int err ; struct ixgbe_adapter *adapter ; void *tmp ; { err = 0; tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; if (((unsigned int )vf >= adapter->num_vfs || (unsigned int )vlan > 4095U) || (unsigned int )qos > 7U) { return (-22); } else { } if ((unsigned int )vlan != 0U || (unsigned int )qos != 0U) { if ((unsigned int )(adapter->vfinfo + (unsigned long )vf)->pf_vlan != 0U) { err = ixgbe_disable_port_vlan(adapter, vf); } else { } if (err != 0) { goto out; } else { } err = ixgbe_enable_port_vlan(adapter, vf, (int )vlan, (int )qos); } else { err = ixgbe_disable_port_vlan(adapter, vf); } out: ; return (err); } }
/*
 * ixgbe_link_mbps - map adapter->link_speed flags to Mb/s: 8U -> 100,
 * 32U -> 1000, 128U -> 10000, otherwise 0.  (Continues on the next line.)
 */
static int ixgbe_link_mbps(struct ixgbe_adapter *adapter ) { { switch (adapter->link_speed) { case 8U: ; return 
(100); case 32U: ; return (1000); case 128U: ; return (10000); default: ; return (0); } } }
/*
 * ixgbe_set_vf_rate_limit - program the tx rate limiter for every queue in a
 * VF's pool.  bcnrc_val = (link_speed << 14) / tx_rate, masked to 28 bits
 * with the enable bit (bit 31); zero tx_rate leaves the limiter disabled.
 * A MAC-type-specific value (4U on type 2U, 20U on type 3U) is written to
 * register 18816U first; each queue is selected via 18692U, value via 18820U.
 */
static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter , int vf ) { struct ixgbe_ring_feature *vmdq ; struct ixgbe_hw *hw ; u32 bcnrc_val ; u16 queue ; u16 queues_per_pool ; u16 tx_rate ; unsigned int reg_idx ; { vmdq = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 1UL; hw = & adapter->hw; bcnrc_val = 0U; tx_rate = (adapter->vfinfo + (unsigned long )vf)->tx_rate; if ((unsigned int )tx_rate != 0U) { bcnrc_val = (u32 )adapter->vf_rate_link_speed; bcnrc_val = bcnrc_val << 14; bcnrc_val = bcnrc_val / (u32 )tx_rate; bcnrc_val = bcnrc_val & 268435455U; bcnrc_val = bcnrc_val | 2147483648U; } else { } switch ((unsigned int )hw->mac.type) { case 2U: ixgbe_write_reg(hw, 18816U, 4U); goto ldv_56030; case 3U: ixgbe_write_reg(hw, 18816U, 20U); goto ldv_56030; default: ; goto ldv_56030; } ldv_56030: queues_per_pool = (u16 )((int )((short )(- ((int )vmdq->mask))) & (int )((short )vmdq->mask)); queue = 0U; goto ldv_56035; ldv_56034: reg_idx = (unsigned int )((int )queues_per_pool * vf + (int )queue); ixgbe_write_reg(hw, 18692U, reg_idx); ixgbe_write_reg(hw, 18820U, bcnrc_val); queue = (u16 )((int )queue + 1); ldv_56035: ; if ((int )queue < (int )queues_per_pool) { goto ldv_56034; } else { } return; } }
/*
 * ixgbe_check_vf_rate_limit - if the link speed changed since rates were set,
 * drop all VF tx rates to 0; then reprogram the limiter for every VF.
 */
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter ) { int i ; int tmp ; { if (adapter->vf_rate_link_speed == 0) { return; } else { } tmp = ixgbe_link_mbps(adapter); if (tmp != adapter->vf_rate_link_speed) { adapter->vf_rate_link_speed = 0; _dev_info((struct device const *)(& (adapter->pdev)->dev), "Link speed has been changed. VF Transmit rate is disabled\n"); } else { } i = 0; goto ldv_56042; ldv_56041: ; if (adapter->vf_rate_link_speed == 0) { (adapter->vfinfo + (unsigned long )i)->tx_rate = 0U; } else { } ixgbe_set_vf_rate_limit(adapter, i); i = i + 1; ldv_56042: ; if ((unsigned int )i < adapter->num_vfs) { goto ldv_56041; } else { } return; } }
/*
 * ixgbe_ndo_set_vf_bw - .ndo_set_vf_rate callback.  Only supported at 10000
 * Mb/s link with min_tx_rate == 0; max_tx_rate must be 0 (off) or in
 * (10, link_speed].  Stores the rate and reprograms the limiter.
 */
int ixgbe_ndo_set_vf_bw(struct net_device *netdev , int vf , int min_tx_rate , int max_tx_rate ) { struct ixgbe_adapter *adapter ; void *tmp ; int link_speed ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; if ((unsigned int )vf >= adapter->num_vfs) { return (-22); } else { } if (! adapter->link_up) { return (-22); } else { } link_speed = ixgbe_link_mbps(adapter); if (link_speed != 10000) { return (-22); } else { } if (min_tx_rate != 0) { return (-22); } else { } if (max_tx_rate != 0 && (max_tx_rate <= 10 || max_tx_rate > link_speed)) { return (-22); } else { } adapter->vf_rate_link_speed = link_speed; (adapter->vfinfo + (unsigned long )vf)->tx_rate = (u16 )max_tx_rate; ixgbe_set_vf_rate_limit(adapter, vf); return (0); } }
/*
 * ixgbe_ndo_set_vf_spoofchk - .ndo_set_vf_spoofchk callback: set/clear the
 * VF's MAC anti-spoof bit (8 VFs per register word at (vf/8 + 8320) * 4,
 * bit vf % 8) and, when the VF has VLANs, the VLAN anti-spoof bit 8 positions
 * higher.  (Body continues on the next chunk line.)
 */
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev , int vf , bool setting ) { struct ixgbe_adapter *adapter ; void *tmp ; int vf_target_reg ; int vf_target_shift ; struct ixgbe_hw *hw ; u32 regval ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; vf_target_reg = vf >> 3; vf_target_shift = vf % 8; hw = & adapter->hw; if ((unsigned int )vf >= adapter->num_vfs) { return (-22); } else { } (adapter->vfinfo + (unsigned long )vf)->spoofchk_enabled = (u8 )setting; regval = ixgbe_read_reg(hw, (u32 )((vf_target_reg + 8320) * 4)); regval = (u32 )(~ (1 << vf_target_shift)) & regval; regval = (u32 )((int )setting << vf_target_shift) | regval; ixgbe_write_reg(hw, (u32 )((vf_target_reg + 8320) * 4), regval); if ((unsigned int )(adapter->vfinfo + (unsigned long )vf)->vlan_count != 0U) { vf_target_shift = vf_target_shift + 8; regval = 
ixgbe_read_reg(hw, (u32 )((vf_target_reg + 8320) * 4)); regval = (u32 )(~ (1 << vf_target_shift)) & regval; regval = (u32 )((int )setting << vf_target_shift) | regval; ixgbe_write_reg(hw, (u32 )((vf_target_reg + 8320) * 4), regval); } else { } return (0); } }
/*
 * ixgbe_ndo_set_vf_rss_query_en - .ndo callback: allow/deny a VF to query the
 * RSS configuration.  Only supported on mac.type 2U/3U (-95 otherwise).
 */
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev , int vf , bool setting ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; if ((unsigned int )adapter->hw.mac.type <= 1U || (unsigned int )adapter->hw.mac.type > 3U) { return (-95); } else { } if ((unsigned int )vf >= adapter->num_vfs) { return (-22); } else { } (adapter->vfinfo + (unsigned long )vf)->rss_query_enabled = setting; return (0); } }
/*
 * ixgbe_ndo_get_vf_config - .ndo_get_vf_config callback: fill ifla_vf_info
 * from the per-VF state (MAC, tx rate, port VLAN/QoS, spoofchk, rss query).
 */
int ixgbe_ndo_get_vf_config(struct net_device *netdev , int vf , struct ifla_vf_info *ivi ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; if ((unsigned int )vf >= adapter->num_vfs) { return (-22); } else { } ivi->vf = (__u32 )vf; memcpy((void *)(& ivi->mac), (void const *)(& (adapter->vfinfo + (unsigned long )vf)->vf_mac_addresses), 6UL); ivi->max_tx_rate = (__u32 )(adapter->vfinfo + (unsigned long )vf)->tx_rate; ivi->min_tx_rate = 0U; ivi->vlan = (__u32 )(adapter->vfinfo + (unsigned long )vf)->pf_vlan; ivi->qos = (__u32 )(adapter->vfinfo + (unsigned long )vf)->pf_qos; ivi->spoofchk = (__u32 )(adapter->vfinfo + (unsigned long )vf)->spoofchk_enabled; ivi->rss_query_en = (__u32 )(adapter->vfinfo + (unsigned long )vf)->rss_query_enabled; return (0); } }
/*
 * LDV-generated workqueue stubs: forward to the real queue_work_on /
 * queue_delayed_work_on and record the activation in the verifier model
 * (activate_work_9).  Verification scaffolding — not driver logic.
 */
bool ldv_queue_work_on_348(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_349(int ldv_func_arg1 , 
struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_350(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_351(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } } bool ldv_queue_delayed_work_on_352(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_358(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_364(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_366(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_368(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff 
*ldv___netdev_alloc_skb_369(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_370(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_371(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_372(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_373(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_374(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_375(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_395(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_397(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_396(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_399(int ldv_func_arg1 , struct 
workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_398(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_405(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_422(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; extern void __udelay(unsigned long ) ; struct sk_buff *ldv_skb_clone_413(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_421(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_415(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_411(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_419(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_420(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_416(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_417(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_418(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; /* ixgbe_read_mbx: clamp the request to the mailbox size and dispatch through mbx->ops.read; -100 (IXGBE_ERR_MBX) when the op is absent. */ s32 ixgbe_read_mbx(struct ixgbe_hw *hw , u32 *msg , u16 size , u16 mbx_id ) { struct ixgbe_mbx_info *mbx ; s32 tmp ; { mbx = & hw->mbx; if ((int )mbx->size < (int )size) { size = mbx->size; } else { } if ((unsigned long )mbx->ops.read == (unsigned long )((s32 (*)(struct ixgbe_hw * , u32 * , u16 , u16 ))0)) { return (-100); } else { } tmp = (*(mbx->ops.read))(hw, msg, (int )size, (int )mbx_id); return (tmp); } } /* ixgbe_write_mbx: unlike read, an oversized write is rejected outright rather than clamped. */ s32 ixgbe_write_mbx(struct ixgbe_hw *hw , u32 *msg , u16 size , u16 mbx_id ) { struct ixgbe_mbx_info *mbx ; s32 tmp ; { mbx = & hw->mbx; if ((int )mbx->size < (int )size) { return (-100); } else { } if ((unsigned long )mbx->ops.write 
== (unsigned long )((s32 (*)(struct ixgbe_hw * , u32 * , u16 , u16 ))0)) { return (-100); } else { } tmp = (*(mbx->ops.write))(hw, msg, (int )size, (int )mbx_id); return (tmp); } } s32 ixgbe_check_for_msg(struct ixgbe_hw *hw , u16 mbx_id ) { struct ixgbe_mbx_info *mbx ; s32 tmp ; { mbx = & hw->mbx; if ((unsigned long )mbx->ops.check_for_msg == (unsigned long )((s32 (*)(struct ixgbe_hw * , u16 ))0)) { return (-100); } else { } tmp = (*(mbx->ops.check_for_msg))(hw, (int )mbx_id); return (tmp); } } s32 ixgbe_check_for_ack(struct ixgbe_hw *hw , u16 mbx_id ) { struct ixgbe_mbx_info *mbx ; s32 tmp ; { mbx = & hw->mbx; if ((unsigned long )mbx->ops.check_for_ack == (unsigned long )((s32 (*)(struct ixgbe_hw * , u16 ))0)) { return (-100); } else { } tmp = (*(mbx->ops.check_for_ack))(hw, (int )mbx_id); return (tmp); } } s32 ixgbe_check_for_rst(struct ixgbe_hw *hw , u16 mbx_id ) { struct ixgbe_mbx_info *mbx ; s32 tmp ; { mbx = & hw->mbx; if ((unsigned long )mbx->ops.check_for_rst == (unsigned long )((s32 (*)(struct ixgbe_hw * , u16 ))0)) { return (-100); } else { } tmp = (*(mbx->ops.check_for_rst))(hw, (int )mbx_id); return (tmp); } } /* ixgbe_poll_for_msg: busy-wait (udelay per iteration) up to mbx->timeout iterations for a message; -100 on timeout or missing op. */ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw , u16 mbx_id ) { struct ixgbe_mbx_info *mbx ; int countdown ; s32 tmp ; { mbx = & hw->mbx; countdown = (int )mbx->timeout; if (countdown == 0 || (unsigned long )mbx->ops.check_for_msg == (unsigned long )((s32 (*)(struct ixgbe_hw * , u16 ))0)) { return (-100); } else { } goto ldv_55372; ldv_55371: countdown = countdown - 1; if (countdown == 0) { return (-100); } else { } __udelay((unsigned long )mbx->usec_delay); ldv_55372: tmp = (*(mbx->ops.check_for_msg))(hw, (int )mbx_id); if (tmp != 0) { goto ldv_55371; } else { } return (0); } } static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw , u16 mbx_id ) { struct ixgbe_mbx_info *mbx ; int countdown ; s32 tmp ; { mbx = & hw->mbx; countdown = (int )mbx->timeout; if (countdown == 0 || (unsigned long )mbx->ops.check_for_ack == (unsigned long )((s32 (*)(struct 
ixgbe_hw * , u16 ))0)) { return (-100); } else { } goto ldv_55381; ldv_55380: countdown = countdown - 1; if (countdown == 0) { return (-100); } else { } __udelay((unsigned long )mbx->usec_delay); ldv_55381: tmp = (*(mbx->ops.check_for_ack))(hw, (int )mbx_id); if (tmp != 0) { goto ldv_55380; } else { } return (0); } } /* posted variants: read waits for a message first; write sends then waits for the ack. */ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw , u32 *msg , u16 size , u16 mbx_id ) { struct ixgbe_mbx_info *mbx ; s32 ret_val ; s32 tmp ; { mbx = & hw->mbx; if ((unsigned long )mbx->ops.read == (unsigned long )((s32 (*)(struct ixgbe_hw * , u32 * , u16 , u16 ))0)) { return (-100); } else { } ret_val = ixgbe_poll_for_msg(hw, (int )mbx_id); if (ret_val != 0) { return (ret_val); } else { } tmp = (*(mbx->ops.read))(hw, msg, (int )size, (int )mbx_id); return (tmp); } } static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw , u32 *msg , u16 size , u16 mbx_id ) { struct ixgbe_mbx_info *mbx ; s32 ret_val ; s32 tmp ; { mbx = & hw->mbx; if ((unsigned long )mbx->ops.write == (unsigned long )((s32 (*)(struct ixgbe_hw * , u32 * , u16 , u16 ))0) || mbx->timeout == 0U) { return (-100); } else { } ret_val = (*(mbx->ops.write))(hw, msg, (int )size, (int )mbx_id); if (ret_val != 0) { return (ret_val); } else { } tmp = ixgbe_poll_for_ack(hw, (int )mbx_id); return (tmp); } } /* ixgbe_check_for_bit_pf: test-and-clear a bit in MBVFICR[index] ((index + 452)*4 is the register byte offset); 0 if the bit was set, -100 otherwise. */ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw , u32 mask , s32 index ) { u32 mbvficr ; u32 tmp ; { tmp = ixgbe_read_reg(hw, (u32 )((index + 452) * 4)); mbvficr = tmp; if ((mbvficr & mask) != 0U) { ixgbe_write_reg(hw, (u32 )((index + 452) * 4), mask); return (0); } else { } return (-100); } } static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw , u16 vf_number ) { s32 index ; u32 vf_bit ; s32 tmp ; { index = (int )vf_number >> 4; vf_bit = (u32 )vf_number & 15U; tmp = ixgbe_check_for_bit_pf(hw, (u32 )(1 << (int )vf_bit), index); if (tmp == 0) { hw->mbx.stats.reqs = hw->mbx.stats.reqs + 1U; return (0); } else { } return (-100); } } static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw , u16 
vf_number ) { s32 index ; u32 vf_bit ; s32 tmp ; { index = (int )vf_number >> 4; vf_bit = (u32 )vf_number & 15U; tmp = ixgbe_check_for_bit_pf(hw, (u32 )(65536 << (int )vf_bit), index); if (tmp == 0) { hw->mbx.stats.acks = hw->mbx.stats.acks + 1U; return (0); } else { } return (-100); } } /* ixgbe_check_for_rst_pf: test-and-clear the VF's bit in the VFLRE register; the register address differs per MAC generation (case 2U uses the 82599 layout). */ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw , u16 vf_number ) { u32 reg_offset ; u32 vf_shift ; u32 vflre ; { reg_offset = (unsigned int )vf_number > 31U; vf_shift = (u32 )vf_number & 31U; vflre = 0U; switch ((unsigned int )hw->mac.type) { case 2U: vflre = ixgbe_read_reg(hw, (int )reg_offset & 1 ? 448U : 1536U); goto ldv_55425; case 3U: ; case 4U: ; case 5U: vflre = ixgbe_read_reg(hw, (reg_offset + 448U) * 4U); goto ldv_55425; default: ; goto ldv_55425; } ldv_55425: ; if (((u32 )(1 << (int )vf_shift) & vflre) != 0U) { ixgbe_write_reg(hw, (reg_offset + 448U) * 4U, (u32 )(1 << (int )vf_shift)); hw->mbx.stats.rsts = hw->mbx.stats.rsts + 1U; return (0); } else { } return (-100); } } /* ixgbe_obtain_mbx_lock_pf: request the PF/VF mailbox buffer by writing PFU (bit 3) to PFMAILBOX(vf) and reading it back; single attempt, no retry here. */ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw , u16 vf_number ) { u32 p2v_mailbox ; { ixgbe_write_reg(hw, (u32 )(((int )vf_number + 4800) * 4), 8U); p2v_mailbox = ixgbe_read_reg(hw, (u32 )(((int )vf_number + 4800) * 4)); if ((p2v_mailbox & 8U) != 0U) { return (0); } else { } return (-100); } } /* ixgbe_write_mbx_pf: take the lock, flush any stale msg/ack indications, copy `size` dwords into the VF mailbox memory, then set STS (1) to interrupt the VF. */ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw , u32 *msg , u16 size , u16 vf_number ) { s32 ret_val ; u16 i ; { ret_val = ixgbe_obtain_mbx_lock_pf(hw, (int )vf_number); if (ret_val != 0) { return (ret_val); } else { } ixgbe_check_for_msg_pf(hw, (int )vf_number); ixgbe_check_for_ack_pf(hw, (int )vf_number); i = 0U; goto ldv_55444; ldv_55443: ixgbe_write_reg(hw, (u32 )(((int )vf_number + 1216) * 64 + ((int )i << 2)), *(msg + (unsigned long )i)); i = (u16 )((int )i + 1); ldv_55444: ; if ((int )i < (int )size) { goto ldv_55443; } else { } ixgbe_write_reg(hw, (u32 )(((int )vf_number + 4800) * 4), 1U); hw->mbx.stats.msgs_tx = hw->mbx.stats.msgs_tx + 1U; return (0); } } static s32 
ixgbe_read_mbx_pf(struct ixgbe_hw *hw , u32 *msg , u16 size , u16 vf_number ) { s32 ret_val ; u16 i ; { ret_val = ixgbe_obtain_mbx_lock_pf(hw, (int )vf_number); if (ret_val != 0) { return (ret_val); } else { } i = 0U; goto ldv_55455; ldv_55454: *(msg + (unsigned long )i) = ixgbe_read_reg(hw, (u32 )(((int )vf_number + 1216) * 64 + ((int )i << 2))); i = (u16 )((int )i + 1); ldv_55455: ; if ((int )i < (int )size) { goto ldv_55454; } else { } ixgbe_write_reg(hw, (u32 )(((int )vf_number + 4800) * 4), 2U); hw->mbx.stats.msgs_rx = hw->mbx.stats.msgs_rx + 1U; return (0); } } /* ixgbe_init_mbx_params_pf: only for mac types 2..5; PF side uses zero timeout/delay (it never blocks on the VF) and a 16-dword mailbox. */ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw ) { struct ixgbe_mbx_info *mbx ; { mbx = & hw->mbx; if ((((unsigned int )hw->mac.type != 2U && (unsigned int )hw->mac.type != 4U) && (unsigned int )hw->mac.type != 5U) && (unsigned int )hw->mac.type != 3U) { return; } else { } mbx->timeout = 0U; mbx->usec_delay = 0U; mbx->stats.msgs_tx = 0U; mbx->stats.msgs_rx = 0U; mbx->stats.reqs = 0U; mbx->stats.acks = 0U; mbx->stats.rsts = 0U; mbx->size = 16U; return; } } /* PF mailbox ops table; first slot (init_params) intentionally NULL in this harness. */ struct ixgbe_mbx_operations mbx_ops_generic = {0, & ixgbe_read_mbx_pf, & ixgbe_write_mbx_pf, & ixgbe_read_posted_mbx, & ixgbe_write_posted_mbx, & ixgbe_check_for_msg_pf, & ixgbe_check_for_ack_pf, & ixgbe_check_for_rst_pf}; extern int ldv_release_26(void) ; extern int ldv_probe_26(void) ; void ldv_initialize_ixgbe_mbx_operations_26(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); mbx_ops_generic_group0 = (struct ixgbe_hw *)tmp; return; } } /* LDV environment driver for interface group 26: nondeterministically invokes the mbx_ops_generic callbacks with zero-initialized arguments, tracking a 2-state (probed/released) automaton in ldv_state_variable_26. */ void ldv_main_exported_26(void) { u16 ldvarg83 ; u32 *ldvarg96 ; void *tmp ; u16 ldvarg89 ; u16 ldvarg92 ; u16 ldvarg97 ; u32 *ldvarg93 ; void *tmp___0 ; u16 ldvarg86 ; u32 *ldvarg87 ; void *tmp___1 ; u32 *ldvarg90 ; void *tmp___2 ; u16 ldvarg88 ; u16 ldvarg91 ; u16 ldvarg94 ; u16 ldvarg84 ; u16 ldvarg85 ; u16 ldvarg95 ; int tmp___3 ; { tmp = ldv_init_zalloc(4UL); ldvarg96 = (u32 *)tmp; tmp___0 = ldv_init_zalloc(4UL); ldvarg93 = (u32 *)tmp___0; tmp___1 = ldv_init_zalloc(4UL); ldvarg87 = (u32 *)tmp___1; tmp___2 
= ldv_init_zalloc(4UL); ldvarg90 = (u32 *)tmp___2; ldv_memset((void *)(& ldvarg83), 0, 2UL); ldv_memset((void *)(& ldvarg89), 0, 2UL); ldv_memset((void *)(& ldvarg92), 0, 2UL); ldv_memset((void *)(& ldvarg97), 0, 2UL); ldv_memset((void *)(& ldvarg86), 0, 2UL); ldv_memset((void *)(& ldvarg88), 0, 2UL); ldv_memset((void *)(& ldvarg91), 0, 2UL); ldv_memset((void *)(& ldvarg94), 0, 2UL); ldv_memset((void *)(& ldvarg84), 0, 2UL); ldv_memset((void *)(& ldvarg85), 0, 2UL); ldv_memset((void *)(& ldvarg95), 0, 2UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_26 == 1) { ixgbe_check_for_msg_pf(mbx_ops_generic_group0, (int )ldvarg97); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { ixgbe_check_for_msg_pf(mbx_ops_generic_group0, (int )ldvarg97); ldv_state_variable_26 = 2; } else { } goto ldv_55488; case 1: ; if (ldv_state_variable_26 == 2) { ixgbe_write_posted_mbx(mbx_ops_generic_group0, ldvarg96, (int )ldvarg95, (int )ldvarg94); ldv_state_variable_26 = 2; } else { } goto ldv_55488; case 2: ; if (ldv_state_variable_26 == 2) { ixgbe_read_posted_mbx(mbx_ops_generic_group0, ldvarg93, (int )ldvarg92, (int )ldvarg91); ldv_state_variable_26 = 2; } else { } goto ldv_55488; case 3: ; if (ldv_state_variable_26 == 1) { ixgbe_write_mbx_pf(mbx_ops_generic_group0, ldvarg90, (int )ldvarg89, (int )ldvarg88); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { ixgbe_write_mbx_pf(mbx_ops_generic_group0, ldvarg90, (int )ldvarg89, (int )ldvarg88); ldv_state_variable_26 = 2; } else { } goto ldv_55488; case 4: ; if (ldv_state_variable_26 == 1) { ixgbe_read_mbx_pf(mbx_ops_generic_group0, ldvarg87, (int )ldvarg86, (int )ldvarg85); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { ixgbe_read_mbx_pf(mbx_ops_generic_group0, ldvarg87, (int )ldvarg86, (int )ldvarg85); ldv_state_variable_26 = 2; } else { } goto ldv_55488; case 5: ; if (ldv_state_variable_26 == 1) { 
ixgbe_check_for_rst_pf(mbx_ops_generic_group0, (int )ldvarg84); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { ixgbe_check_for_rst_pf(mbx_ops_generic_group0, (int )ldvarg84); ldv_state_variable_26 = 2; } else { } goto ldv_55488; case 6: ; if (ldv_state_variable_26 == 1) { ixgbe_check_for_ack_pf(mbx_ops_generic_group0, (int )ldvarg83); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { ixgbe_check_for_ack_pf(mbx_ops_generic_group0, (int )ldvarg83); ldv_state_variable_26 = 2; } else { } goto ldv_55488; case 7: ; if (ldv_state_variable_26 == 2) { ldv_release_26(); ldv_state_variable_26 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_55488; case 8: ; if (ldv_state_variable_26 == 1) { ldv_probe_26(); ldv_state_variable_26 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_55488; default: ldv_stop(); } ldv_55488: ; return; } } /* LDV workqueue stubs for this translation unit (same models as the *_348.. set above). */ bool ldv_queue_work_on_395(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_396(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_397(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_398(struct workqueue_struct *ldv_func_arg1 ) { { 
flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } } bool ldv_queue_delayed_work_on_399(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } /* LDV allocation models (GFP check + nondeterministic pointer), same pattern as earlier stubs. */ void *ldv_kmem_cache_alloc_405(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_411(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_413(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_415(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_416(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_417(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_418(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_419(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int 
ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_420(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_421(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_422(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } /* start of the next merged translation unit (ixgbe_x540.c): LDV stub and X540 API forward declarations. */ __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_442(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_444(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_443(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_446(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_445(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_452(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_469(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_460(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_468(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_462(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_458(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_466(struct sk_buff 
*ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_467(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_463(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_464(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_465(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw ) ; s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait_to_complete ) ; s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw ) ; s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw ) ; enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw ) ; s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw , u32 index ) ; s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw , u32 index ) ; s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw , u32 mask ) ; void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw , u32 mask ) ; s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw ) ; static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw ) ; static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw ) ; static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw ) ; static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw ) ; /* X540 media type: always copper (enum value 4). */ enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw ) { { return (4); } } /* ixgbe_get_invariants_X540: fixed X540 capability limits (128 RAR/MTA/VFTA entries, 128 rx/tx queues, 384KB packet buffer) plus probed MSI-X vector count. */ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw ) { struct ixgbe_mac_info *mac ; { mac = & hw->mac; mac->mcft_size = 128U; mac->vft_size = 128U; mac->num_rar_entries = 128U; mac->rx_pb_size = 384U; mac->max_rx_queues = 128U; mac->max_tx_queues = 128U; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); return (0); } } s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool 
autoneg_wait_to_complete ) { s32 tmp ; { tmp = (*(hw->phy.ops.setup_link_speed))(hw, speed, (int )autoneg_wait_to_complete); return (tmp); } } /* ixgbe_reset_hw_X540: stop the adapter, issue a combined LRST|RST (0x04000000 | ...) via CTRL, poll up to 10 * 1ms-ish for completion, optionally double-reset (mac.flags bit 0), then restore RAR/SAN MAC state. */ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw ) { s32 status ; u32 ctrl ; u32 i ; u32 tmp ; struct _ddebug descriptor ; long tmp___0 ; bool tmp___1 ; { status = (*(hw->mac.ops.stop_adapter))(hw); if (status != 0) { return (status); } else { } ixgbe_clear_tx_pending(hw); mac_reset_top: ctrl = 67108864U; tmp = ixgbe_read_reg(hw, 0U); ctrl = tmp | ctrl; ixgbe_write_reg(hw, 0U, ctrl); ixgbe_read_reg(hw, 8U); i = 0U; goto ldv_55490; ldv_55489: __const_udelay(4295UL); ctrl = ixgbe_read_reg(hw, 0U); if ((ctrl & 67108872U) == 0U) { goto ldv_55488; } else { } i = i + 1U; ldv_55490: ; if (i <= 9U) { goto ldv_55489; } else { } ldv_55488: ; if ((ctrl & 67108872U) != 0U) { status = -15; descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_reset_hw_X540"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor.format = "Reset polling failed to complete.\n"; descriptor.lineno = 119U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Reset polling failed to complete.\n"); } else { } } else { } msleep(100U); if ((int )hw->mac.flags & 1) { hw->mac.flags = (unsigned int )hw->mac.flags & 254U; goto mac_reset_top; } else { } ixgbe_write_reg(hw, 15360U, 393216U); (*(hw->mac.ops.get_mac_addr))(hw, (u8 *)(& hw->mac.perm_addr)); hw->mac.num_rar_entries = 128U; (*(hw->mac.ops.init_rx_addrs))(hw); (*(hw->mac.ops.get_san_mac_addr))(hw, (u8 *)(& hw->mac.san_addr)); tmp___1 = is_valid_ether_addr((u8 const *)(& hw->mac.san_addr)); if ((int )tmp___1) { (*(hw->mac.ops.set_rar))(hw, 
hw->mac.num_rar_entries - 1U, (u8 *)(& hw->mac.san_addr), 0U, 2147483648U); hw->mac.san_mac_rar_index = (unsigned int )((u8 )hw->mac.num_rar_entries) - 1U; hw->mac.num_rar_entries = hw->mac.num_rar_entries - 1U; } else { } (*(hw->mac.ops.get_wwn_prefix))(hw, & hw->mac.wwnn_prefix, & hw->mac.wwpn_prefix); return (status); } } /* ixgbe_start_hw_X540: generic start, then gen2-specific start; first failure wins. */ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw ) { s32 ret_val ; s32 tmp ; { ret_val = ixgbe_start_hw_generic(hw); if (ret_val != 0) { return (ret_val); } else { } tmp = ixgbe_start_hw_gen2(hw); return (tmp); } } /* ixgbe_init_eeprom_params_X540: first-call-only init; derives EEPROM word size from the EEC size field (1 << (field + 6)). */ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw ) { struct ixgbe_eeprom_info *eeprom ; u32 eec ; u16 eeprom_size ; struct _ddebug descriptor ; long tmp ; { eeprom = & hw->eeprom; if ((unsigned int )eeprom->type == 0U) { eeprom->semaphore_delay = 10U; eeprom->type = 2; eec = ixgbe_read_reg(hw, *(hw->mvals)); eeprom_size = (unsigned short )((eec & 30720U) >> 11); eeprom->word_size = (u16 )(1 << ((int )eeprom_size + 6)); descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_init_eeprom_params_X540"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor.format = "Eeprom params: type = %d, size = %d\n"; descriptor.lineno = 212U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Eeprom params: type = %d, size = %d\n", (unsigned int )eeprom->type, (int )eeprom->word_size); } else { } } else { } return (0); } } /* EEPROM access wrappers: each takes the SWFW EEPROM semaphore (mask 1), runs the generic op, and releases; -16 == -EBUSY when the semaphore is held. */ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw , u16 offset , u16 *data ) { s32 status ; s32 tmp ; { tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, 1U); if (tmp != 0) { return (-16); } else { } status = ixgbe_read_eerd_generic(hw, (int )offset, data); 
(*(hw->mac.ops.release_swfw_sync))(hw, 1U); return (status); } } static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) { s32 status ; s32 tmp ; { tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, 1U); if (tmp != 0) { return (-16); } else { } status = ixgbe_read_eerd_buffer_generic(hw, (int )offset, (int )words, data); (*(hw->mac.ops.release_swfw_sync))(hw, 1U); return (status); } } static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw , u16 offset , u16 data ) { s32 status ; s32 tmp ; { tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, 1U); if (tmp != 0) { return (-16); } else { } status = ixgbe_write_eewr_generic(hw, (int )offset, (int )data); (*(hw->mac.ops.release_swfw_sync))(hw, 1U); return (status); } } static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) { s32 status ; s32 tmp ; { tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, 1U); if (tmp != 0) { return (-16); } else { } status = ixgbe_write_eewr_buffer_generic(hw, (int )offset, (int )words, data); (*(hw->mac.ops.release_swfw_sync))(hw, 1U); return (status); } } /* ixgbe_calc_eeprom_checksum_X540: sum words 0..62, then walk the pointer words (skipping PHY/option ROM slots 4 and 5) and sum each pointed-to region; result is IXGBE_EEPROM_SUM (0xBABA == 47802) minus the sum, returned as a non-negative s32. */ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw ) { u16 i ; u16 j ; u16 checksum ; u16 length ; u16 pointer ; u16 word ; u16 checksum_last_word ; u16 ptr_start ; struct _ddebug descriptor ; long tmp ; s32 tmp___0 ; struct _ddebug descriptor___0 ; long tmp___1 ; s32 tmp___2 ; struct _ddebug descriptor___1 ; long tmp___3 ; s32 tmp___4 ; struct _ddebug descriptor___2 ; long tmp___5 ; s32 tmp___6 ; { checksum = 0U; length = 0U; pointer = 0U; word = 0U; checksum_last_word = 63U; ptr_start = 3U; i = 0U; goto ldv_55545; ldv_55544: tmp___0 = ixgbe_read_eerd_generic(hw, (int )i, & word); if (tmp___0 != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_calc_eeprom_checksum_X540"; descriptor.filename = 
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor.format = "EEPROM read failed\n"; descriptor.lineno = 334U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } return (-1); } else { } checksum = (int )checksum + (int )word; i = (u16 )((int )i + 1); ldv_55545: ; if ((int )i < (int )checksum_last_word) { goto ldv_55544; } else { } i = ptr_start; goto ldv_55556; ldv_55555: ; if ((unsigned int )i == 4U || (unsigned int )i == 5U) { goto ldv_55547; } else { } tmp___2 = ixgbe_read_eerd_generic(hw, (int )i, & pointer); if (tmp___2 != 0) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_calc_eeprom_checksum_X540"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor___0.format = "EEPROM read failed\n"; descriptor___0.lineno = 349U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } goto ldv_55549; } else { } if (((unsigned int )pointer == 65535U || (unsigned int )pointer == 0U) || (int )hw->eeprom.word_size <= (int )pointer) { goto ldv_55547; } else { } tmp___4 = ixgbe_read_eerd_generic(hw, (int )pointer, & length); if (tmp___4 != 0) { descriptor___1.modname = "ixgbe"; descriptor___1.function = "ixgbe_calc_eeprom_checksum_X540"; descriptor___1.filename = 
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor___1.format = "EEPROM read failed\n"; descriptor___1.lineno = 359U; descriptor___1.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } return (-1); } else { } if (((unsigned int )length == 65535U || (unsigned int )length == 0U) || (int )pointer + (int )length >= (int )hw->eeprom.word_size) { goto ldv_55547; } else { } j = (unsigned int )pointer + 1U; goto ldv_55553; ldv_55552: tmp___6 = ixgbe_read_eerd_generic(hw, (int )j, & word); if (tmp___6 != 0) { descriptor___2.modname = "ixgbe"; descriptor___2.function = "ixgbe_calc_eeprom_checksum_X540"; descriptor___2.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor___2.format = "EEPROM read failed\n"; descriptor___2.lineno = 371U; descriptor___2.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___5 != 0L) { __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } return (-1); } else { } checksum = (int )checksum + (int )word; j = (u16 )((int )j + 1); ldv_55553: ; if ((int )j <= (int )pointer + (int )length) { goto ldv_55552; } else { } ldv_55547: i = (u16 )((int )i + 1); ldv_55556: ; if ((unsigned int )i <= 14U) { goto ldv_55555; } else { } ldv_55549: checksum = 47802U - (unsigned int )checksum; return ((s32 )checksum); } } static s32 
ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw , u16 *checksum_val ) { s32 status ; u16 checksum ; u16 read_checksum ; struct _ddebug descriptor ; long tmp ; s32 tmp___0 ; struct _ddebug descriptor___0 ; long tmp___1 ; { read_checksum = 0U; status = (*(hw->eeprom.ops.read))(hw, 0, & checksum); if (status != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_validate_eeprom_checksum_X540"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor.format = "EEPROM read failed\n"; descriptor.lineno = 404U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } return (status); } else { } tmp___0 = (*(hw->mac.ops.acquire_swfw_sync))(hw, 1U); if (tmp___0 != 0) { return (-16); } else { } status = (*(hw->eeprom.ops.calc_checksum))(hw); if (status < 0) { goto out; } else { } checksum = (unsigned short )status; status = ixgbe_read_eerd_generic(hw, 63, & read_checksum); if (status != 0) { goto out; } else { } if ((int )read_checksum != (int )checksum) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_validate_eeprom_checksum_X540"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor___0.format = "Invalid EEPROM checksum"; descriptor___0.lineno = 429U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct 
ixgbe_adapter *)hw->back)->netdev, "Invalid EEPROM checksum"); } else { } status = -2; } else { } if ((unsigned long )checksum_val != (unsigned long )((u16 *)0U)) { *checksum_val = checksum; } else { } out: (*(hw->mac.ops.release_swfw_sync))(hw, 1U); return (status); } } static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw ) { s32 status ; u16 checksum ; struct _ddebug descriptor ; long tmp ; s32 tmp___0 ; { status = (*(hw->eeprom.ops.read))(hw, 0, & checksum); if (status != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_update_eeprom_checksum_X540"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor.format = "EEPROM read failed\n"; descriptor.lineno = 462U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } return (status); } else { } tmp___0 = (*(hw->mac.ops.acquire_swfw_sync))(hw, 1U); if (tmp___0 != 0) { return (-16); } else { } status = (*(hw->eeprom.ops.calc_checksum))(hw); if (status < 0) { goto out; } else { } checksum = (unsigned short )status; status = ixgbe_write_eewr_generic(hw, 63, (int )checksum); if (status != 0) { goto out; } else { } status = ixgbe_update_flash_X540(hw); out: (*(hw->mac.ops.release_swfw_sync))(hw, 1U); return (status); } } static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw ) { u32 flup ; s32 status ; struct _ddebug descriptor ; long tmp ; u32 tmp___0 ; struct _ddebug descriptor___0 ; long tmp___1 ; struct _ddebug descriptor___1 ; long tmp___2 ; struct _ddebug descriptor___2 ; long tmp___3 ; struct _ddebug descriptor___3 ; long tmp___4 ; { status = ixgbe_poll_flash_update_done_X540(hw); if (status == -1) { 
descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_update_flash_X540"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor.format = "Flash update time out\n"; descriptor.lineno = 503U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flash update time out\n"); } else { } return (status); } else { } tmp___0 = ixgbe_read_reg(hw, *(hw->mvals)); flup = tmp___0 | 8388608U; ixgbe_write_reg(hw, *(hw->mvals), flup); status = ixgbe_poll_flash_update_done_X540(hw); if (status == 0) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_update_flash_X540"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor___0.format = "Flash update complete\n"; descriptor___0.lineno = 512U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flash update complete\n"); } else { } } else { descriptor___1.modname = "ixgbe"; descriptor___1.function = "ixgbe_update_flash_X540"; descriptor___1.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor___1.format = "Flash update time out\n"; descriptor___1.lineno = 514U; descriptor___1.flags = 
0U; tmp___2 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flash update time out\n"); } else { } } if ((unsigned int )hw->revision_id == 0U) { flup = ixgbe_read_reg(hw, *(hw->mvals)); if ((flup & 33554432U) != 0U) { flup = flup | 8388608U; ixgbe_write_reg(hw, *(hw->mvals), flup); } else { } status = ixgbe_poll_flash_update_done_X540(hw); if (status == 0) { descriptor___2.modname = "ixgbe"; descriptor___2.function = "ixgbe_update_flash_X540"; descriptor___2.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor___2.format = "Flash update complete\n"; descriptor___2.lineno = 526U; descriptor___2.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flash update complete\n"); } else { } } else { descriptor___3.modname = "ixgbe"; descriptor___3.function = "ixgbe_update_flash_X540"; descriptor___3.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor___3.format = "Flash update time out\n"; descriptor___3.lineno = 528U; descriptor___3.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___4 != 0L) { __dynamic_netdev_dbg(& descriptor___3, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Flash update time out\n"); } else { } } } else { } return (status); } } static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw ) { u32 i 
/* Body of ixgbe_poll_flash_update_done_X540 (signature on the previous
 * line): busy-poll the register at hw->mvals[0] for bit 0x4000000
 * (67108864U, presumably the FLA flash-update-done flag -- TODO confirm)
 * for up to 20000 iterations with a udelay between reads.  Returns 0 once
 * the bit is observed set, -1 on timeout.  The ldv_* labels encode the
 * original loop for the LDV verifier -- do not restructure. */
; u32 reg ; { i = 0U; goto ldv_55593; ldv_55592: reg = ixgbe_read_reg(hw, *(hw->mvals)); if ((reg & 67108864U) != 0U) { return (0); } else { } __const_udelay(21475UL); i = i + 1U; ldv_55593: ; if (i <= 19999U) { goto ldv_55592; } else { } return (-1); } }
/*
 * ixgbe_acquire_swfw_sync_X540 - acquire bits of the software/firmware
 * synchronization register (hw->mvals[6]) for the resources in @mask.
 * The firmware's corresponding bits are @mask << 5; for mask 1U an extra
 * hardware bit 16U is also checked.  Each attempt takes the register-access
 * semaphore (ixgbe_get_swfw_sync_semaphore), checks that no FW/SW/HW bit
 * for the resource is held, sets the SW bits and releases the semaphore;
 * otherwise it sleeps 5-10 ms and retries (up to 200 attempts).  After
 * timeout, if only FW/HW bits are stuck, the SW bits are force-set anyway.
 * Returns 0 on success, -16 if the underlying semaphore cannot be taken.
 */
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw , u32 mask ) { u32 swfw_sync ; u32 swmask ; u32 fwmask ; u32 hwmask ; u32 timeout ; u32 i ; s32 tmp ; s32 tmp___0 ; { swmask = mask; fwmask = mask << 5; hwmask = 0U; timeout = 200U; if (swmask == 1U) { hwmask = 16U; } else { } i = 0U; goto ldv_55607; ldv_55606: tmp = ixgbe_get_swfw_sync_semaphore(hw); if (tmp != 0) { return (-16); } else { } swfw_sync = ixgbe_read_reg(hw, *(hw->mvals + 6UL)); if ((((fwmask | swmask) | hwmask) & swfw_sync) == 0U) { swfw_sync = swfw_sync | swmask; ixgbe_write_reg(hw, *(hw->mvals + 6UL), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); goto ldv_55605; } else { ixgbe_release_swfw_sync_semaphore(hw); usleep_range(5000UL, 10000UL); } i = i + 1U; ldv_55607: ; if (i < timeout) { goto ldv_55606; } else { } ldv_55605: ; if (i >= timeout) { swfw_sync = ixgbe_read_reg(hw, *(hw->mvals + 6UL)); if (((fwmask | hwmask) & swfw_sync) != 0U) { tmp___0 = ixgbe_get_swfw_sync_semaphore(hw); if (tmp___0 != 0) { return (-16); } else { } swfw_sync = swfw_sync | swmask; ixgbe_write_reg(hw, *(hw->mvals + 6UL), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); } else { } } else { } usleep_range(5000UL, 10000UL); return (0); } }
/*
 * ixgbe_release_swfw_sync_X540 - clear the @mask bits in the SWFW sync
 * register (hw->mvals[6]) under the register-access semaphore, then sleep
 * 5-10 ms.  NOTE(review): the ixgbe_get_swfw_sync_semaphore() return value
 * is intentionally ignored here, matching the upstream driver.
 */
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw , u32 mask ) { u32 swfw_sync ; u32 swmask ; { swmask = mask; ixgbe_get_swfw_sync_semaphore(hw); swfw_sync = ixgbe_read_reg(hw, *(hw->mvals + 6UL)); swfw_sync = ~ swmask & swfw_sync; ixgbe_write_reg(hw, *(hw->mvals + 6UL), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); usleep_range(5000UL, 10000UL); return; } }
/*
 * ixgbe_get_swfw_sync_semaphore - take the two-stage hardware semaphore.
 * Stage 1: poll bit 0 of the register at hw->mvals[5] (SWSM.SMBI) until it
 * reads clear, up to 2000 iterations of 50-100 us.  Stage 2: poll the
 * register at hw->mvals[6] until its top bit reads clear (the signed
 * "(int )swsm >= 0" test checks bit 31).  Returns 0 on success, -1 if
 * either stage times out.
 */
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw ) { u32 timeout ; u32 i ; u32 swsm ; struct _ddebug descriptor ; long tmp ; { timeout = 2000U; i = 0U; goto ldv_55622; ldv_55621: swsm = ixgbe_read_reg(hw, *(hw->mvals + 5UL)); if ((swsm & 1U) == 0U) { goto ldv_55620; } else { } usleep_range(50UL, 100UL); i = i + 1U; ldv_55622: ; if (i < timeout) { goto ldv_55621; } else { } ldv_55620: ; if (i == timeout) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_get_swfw_sync_semaphore"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c"; descriptor.format = "Software semaphore SMBI between device drivers not granted.\n"; descriptor.lineno = 671U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Software semaphore SMBI between device drivers not granted.\n"); } else { } return (-1); } else { } i = 0U; goto ldv_55626; ldv_55625: swsm = ixgbe_read_reg(hw, *(hw->mvals + 6UL)); if ((int )swsm >= 0) { return (0); } else { } usleep_range(50UL, 100UL); i = i + 1U; ldv_55626: ; if (i < timeout) { goto ldv_55625; } else { } return (-1); } }
/*
 * ixgbe_release_swfw_sync_semaphore - undo both stages: clear bit 31 of
 * hw->mvals[6], clear bit 0 of hw->mvals[5], then read register 8U
 * (a posted-write flush, presumably IXGBE_STATUS -- TODO confirm).
 */
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw ) { u32 swsm ; { swsm = ixgbe_read_reg(hw, *(hw->mvals + 6UL)); swsm = swsm & 2147483647U; ixgbe_write_reg(hw, *(hw->mvals + 6UL), swsm); swsm = ixgbe_read_reg(hw, *(hw->mvals + 5UL)); swsm = swsm & 4294967294U; ixgbe_write_reg(hw, *(hw->mvals + 5UL), swsm); ixgbe_read_reg(hw, 8U); return; } }
/*
 * ixgbe_blink_led_start_X540 - start blinking LED @index.  If the link is
 * down, force link/speed via register 17200U (MACC) first, then program an
 * 8-bit field per LED in the LEDCTL register (512U): clear the 4-bit mode
 * field and set the blink bit (128 << index*8).  Always returns 0.
 * The trailing "if (!" is completed on the next source line.
 */
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw , u32 index ) { u32 macc_reg ; u32 ledctl_reg ; ixgbe_link_speed speed ; bool link_up ; { (*(hw->mac.ops.check_link))(hw, & speed, & link_up, 0); if (! link_up) { macc_reg = ixgbe_read_reg(hw, 17200U); macc_reg = macc_reg | 458753U; ixgbe_write_reg(hw, 17200U, macc_reg); } else { } ledctl_reg = ixgbe_read_reg(hw, 512U); ledctl_reg = (u32 )(~ (15 << (int )(index * 8U))) & ledctl_reg; ledctl_reg = (u32 )(128 << (int )(index * 8U)) | ledctl_reg; ixgbe_write_reg(hw, 512U, ledctl_reg); ixgbe_read_reg(hw, 8U); return (0); } }
/*
 * ixgbe_blink_led_stop_X540 - stop blinking LED @index: set the LED mode
 * field to 4, clear the blink bit, and undo the MACC force bits set by
 * blink_led_start.  Always returns 0.
 */
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw , u32 index ) { u32 macc_reg ; u32 ledctl_reg ; { ledctl_reg = ixgbe_read_reg(hw, 512U); ledctl_reg = (u32 )(~ (15 << (int )(index * 8U))) & ledctl_reg; ledctl_reg = (u32 )(4 << (int )(index * 8U)) | ledctl_reg; ledctl_reg = (u32 )(~ (128 << (int )(index * 8U))) & ledctl_reg; ixgbe_write_reg(hw, 512U, ledctl_reg); macc_reg = ixgbe_read_reg(hw, 17200U); macc_reg = macc_reg & 4294508542U; ixgbe_write_reg(hw, 17200U, macc_reg); ixgbe_read_reg(hw, 8U); return (0); } }
/* X540 MAC ops table: positional initializer (CIL strips designators);
 * NULL slots are cast function-pointer zeros for ops the X540 lacks. */
static struct ixgbe_mac_operations mac_ops_X540 = {& ixgbe_init_hw_generic, & ixgbe_reset_hw_X540, & ixgbe_start_hw_X540, & ixgbe_clear_hw_cntrs_generic, & ixgbe_get_media_type_X540, & ixgbe_get_mac_addr_generic, & ixgbe_get_san_mac_addr_generic, & ixgbe_get_device_caps_generic, & ixgbe_get_wwn_prefix_generic, & ixgbe_stop_adapter_generic, & ixgbe_get_bus_info_generic, & ixgbe_set_lan_id_multi_port_pcie, (s32 (*)(struct ixgbe_hw * , u32 , u8 * ))0, (s32 (*)(struct ixgbe_hw * , u32 , u8 ))0, (s32 (*)(struct ixgbe_hw * ))0, & ixgbe_disable_rx_buff_generic, & ixgbe_enable_rx_buff_generic, & ixgbe_enable_rx_dma_generic, & ixgbe_acquire_swfw_sync_X540, & ixgbe_release_swfw_sync_X540, & prot_autoc_read_generic, & prot_autoc_write_generic, 0, 0, 0, 0, & ixgbe_setup_mac_link_X540, & ixgbe_check_mac_link_generic, & ixgbe_get_copper_link_capabilities_generic, & ixgbe_set_rxpba_generic, & ixgbe_led_on_generic, & ixgbe_led_off_generic, & ixgbe_blink_led_start_X540, & ixgbe_blink_led_stop_X540, & ixgbe_set_rar_generic, & ixgbe_clear_rar_generic, & ixgbe_set_vmdq_generic, & ixgbe_set_vmdq_san_mac_generic, & ixgbe_clear_vmdq_generic, & ixgbe_init_rx_addrs_generic, & ixgbe_update_mc_addr_list_generic, & ixgbe_enable_mc_generic, & ixgbe_disable_mc_generic, & ixgbe_clear_vfta_generic, & ixgbe_set_vfta_generic, & ixgbe_init_uta_tables_generic, & ixgbe_set_mac_anti_spoofing, & ixgbe_set_vlan_anti_spoofing, & ixgbe_fc_enable_generic, & ixgbe_set_fw_drv_ver_generic, (s32 (*)(struct ixgbe_hw * ))0, (s32 (*)(struct ixgbe_hw * ))0, & ixgbe_disable_rx_generic, & ixgbe_enable_rx_generic, 0, 0, 0, 0, 0};
/* X540 EEPROM ops table (includes the checksum routines defined above). */
static struct ixgbe_eeprom_operations eeprom_ops_X540 = {& ixgbe_init_eeprom_params_X540, & ixgbe_read_eerd_X540, & ixgbe_read_eerd_buffer_X540, & ixgbe_write_eewr_X540, & ixgbe_write_eewr_buffer_X540, & ixgbe_validate_eeprom_checksum_X540, & ixgbe_update_eeprom_checksum_X540, & ixgbe_calc_eeprom_checksum_X540};
/* X540 PHY ops table (generic copper-PHY implementations). */
static struct ixgbe_phy_operations phy_ops_X540 = {& ixgbe_identify_phy_generic, & ixgbe_identify_sfp_module_generic, (s32 (*)(struct ixgbe_hw * ))0, (s32 (*)(struct ixgbe_hw * ))0, & ixgbe_read_phy_reg_generic, & ixgbe_write_phy_reg_generic, 0, 0, & ixgbe_setup_phy_link_generic, 0, & ixgbe_setup_phy_link_speed_generic, 0, & ixgbe_get_phy_firmware_version_generic, & ixgbe_read_i2c_byte_generic, & ixgbe_write_i2c_byte_generic, & ixgbe_read_i2c_sff8472_generic, & ixgbe_read_i2c_eeprom_generic, & ixgbe_write_i2c_eeprom_generic, 0, 0, & ixgbe_tn_check_overtemp, & ixgbe_set_copper_phy_power, 0};
/* Per-MAC register/bit value table consumed via hw->mvals[...] above
 * (index 0, 5, 6 are the registers used by the flash/semaphore code). */
static u32 const ixgbe_mvals_X540[24U] = { 65552U, 65564U, 66048U, 66064U, 65872U, 65856U, 65888U, 65864U, 2U, 4U, 8U, 33554432U, 67108864U, 134217728U, 69768U, 69772U, 1U, 2U, 4U, 8U, 0U, 0U, 0U, 40U};
/* Top-level board descriptor tying the ops tables and mvals together. */
struct ixgbe_info ixgbe_X540_info = {3, & ixgbe_get_invariants_X540, & mac_ops_X540, & eeprom_ops_X540, & phy_ops_X540, & mbx_ops_generic, (u32 const *)(& ixgbe_mvals_X540)};
/* LDV harness hooks (defined by the verifier environment model); the final
 * "extern int" is completed on the next source line. */
extern int ldv_probe_25(void) ; extern int ldv_release_25(void) ; extern int ldv_release_24(void) ; extern int ldv_probe_24(void) ; extern int ldv_probe_23(void) ; extern int
/* Completes the "extern int" from the previous source line. */
ldv_release_23(void) ;
/* LDV environment model: allocate a dummy ixgbe_hw (1696 bytes) as the
 * group-0 argument for each ops table under verification. */
void ldv_initialize_ixgbe_eeprom_operations_24(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); eeprom_ops_X540_group0 = (struct ixgbe_hw *)tmp; return; } }
void ldv_initialize_ixgbe_phy_operations_23(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); phy_ops_X540_group0 = (struct ixgbe_hw *)tmp; return; } }
void ldv_initialize_ixgbe_mac_operations_25(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); mac_ops_X540_group0 = (struct ixgbe_hw *)tmp; return; } }
/*
 * ldv_main_exported_25 - LDV driver-environment model for mac_ops_X540:
 * builds zeroed arguments, then nondeterministically (__VERIFIER_nondet_int)
 * invokes one mac op per call, tracking a 2-state lifecycle in
 * ldv_state_variable_25 (1 = released, 2 = probed; cases 45/46 are
 * release/probe and adjust ref_cnt).  Generated code -- do not restructure.
 */
void ldv_main_exported_25(void) { u8 ldvarg205 ; u32 *ldvarg239 ; void *tmp ; u16 *ldvarg220 ; void *tmp___0 ; bool ldvarg227 ; u32 ldvarg229 ; bool ldvarg217 ; u32 ldvarg200 ; u32 ldvarg235 ; u32 ldvarg245 ; u32 ldvarg223 ; u32 ldvarg224 ; bool *ldvarg209 ; void *tmp___1 ; u32 ldvarg238 ; int ldvarg231 ; u32 ldvarg246 ; u8 ldvarg207 ; u32 ldvarg241 ; bool *ldvarg240 ; void *tmp___2 ; int ldvarg230 ; bool *ldvarg213 ; void *tmp___3 ; u8 ldvarg204 ; bool ldvarg232 ; u8 *ldvarg201 ; void *tmp___4 ; u8 ldvarg206 ; u32 ldvarg222 ; u16 *ldvarg199 ; void *tmp___5 ; u32 ldvarg211 ; struct net_device *ldvarg221 ; void *tmp___6 ; int ldvarg226 ; ixgbe_link_speed *ldvarg214 ; void *tmp___7 ; int ldvarg228 ; u32 ldvarg244 ; bool ldvarg208 ; u32 ldvarg234 ; ixgbe_link_speed *ldvarg210 ; void *tmp___8 ; u32 ldvarg216 ; u32 ldvarg243 ; u32 ldvarg236 ; u32 ldvarg233 ; ixgbe_link_speed ldvarg218 ; u8 *ldvarg215 ; void *tmp___9 ; u16 *ldvarg219 ; void *tmp___10 ; u32 ldvarg202 ; u8 *ldvarg212 ; void *tmp___11 ; u32 ldvarg225 ; u32 ldvarg203 ; bool ldvarg237 ; bool ldvarg242 ; int tmp___12 ; { tmp = ldv_init_zalloc(4UL); ldvarg239 = (u32 *)tmp; tmp___0 = ldv_init_zalloc(2UL); ldvarg220 = (u16 *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg209 = (bool *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg240 = (bool *)tmp___2; tmp___3 = ldv_init_zalloc(1UL); ldvarg213 = (bool *)tmp___3; tmp___4 = ldv_init_zalloc(1UL); ldvarg201 = (u8 *)tmp___4; tmp___5 = ldv_init_zalloc(2UL); ldvarg199 = (u16 *)tmp___5; tmp___6 = ldv_init_zalloc(3008UL); ldvarg221 = (struct net_device *)tmp___6; tmp___7 = ldv_init_zalloc(4UL); ldvarg214 = (ixgbe_link_speed *)tmp___7; tmp___8 = ldv_init_zalloc(4UL); ldvarg210 = (ixgbe_link_speed *)tmp___8; tmp___9 = ldv_init_zalloc(1UL); ldvarg215 = (u8 *)tmp___9; tmp___10 = ldv_init_zalloc(2UL); ldvarg219 = (u16 *)tmp___10; tmp___11 = ldv_init_zalloc(1UL); ldvarg212 = (u8 *)tmp___11; ldv_memset((void *)(& ldvarg205), 0, 1UL); ldv_memset((void *)(& ldvarg227), 0, 1UL); ldv_memset((void *)(& ldvarg229), 0, 4UL); ldv_memset((void *)(& ldvarg217), 0, 1UL); ldv_memset((void *)(& ldvarg200), 0, 4UL); ldv_memset((void *)(& ldvarg235), 0, 4UL); ldv_memset((void *)(& ldvarg245), 0, 4UL); ldv_memset((void *)(& ldvarg223), 0, 4UL); ldv_memset((void *)(& ldvarg224), 0, 4UL); ldv_memset((void *)(& ldvarg238), 0, 4UL); ldv_memset((void *)(& ldvarg231), 0, 4UL); ldv_memset((void *)(& ldvarg246), 0, 4UL); ldv_memset((void *)(& ldvarg207), 0, 1UL); ldv_memset((void *)(& ldvarg241), 0, 4UL); ldv_memset((void *)(& ldvarg230), 0, 4UL); ldv_memset((void *)(& ldvarg204), 0, 1UL); ldv_memset((void *)(& ldvarg232), 0, 1UL); ldv_memset((void *)(& ldvarg206), 0, 1UL); ldv_memset((void *)(& ldvarg222), 0, 4UL); ldv_memset((void *)(& ldvarg211), 0, 4UL); ldv_memset((void *)(& ldvarg226), 0, 4UL); ldv_memset((void *)(& ldvarg228), 0, 4UL); ldv_memset((void *)(& ldvarg244), 0, 4UL); ldv_memset((void *)(& ldvarg208), 0, 1UL); ldv_memset((void *)(& ldvarg234), 0, 4UL); ldv_memset((void *)(& ldvarg216), 0, 4UL); ldv_memset((void *)(& ldvarg243), 0, 4UL); ldv_memset((void *)(& ldvarg236), 0, 4UL); ldv_memset((void *)(& ldvarg233), 0, 4UL); ldv_memset((void *)(& ldvarg218), 0, 4UL); ldv_memset((void *)(& ldvarg202), 0, 4UL); ldv_memset((void *)(& ldvarg225), 0, 4UL); ldv_memset((void *)(& ldvarg203), 0, 4UL); ldv_memset((void *)(& ldvarg237), 0, 1UL); ldv_memset((void *)(& ldvarg242), 0, 1UL); tmp___12 = __VERIFIER_nondet_int();
/* One case per mac_ops_X540 callback; cases 5 and 7 (prot_autoc_*) are
 * reachable only in the probed state (2). */
switch (tmp___12) { case 0: ; if (ldv_state_variable_25 == 1) { ixgbe_stop_adapter_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_stop_adapter_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 1: ; if (ldv_state_variable_25 == 1) { ixgbe_set_vmdq_san_mac_generic(mac_ops_X540_group0, ldvarg246); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_set_vmdq_san_mac_generic(mac_ops_X540_group0, ldvarg246); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 2: ; if (ldv_state_variable_25 == 1) { ixgbe_led_off_generic(mac_ops_X540_group0, ldvarg245); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_led_off_generic(mac_ops_X540_group0, ldvarg245); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 3: ; if (ldv_state_variable_25 == 1) { ixgbe_set_vfta_generic(mac_ops_X540_group0, ldvarg244, ldvarg243, (int )ldvarg242); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_set_vfta_generic(mac_ops_X540_group0, ldvarg244, ldvarg243, (int )ldvarg242); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 4: ; if (ldv_state_variable_25 == 1) { ixgbe_enable_rx_dma_generic(mac_ops_X540_group0, ldvarg241); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_enable_rx_dma_generic(mac_ops_X540_group0, ldvarg241); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 5: ; if (ldv_state_variable_25 == 2) { prot_autoc_read_generic(mac_ops_X540_group0, ldvarg240, ldvarg239); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 6: ; if (ldv_state_variable_25 == 1) { ixgbe_enable_rx_buff_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_enable_rx_buff_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 7: ; if (ldv_state_variable_25 == 2) { prot_autoc_write_generic(mac_ops_X540_group0, ldvarg238, (int )ldvarg237); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 8: ; if (ldv_state_variable_25 == 1) { ixgbe_led_on_generic(mac_ops_X540_group0, ldvarg236); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_led_on_generic(mac_ops_X540_group0, ldvarg236); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 9: ; if (ldv_state_variable_25 == 1) { ixgbe_blink_led_stop_X540(mac_ops_X540_group0, ldvarg235); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_blink_led_stop_X540(mac_ops_X540_group0, ldvarg235); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 10: ; if (ldv_state_variable_25 == 1) { ixgbe_clear_rar_generic(mac_ops_X540_group0, ldvarg234); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_clear_rar_generic(mac_ops_X540_group0, ldvarg234); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 11: ; if (ldv_state_variable_25 == 1) { ixgbe_enable_rx_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_enable_rx_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 12: ; if (ldv_state_variable_25 == 1) { ixgbe_get_bus_info_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_get_bus_info_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 13: ; if (ldv_state_variable_25 == 1) { ixgbe_blink_led_start_X540(mac_ops_X540_group0, ldvarg233); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_blink_led_start_X540(mac_ops_X540_group0, ldvarg233); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 14: ; if (ldv_state_variable_25 == 1) { ixgbe_disable_mc_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_disable_mc_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 15: ; if (ldv_state_variable_25 == 1) { ixgbe_set_vlan_anti_spoofing(mac_ops_X540_group0, (int )ldvarg232, ldvarg231); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_set_vlan_anti_spoofing(mac_ops_X540_group0, (int )ldvarg232, ldvarg231); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 16: ; if (ldv_state_variable_25 == 1) { ixgbe_set_rxpba_generic(mac_ops_X540_group0, ldvarg230, ldvarg229, ldvarg228); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_set_rxpba_generic(mac_ops_X540_group0, ldvarg230, ldvarg229, ldvarg228); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 17: ; if (ldv_state_variable_25 == 1) { ixgbe_init_uta_tables_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_init_uta_tables_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 18: ; if (ldv_state_variable_25 == 1) { ixgbe_set_mac_anti_spoofing(mac_ops_X540_group0, (int )ldvarg227, ldvarg226); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_set_mac_anti_spoofing(mac_ops_X540_group0, (int )ldvarg227, ldvarg226); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 19: ; if (ldv_state_variable_25 == 1) { ixgbe_set_vmdq_generic(mac_ops_X540_group0, ldvarg225, ldvarg224); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_set_vmdq_generic(mac_ops_X540_group0, ldvarg225, ldvarg224); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 20: ; if (ldv_state_variable_25 == 1) { ixgbe_clear_vmdq_generic(mac_ops_X540_group0, ldvarg223, ldvarg222); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_clear_vmdq_generic(mac_ops_X540_group0, ldvarg223, ldvarg222); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 21: ; if (ldv_state_variable_25 == 1) { ixgbe_clear_vfta_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_clear_vfta_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 22: ; if (ldv_state_variable_25 == 1) { ixgbe_get_media_type_X540(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_get_media_type_X540(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 23: ; if (ldv_state_variable_25 == 1) { ixgbe_update_mc_addr_list_generic(mac_ops_X540_group0, ldvarg221); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_update_mc_addr_list_generic(mac_ops_X540_group0, ldvarg221); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 24: ; if (ldv_state_variable_25 == 1) { ixgbe_init_rx_addrs_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_init_rx_addrs_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 25: ; if (ldv_state_variable_25 == 1) { ixgbe_fc_enable_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_fc_enable_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 26: ; if (ldv_state_variable_25 == 1) { ixgbe_get_wwn_prefix_generic(mac_ops_X540_group0, ldvarg220, ldvarg219); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_get_wwn_prefix_generic(mac_ops_X540_group0, ldvarg220, ldvarg219); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 27: ; if (ldv_state_variable_25 == 1) { ixgbe_clear_hw_cntrs_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_clear_hw_cntrs_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 28: ; if (ldv_state_variable_25 == 1) { ixgbe_setup_mac_link_X540(mac_ops_X540_group0, ldvarg218, (int )ldvarg217); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_setup_mac_link_X540(mac_ops_X540_group0, ldvarg218, (int )ldvarg217); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 29: ; if (ldv_state_variable_25 == 1) { ixgbe_disable_rx_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_disable_rx_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 30: ; if (ldv_state_variable_25 == 1) { ixgbe_set_lan_id_multi_port_pcie(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_set_lan_id_multi_port_pcie(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 31: ; if (ldv_state_variable_25 == 1) { ixgbe_acquire_swfw_sync_X540(mac_ops_X540_group0, ldvarg216); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_acquire_swfw_sync_X540(mac_ops_X540_group0, ldvarg216); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 32: ; if (ldv_state_variable_25 == 1) { ixgbe_start_hw_X540(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_start_hw_X540(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 33: ; if (ldv_state_variable_25 == 1) { ixgbe_enable_mc_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_enable_mc_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 34: ; if (ldv_state_variable_25 == 1) { ixgbe_get_mac_addr_generic(mac_ops_X540_group0, ldvarg215); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_get_mac_addr_generic(mac_ops_X540_group0, ldvarg215); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 35: ; if (ldv_state_variable_25 == 1) { ixgbe_get_copper_link_capabilities_generic(mac_ops_X540_group0, ldvarg214, ldvarg213); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_get_copper_link_capabilities_generic(mac_ops_X540_group0, ldvarg214, ldvarg213); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 36: ; if (ldv_state_variable_25 == 1) { ixgbe_get_san_mac_addr_generic(mac_ops_X540_group0, ldvarg212); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_get_san_mac_addr_generic(mac_ops_X540_group0, ldvarg212); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 37: ; if (ldv_state_variable_25 == 1) { ixgbe_init_hw_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_init_hw_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 38: ; if (ldv_state_variable_25 == 1) { ixgbe_reset_hw_X540(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_reset_hw_X540(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 39: ; if (ldv_state_variable_25 == 1) { ixgbe_release_swfw_sync_X540(mac_ops_X540_group0, ldvarg211); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_release_swfw_sync_X540(mac_ops_X540_group0, ldvarg211); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 40: ; if (ldv_state_variable_25 == 1) { ixgbe_check_mac_link_generic(mac_ops_X540_group0, ldvarg210, ldvarg209, (int )ldvarg208); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_check_mac_link_generic(mac_ops_X540_group0, ldvarg210, ldvarg209, (int )ldvarg208); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 41: ; if (ldv_state_variable_25 == 1) { ixgbe_set_fw_drv_ver_generic(mac_ops_X540_group0, (int )ldvarg206, (int )ldvarg205, (int )ldvarg204, (int )ldvarg207); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_set_fw_drv_ver_generic(mac_ops_X540_group0, (int )ldvarg206, (int )ldvarg205, (int )ldvarg204, (int )ldvarg207); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 42: ; if (ldv_state_variable_25 == 1) { ixgbe_set_rar_generic(mac_ops_X540_group0, ldvarg202, ldvarg201, ldvarg200, ldvarg203); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_set_rar_generic(mac_ops_X540_group0, ldvarg202, ldvarg201, ldvarg200, ldvarg203); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 43: ; if (ldv_state_variable_25 == 1) { ixgbe_get_device_caps_generic(mac_ops_X540_group0, ldvarg199); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_get_device_caps_generic(mac_ops_X540_group0, ldvarg199); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 44: ; if (ldv_state_variable_25 == 1) { ixgbe_disable_rx_buff_generic(mac_ops_X540_group0); ldv_state_variable_25 = 1; } else { } if (ldv_state_variable_25 == 2) { ixgbe_disable_rx_buff_generic(mac_ops_X540_group0); ldv_state_variable_25 = 2; } else { } goto ldv_55724;
case 45: ; if (ldv_state_variable_25 == 2) { ldv_release_25(); ldv_state_variable_25 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_55724;
case 46: ; if (ldv_state_variable_25 == 1) { ldv_probe_25(); ldv_state_variable_25 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_55724;
default: ldv_stop(); } ldv_55724: ; return; } }
/* LDV model for ixgbe_info: only entry point is get_invariants. */
void ldv_main_exported_22(void) { struct ixgbe_hw *ldvarg352 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1696UL); ldvarg352 = (struct ixgbe_hw *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_22 == 1) { ixgbe_get_invariants_X540(ldvarg352); ldv_state_variable_22 = 1; } else { } goto ldv_55777; default: ldv_stop(); } ldv_55777: ; return; } }
/* LDV model for eeprom_ops_X540 (same 2-state scheme as above); the body
 * is cut at the end of this chunk and continues on the next source line. */
void ldv_main_exported_24(void) { u16 ldvarg316 ; u16 ldvarg317 ; u16 *ldvarg321 ; void *tmp ; u16 ldvarg320 ; u16 *ldvarg313 ; void *tmp___0 ; u16 *ldvarg318 ; void *tmp___1 ; u16 ldvarg315 ; u16 ldvarg323 ; u16 ldvarg319 ; u16 *ldvarg314 ; void *tmp___2 ; u16 ldvarg322 ; int tmp___3 ; { tmp = ldv_init_zalloc(2UL); ldvarg321 = (u16 *)tmp; tmp___0 = ldv_init_zalloc(2UL); ldvarg313 = (u16 *)tmp___0; tmp___1 = ldv_init_zalloc(2UL); ldvarg318 = (u16 *)tmp___1; tmp___2 = ldv_init_zalloc(2UL); ldvarg314 = (u16 *)tmp___2; ldv_memset((void *)(& ldvarg316), 0, 2UL); ldv_memset((void *)(& ldvarg317), 0, 2UL); ldv_memset((void *)(& ldvarg320), 0, 2UL); ldv_memset((void *)(& ldvarg315), 0, 2UL); ldv_memset((void *)(& ldvarg323), 0, 2UL); ldv_memset((void *)(& ldvarg319), 0, 2UL); ldv_memset((void *)(& ldvarg322), 0, 2UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_24 == 2) { ixgbe_write_eewr_buffer_X540(eeprom_ops_X540_group0, (int )ldvarg323, (int )ldvarg322, ldvarg321); ldv_state_variable_24 = 2; } else { } goto ldv_55794;
case 1: ; if (ldv_state_variable_24 == 2) { ixgbe_read_eerd_buffer_X540(eeprom_ops_X540_group0, (int )ldvarg320, (int )ldvarg319, ldvarg318); ldv_state_variable_24 = 2; } else { } goto ldv_55794;
case 2: ; if (ldv_state_variable_24 == 1) { ixgbe_update_eeprom_checksum_X540(eeprom_ops_X540_group0); ldv_state_variable_24 = 1; } else { } if (ldv_state_variable_24 == 2) { ixgbe_update_eeprom_checksum_X540(eeprom_ops_X540_group0); ldv_state_variable_24 = 2; } else { } goto ldv_55794;
case 3: ; if (ldv_state_variable_24 == 1) { ixgbe_calc_eeprom_checksum_X540(eeprom_ops_X540_group0); ldv_state_variable_24 = 1; } else { } if (ldv_state_variable_24 == 2) { ixgbe_calc_eeprom_checksum_X540(eeprom_ops_X540_group0); ldv_state_variable_24 = 2; } else { } goto ldv_55794;
case 4: ; if (ldv_state_variable_24 == 1) { ixgbe_write_eewr_X540(eeprom_ops_X540_group0, (int )ldvarg317, (int )ldvarg316); ldv_state_variable_24 = 1; } else { } if (ldv_state_variable_24 == 2) { ixgbe_write_eewr_X540(eeprom_ops_X540_group0, (int )ldvarg317, (int )ldvarg316); ldv_state_variable_24 = 2; } else { } goto ldv_55794;
case 5: ; if (ldv_state_variable_24 == 1) { ixgbe_read_eerd_X540(eeprom_ops_X540_group0, (int
)ldvarg315, ldvarg314); ldv_state_variable_24 = 1; } else { } if (ldv_state_variable_24 == 2) { ixgbe_read_eerd_X540(eeprom_ops_X540_group0, (int )ldvarg315, ldvarg314); ldv_state_variable_24 = 2; } else { } goto ldv_55794; case 6: ; if (ldv_state_variable_24 == 1) { ixgbe_init_eeprom_params_X540(eeprom_ops_X540_group0); ldv_state_variable_24 = 1; } else { } if (ldv_state_variable_24 == 2) { ixgbe_init_eeprom_params_X540(eeprom_ops_X540_group0); ldv_state_variable_24 = 2; } else { } goto ldv_55794; case 7: ; if (ldv_state_variable_24 == 1) { ixgbe_validate_eeprom_checksum_X540(eeprom_ops_X540_group0, ldvarg313); ldv_state_variable_24 = 1; } else { } if (ldv_state_variable_24 == 2) { ixgbe_validate_eeprom_checksum_X540(eeprom_ops_X540_group0, ldvarg313); ldv_state_variable_24 = 2; } else { } goto ldv_55794; case 8: ; if (ldv_state_variable_24 == 2) { ldv_release_24(); ldv_state_variable_24 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_55794; case 9: ; if (ldv_state_variable_24 == 1) { ldv_probe_24(); ldv_state_variable_24 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_55794; default: ldv_stop(); } ldv_55794: ; return; } } void ldv_main_exported_23(void) { u8 ldvarg411 ; u8 ldvarg414 ; u8 *ldvarg413 ; void *tmp ; bool ldvarg406 ; u8 ldvarg399 ; u8 ldvarg409 ; u32 ldvarg405 ; u16 *ldvarg400 ; void *tmp___0 ; ixgbe_link_speed ldvarg407 ; bool ldvarg418 ; u32 ldvarg402 ; u8 *ldvarg415 ; void *tmp___1 ; u8 ldvarg398 ; u8 ldvarg410 ; u8 ldvarg416 ; u16 ldvarg403 ; u8 ldvarg408 ; u16 *ldvarg417 ; void *tmp___2 ; u8 *ldvarg397 ; void *tmp___3 ; u8 ldvarg412 ; u32 ldvarg404 ; u32 ldvarg401 ; int tmp___4 ; { tmp = ldv_init_zalloc(1UL); ldvarg413 = (u8 *)tmp; tmp___0 = ldv_init_zalloc(2UL); ldvarg400 = (u16 *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg415 = (u8 *)tmp___1; tmp___2 = ldv_init_zalloc(2UL); ldvarg417 = (u16 *)tmp___2; tmp___3 = ldv_init_zalloc(1UL); ldvarg397 = (u8 *)tmp___3; ldv_memset((void *)(& ldvarg411), 0, 1UL); ldv_memset((void *)(& ldvarg414), 0, 
/* Continuation of ldv_main_exported_23(): finish zero-initialising the
 * argument pool, then dispatch one nondeterministically chosen phy_ops
 * callback.  I2C EEPROM read and i2c byte write (cases 2 and 4) require
 * probed state 2; all other callbacks are allowed in both states. */
1UL); ldv_memset((void *)(& ldvarg406), 0, 1UL); ldv_memset((void *)(& ldvarg399), 0, 1UL); ldv_memset((void *)(& ldvarg409), 0, 1UL); ldv_memset((void *)(& ldvarg405), 0, 4UL); ldv_memset((void *)(& ldvarg407), 0, 4UL); ldv_memset((void *)(& ldvarg418), 0, 1UL); ldv_memset((void *)(& ldvarg402), 0, 4UL); ldv_memset((void *)(& ldvarg398), 0, 1UL); ldv_memset((void *)(& ldvarg410), 0, 1UL); ldv_memset((void *)(& ldvarg416), 0, 1UL); ldv_memset((void *)(& ldvarg403), 0, 2UL); ldv_memset((void *)(& ldvarg408), 0, 1UL); ldv_memset((void *)(& ldvarg412), 0, 1UL); ldv_memset((void *)(& ldvarg404), 0, 4UL); ldv_memset((void *)(& ldvarg401), 0, 4UL); tmp___4 = __VERIFIER_nondet_int(); switch (tmp___4) { case 0: ; if (ldv_state_variable_23 == 1) { ixgbe_set_copper_phy_power(phy_ops_X540_group0, (int )ldvarg418); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 2) { ixgbe_set_copper_phy_power(phy_ops_X540_group0, (int )ldvarg418); ldv_state_variable_23 = 2; } else { } goto ldv_55831; case 1: ; if (ldv_state_variable_23 == 1) { ixgbe_get_phy_firmware_version_generic(phy_ops_X540_group0, ldvarg417); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 2) { ixgbe_get_phy_firmware_version_generic(phy_ops_X540_group0, ldvarg417); ldv_state_variable_23 = 2; } else { } goto ldv_55831; case 2: ; if (ldv_state_variable_23 == 2) { ixgbe_read_i2c_eeprom_generic(phy_ops_X540_group0, (int )ldvarg416, ldvarg415); ldv_state_variable_23 = 2; } else { } goto ldv_55831; case 3: ; if (ldv_state_variable_23 == 1) { ixgbe_read_i2c_sff8472_generic(phy_ops_X540_group0, (int )ldvarg414, ldvarg413); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 2) { ixgbe_read_i2c_sff8472_generic(phy_ops_X540_group0, (int )ldvarg414, ldvarg413); ldv_state_variable_23 = 2; } else { } goto ldv_55831; case 4: ; if (ldv_state_variable_23 == 2) { ixgbe_write_i2c_byte_generic(phy_ops_X540_group0, (int )ldvarg412, (int )ldvarg411, (int )ldvarg410); ldv_state_variable_23
= 2; } else { } goto ldv_55831; case 5: ; if (ldv_state_variable_23 == 1) { ixgbe_identify_phy_generic(phy_ops_X540_group0); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 2) { ixgbe_identify_phy_generic(phy_ops_X540_group0); ldv_state_variable_23 = 2; } else { } goto ldv_55831; case 6: ; if (ldv_state_variable_23 == 1) { ixgbe_setup_phy_link_generic(phy_ops_X540_group0); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 2) { ixgbe_setup_phy_link_generic(phy_ops_X540_group0); ldv_state_variable_23 = 2; } else { } goto ldv_55831; case 7: ; if (ldv_state_variable_23 == 1) { ixgbe_write_i2c_eeprom_generic(phy_ops_X540_group0, (int )ldvarg409, (int )ldvarg408); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 2) { ixgbe_write_i2c_eeprom_generic(phy_ops_X540_group0, (int )ldvarg409, (int )ldvarg408); ldv_state_variable_23 = 2; } else { } goto ldv_55831; case 8: ; if (ldv_state_variable_23 == 1) { ixgbe_setup_phy_link_speed_generic(phy_ops_X540_group0, ldvarg407, (int )ldvarg406); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 2) { ixgbe_setup_phy_link_speed_generic(phy_ops_X540_group0, ldvarg407, (int )ldvarg406); ldv_state_variable_23 = 2; } else { } goto ldv_55831; case 9: ; if (ldv_state_variable_23 == 1) { ixgbe_write_phy_reg_generic(phy_ops_X540_group0, ldvarg405, ldvarg404, (int )ldvarg403); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 2) { ixgbe_write_phy_reg_generic(phy_ops_X540_group0, ldvarg405, ldvarg404, (int )ldvarg403); ldv_state_variable_23 = 2; } else { } goto ldv_55831; case 10: ; if (ldv_state_variable_23 == 1) { ixgbe_identify_sfp_module_generic(phy_ops_X540_group0); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 2) { ixgbe_identify_sfp_module_generic(phy_ops_X540_group0); ldv_state_variable_23 = 2; } else { } goto ldv_55831; case 11: ; if (ldv_state_variable_23 == 1) { ixgbe_read_phy_reg_generic(phy_ops_X540_group0, ldvarg402,
ldvarg401, ldvarg400); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 2) { ixgbe_read_phy_reg_generic(phy_ops_X540_group0, ldvarg402, ldvarg401, ldvarg400); ldv_state_variable_23 = 2; } else { } goto ldv_55831; case 12: ; if (ldv_state_variable_23 == 1) { ixgbe_tn_check_overtemp(phy_ops_X540_group0); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 2) { ixgbe_tn_check_overtemp(phy_ops_X540_group0); ldv_state_variable_23 = 2; } else { } goto ldv_55831; case 13: ; if (ldv_state_variable_23 == 1) { ixgbe_read_i2c_byte_generic(phy_ops_X540_group0, (int )ldvarg399, (int )ldvarg398, ldvarg397); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 2) { ixgbe_read_i2c_byte_generic(phy_ops_X540_group0, (int )ldvarg399, (int )ldvarg398, ldvarg397); ldv_state_variable_23 = 2; } else { } goto ldv_55831; case 14: ; if (ldv_state_variable_23 == 2) { ldv_release_23(); ldv_state_variable_23 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_55831; case 15: ; if (ldv_state_variable_23 == 1) { ldv_probe_23(); ldv_state_variable_23 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_55831; default: ldv_stop(); } ldv_55831: ; return; } }
/* LDV interception wrapper: forwards to queue_work_on() and records the
 * queued work in the LDV work model via activate_work_9(..., 2). */
bool ldv_queue_work_on_442(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } }
/* LDV interception wrapper: forwards to queue_delayed_work_on() and records
 * the embedded work_struct in the LDV work model. */
bool ldv_queue_delayed_work_on_443(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Another queue_work_on() interception wrapper; body continues below. */
bool ldv_queue_work_on_444(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct
/* Continuation of ldv_queue_work_on_444(), followed by more LDV interception
 * wrappers.  The allocation wrappers below all follow one pattern: check the
 * gfp flags against the LDV allocation model (ldv_check_alloc_flags) and
 * return an unconstrained pointer (ldv_undef_ptr) so the verifier explores
 * both success and failure paths. */
work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } }
/* flush_workqueue() interception: also drains the modelled pending works. */
void ldv_flush_workqueue_445(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } }
bool ldv_queue_delayed_work_on_446(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* kmem_cache_alloc() model: nondeterministic result after gfp-flag check. */
void *ldv_kmem_cache_alloc_452(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } }
/* pskb_expand_head() model: returns an unconstrained int. */
int ldv_pskb_expand_head_458(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } }
/* skb_clone()/skb_copy()/__netdev_alloc_skb() models: unconstrained skb. */
struct sk_buff *ldv_skb_clone_460(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv_skb_copy_462(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv___netdev_alloc_skb_463(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv___netdev_alloc_skb_464(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff
*ldv___netdev_alloc_skb_465(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
int ldv_pskb_expand_head_466(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } }
int ldv_pskb_expand_head_467(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } }
struct sk_buff *ldv_skb_clone_468(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
void *ldv_kmem_cache_alloc_469(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } }
/* Forward declarations for the next compilation unit's LDV wrappers. */
__inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_489(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_491(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_490(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_493(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_492(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_499(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_516(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_507(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff
/* Continuation of the LDV wrapper forward declarations, then the start of the
 * CIL-translated ixgbe_x550.c driver functions proper. */
*ldv_skb_clone_515(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_509(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_505(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_513(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_514(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_510(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_511(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_512(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ;
/* Adjust the ESDP register (offset 32 = 0x20) mux-control bits based on
 * hw->bus.lan_id; the trailing read of register 8 (STATUS) acts as a flush. */
static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw ) { u32 esdp ; u32 tmp ; { tmp = ixgbe_read_reg(hw, 32U); esdp = tmp; if ((unsigned int )hw->bus.lan_id != 0U) { esdp = esdp & 4294836221U; esdp = esdp | 512U; } else { } esdp = esdp & 4294901503U; ixgbe_write_reg(hw, 32U, esdp); ixgbe_read_reg(hw, 8U); return; } }
/* Identify the PHY by PCI device id: 5548 (0x15AC) goes through the SFP
 * module path after setting the semaphore mask and mux; 5546/5547 set
 * hw->phy.type directly; 5549/5550 fall back to the generic identify. */
static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw ) { s32 tmp ; s32 tmp___0 ; { switch ((int )hw->device_id) { case 5548: hw->phy.phy_semaphore_mask = 6150U; ixgbe_setup_mux_ctl(hw); tmp = ixgbe_identify_module_generic(hw); return (tmp); case 5546: hw->phy.type = 5; goto ldv_55470; case 5547: hw->phy.type = 4; goto ldv_55470; case 5550: ; case 5549: tmp___0 = ixgbe_identify_phy_generic(hw); return (tmp___0); default: ; goto ldv_55470; } ldv_55470: ; return (0); } }
/* Stub: always returns 2147483647 (0x7FFFFFFF — presumably
 * IXGBE_NOT_IMPLEMENTED; confirm against ixgbe_type.h). */
static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw , u32 reg_addr , u32 device_type , u16 *phy_data ) { { return (2147483647); } }
/* Stub: same not-implemented return as the read variant above. */
static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw , u32 reg_addr , u32 device_type , u16 phy_data ) { { return (2147483647); } }
/* Initialise hw->eeprom for X550 (flash type, semaphore delay, word size
 * derived from the EEC size field, bits 14:11); runs only once
 * (eeprom->type == 0 i.e. uninitialised). */
static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw
*hw ) { struct ixgbe_eeprom_info *eeprom ; u32 eec ; u16 eeprom_size ; struct _ddebug descriptor ; long tmp ; { eeprom = & hw->eeprom; if ((unsigned int )eeprom->type == 0U) { eeprom->semaphore_delay = 10U; eeprom->type = 2; eec = ixgbe_read_reg(hw, *(hw->mvals)); eeprom_size = (unsigned short )((eec & 30720U) >> 11); eeprom->word_size = (u16 )(1 << ((int )eeprom_size + 6)); descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_init_eeprom_params_X550"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor.format = "Eeprom params: type = %d, size = %d\n"; descriptor.lineno = 109U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Eeprom params: type = %d, size = %d\n", (unsigned int )eeprom->type, (int )eeprom->word_size); } else { } } else { } return (0); } }
/* Poll the IOSF SB control register (69956 = 0x11144) up to 100 times for the
 * busy bit (sign bit) to clear; optionally return the last value via *ctrl;
 * -3 on timeout. */
static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw , u32 *ctrl ) { u32 i ; u32 command ; struct _ddebug descriptor ; long tmp ; { i = 0U; goto ldv_55503; ldv_55502: command = ixgbe_read_reg(hw, 69956U); if ((int )command >= 0) { goto ldv_55501; } else { } usleep_range(10UL, 20UL); i = i + 1U; ldv_55503: ; if (i <= 99U) { goto ldv_55502; } else { } ldv_55501: ; if ((unsigned long )ctrl != (unsigned long )((u32 *)0U)) { *ctrl = command; } else { } if (i == 100U) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_iosf_wait"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor.format = "IOSF wait timed out\n"; descriptor.lineno = 141U;
descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "IOSF wait timed out\n"); } else { } return (-3); } else { } return (0); } }
/* Read one IOSF sideband register: take the SWFW semaphore (mask 6), wait for
 * idle, issue the command, wait again, check the error field, then read the
 * data register (69960).  Semaphore is always released via the out label. */
static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw , u32 reg_addr , u32 device_type , u32 *data ) { u32 gssr ; u32 command ; u32 error ; s32 ret ; struct _ddebug descriptor ; long tmp ; { gssr = 6U; ret = (*(hw->mac.ops.acquire_swfw_sync))(hw, gssr); if (ret != 0) { return (ret); } else { } ret = ixgbe_iosf_wait(hw, (u32 *)0U); if (ret != 0) { goto out; } else { } command = (device_type << 28) | reg_addr; ixgbe_write_reg(hw, 69956U, command); ret = ixgbe_iosf_wait(hw, & command); if ((command & 786432U) != 0U) { error = (command & 267386880U) >> 20; descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_read_iosf_sb_reg_x550"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor.format = "Failed to read, error %x\n"; descriptor.lineno = 181U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Failed to read, error %x\n", error); } else { } return (-3); } else { } if (ret == 0) { *data = ixgbe_read_reg(hw, 69960U); } else { } out: (*(hw->mac.ops.release_swfw_sync))(hw, gssr); return (ret); } }
/* Read one EEPROM word through the host interface: build a read-shadow-ram
 * command (byte-swapped word address), execute it, then fetch the result from
 * FLEX host buffer register 88076. */
static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw , u16 offset , u16 *data ) { s32 status ; struct ixgbe_hic_read_shadow_ram buffer ; __u32 tmp ; u32 tmp___0 ; { buffer.hdr.req.cmd = 49U; buffer.hdr.req.buf_lenh = 0U; buffer.hdr.req.buf_lenl = 6U; buffer.hdr.req.checksum = 255U; tmp = __fswab32((__u32 )((int )offset
/* Continuation of ixgbe_read_ee_hostif_data_X550(). */
* 2)); buffer.address = tmp; buffer.length = 512U; status = ixgbe_host_interface_command(hw, (u32 *)(& buffer), 16U, 500U, 0); if (status != 0) { return (status); } else { } tmp___0 = ixgbe_read_reg(hw, 88076U); *data = (unsigned short )tmp___0; return (0); } }
/* Buffered EEPROM read over the host interface: holds the SWFW semaphore
 * (mask 1) for the whole transfer, reads in chunks of at most 512 words, and
 * unpacks two 16-bit words from each 32-bit FLEX buffer register
 * ((i + 44038) * 2 addressing). */
static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) { struct ixgbe_hic_read_shadow_ram buffer ; u32 current_word ; u16 words_to_read ; s32 status ; u32 i ; struct _ddebug descriptor ; long tmp ; __u32 tmp___0 ; __u16 tmp___1 ; struct _ddebug descriptor___0 ; long tmp___2 ; u32 reg ; u32 value ; u32 tmp___3 ; { current_word = 0U; status = (*(hw->mac.ops.acquire_swfw_sync))(hw, 1U); if (status != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_read_ee_hostif_buffer_X550"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor.format = "EEPROM read buffer - semaphore failed\n"; descriptor.lineno = 249U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read buffer - semaphore failed\n"); } else { } return (status); } else { } goto ldv_55547; ldv_55546: ; if ((unsigned int )words > 512U) { words_to_read = 512U; } else { words_to_read = words; } buffer.hdr.req.cmd = 49U; buffer.hdr.req.buf_lenh = 0U; buffer.hdr.req.buf_lenl = 6U; buffer.hdr.req.checksum = 255U; tmp___0 = __fswab32(((u32 )offset + current_word) * 2U); buffer.address = tmp___0; tmp___1 = __fswab16((int )((unsigned int )words_to_read * 2U)); buffer.length = tmp___1; status = ixgbe_host_interface_command(hw, (u32 *)(& buffer), 16U, 500U, 0); if (status != 0) { descriptor___0.modname = "ixgbe"; descriptor___0.function =
"ixgbe_read_ee_hostif_buffer_X550"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor___0.format = "Host interface command failed\n"; descriptor___0.lineno = 273U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Host interface command failed\n"); } else { } goto out; } else { } i = 0U; goto ldv_55544; ldv_55543: reg = (i + 44038U) * 2U; tmp___3 = ixgbe_read_reg(hw, reg); value = tmp___3; *(data + (unsigned long )current_word) = (unsigned short )value; current_word = current_word + 1U; i = i + 1U; if ((u32 )words_to_read > i) { value = value >> 16; *(data + (unsigned long )current_word) = (unsigned short )value; current_word = current_word + 1U; } else { } i = i + 1U; ldv_55544: ; if ((u32 )words_to_read > i) { goto ldv_55543; } else { } words = (int )words - (int )words_to_read; ldv_55547: ; if ((unsigned int )words != 0U) { goto ldv_55546; } else { } out: (*(hw->mac.ops.release_swfw_sync))(hw, 1U); return (status); } }
/* Accumulate into *csum the words of an EEPROM region: either read from the
 * device in 256-word pages via ixgbe_read_ee_hostif_buffer_X550 (buffer ==
 * NULL) or taken from the caller-supplied image.  size == 0 means the region
 * length is the first word at ptr; bad pointers/lengths return 0 (skip) or
 * -5 (image bounds exceeded). */
static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw , u16 ptr , u16 size , u16 *csum , u16 *buffer , u32 buffer_size ) { u16 buf[256U] ; s32 status ; u16 length ; u16 bufsz ; u16 i ; u16 start ; u16 *local_buffer ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; { bufsz = 256U; if ((unsigned long )buffer == (unsigned long )((u16 *)0U)) { status = ixgbe_read_ee_hostif_buffer_X550(hw, (int )ptr, (int )bufsz, (u16 *)(& buf)); if (status != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_checksum_ptr_x550"; descriptor.filename =
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor.format = "Failed to read EEPROM image\n"; descriptor.lineno = 322U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Failed to read EEPROM image\n"); } else { } return (status); } else { } local_buffer = (u16 *)(& buf); } else { if ((u32 )ptr > buffer_size) { return (-5); } else { } local_buffer = buffer + (unsigned long )ptr; } if ((unsigned int )size != 0U) { start = 0U; length = size; } else { start = 1U; length = *local_buffer; if (((unsigned int )length == 65535U || (unsigned int )length == 0U) || (int )ptr + (int )length >= (int )hw->eeprom.word_size) { return (0); } else { } } if ((unsigned long )buffer != (unsigned long )((u16 *)0U) && (unsigned int )start + (unsigned int )length > buffer_size) { return (-5); } else { } i = start; goto ldv_55568; ldv_55567: ; if ((int )i == (int )bufsz && (unsigned long )buffer == (unsigned long )((u16 *)0U)) { ptr = (int )ptr + (int )bufsz; i = 0U; if ((int )length < (int )bufsz) { bufsz = length; } else { } status = ixgbe_read_ee_hostif_buffer_X550(hw, (int )ptr, (int )bufsz, (u16 *)(& buf)); if (status != 0) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_checksum_ptr_x550"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor___0.format = "Failed to read EEPROM image\n"; descriptor___0.lineno = 359U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if
(tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Failed to read EEPROM image\n"); } else { } return (status); } else { } } else { } *csum = (int )*csum + (int )*(local_buffer + (unsigned long )i); i = (u16 )((int )i + 1); length = (u16 )((int )length - 1); ldv_55568: ; if ((unsigned int )length != 0U) { goto ldv_55567; } else { } return (0); } }
/* Compute the X550 EEPROM checksum over the 66 pointer words (skipping word
 * 63, the stored checksum) plus every pointed-to region in words 2..14
 * (words 4 and 5 excluded); returns 47802 (0xBABA) minus the sum. */
static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw , u16 *buffer , u32 buffer_size ) { u16 eeprom_ptrs[66U] ; u16 *local_buffer ; s32 status ; u16 checksum ; u16 pointer ; u16 i ; u16 size ; struct _ddebug descriptor ; long tmp ; { checksum = 0U; (*(hw->eeprom.ops.init_params))(hw); if ((unsigned long )buffer == (unsigned long )((u16 *)0U)) { status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, 66, (u16 *)(& eeprom_ptrs)); if (status != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_calc_checksum_X550"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor.format = "Failed to read EEPROM image\n"; descriptor.lineno = 392U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Failed to read EEPROM image\n"); } else { } return (status); } else { } local_buffer = (u16 *)(& eeprom_ptrs); } else { if (buffer_size <= 64U) { return (-5); } else { } local_buffer = buffer; } i = 0U; goto ldv_55585; ldv_55584: ; if ((unsigned int )i != 63U) { checksum = (int )*(local_buffer + (unsigned long )i) + (int )checksum; } else { } i = (u16 )((int )i + 1); ldv_55585: ; if ((unsigned int )i <= 65U) { goto ldv_55584; } else { } i = 2U; goto ldv_55594; ldv_55593: ; if ((unsigned int )i == 4U ||
(unsigned int )i == 5U) { goto ldv_55587; } else { } pointer = *(local_buffer + (unsigned long )i); if (((unsigned int )pointer == 65535U || (unsigned int )pointer == 0U) || (int )hw->eeprom.word_size <= (int )pointer) { goto ldv_55587; } else { } switch ((int )i) { case 6: size = 36U; goto ldv_55589; case 7: ; case 8: size = 8U; goto ldv_55589; default: size = 0U; goto ldv_55589; } ldv_55589: status = ixgbe_checksum_ptr_x550(hw, (int )pointer, (int )size, & checksum, buffer, buffer_size); if (status != 0) { return (status); } else { } ldv_55587: i = (u16 )((int )i + 1); ldv_55594: ; if ((unsigned int )i <= 14U) { goto ldv_55593; } else { } checksum = 47802U - (unsigned int )checksum; return ((s32 )checksum); } } static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw ) { s32 tmp ; { tmp = ixgbe_calc_checksum_X550(hw, (u16 *)0U, 0U); return (tmp); } } static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw , u16 offset , u16 *data ) { s32 status ; s32 tmp ; { status = 0; tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, 1U); if (tmp == 0) { status = ixgbe_read_ee_hostif_data_X550(hw, (int )offset, data); (*(hw->mac.ops.release_swfw_sync))(hw, 1U); } else { status = -16; } return (status); } } static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw , u16 *checksum_val ) { s32 status ; u16 checksum ; u16 read_checksum ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; { read_checksum = 0U; status = (*(hw->eeprom.ops.read))(hw, 0, & checksum); if (status != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_validate_eeprom_checksum_X550"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor.format = "EEPROM read failed\n"; descriptor.lineno = 498U; descriptor.flags = 0U; tmp = 
ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } return (status); } else { } status = (*(hw->eeprom.ops.calc_checksum))(hw); if (status < 0) { return (status); } else { } checksum = (unsigned short )status; status = ixgbe_read_ee_hostif_X550(hw, 63, & read_checksum); if (status != 0) { return (status); } else { } if ((int )read_checksum != (int )checksum) { status = -2; descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_validate_eeprom_checksum_X550"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor___0.format = "Invalid EEPROM checksum"; descriptor___0.lineno = 518U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Invalid EEPROM checksum"); } else { } } else { } if ((unsigned long )checksum_val != (unsigned long )((u16 *)0U)) { *checksum_val = checksum; } else { } return (status); } } static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw , u16 offset , u16 data ) { s32 status ; struct ixgbe_hic_write_shadow_ram buffer ; __u32 tmp ; { buffer.hdr.req.cmd = 51U; buffer.hdr.req.buf_lenh = 0U; buffer.hdr.req.buf_lenl = 10U; buffer.hdr.req.checksum = 255U; buffer.length = 512U; buffer.data = data; tmp = __fswab32((__u32 )((int )offset * 2)); buffer.address = tmp; status = ixgbe_host_interface_command(hw, (u32 *)(& buffer), 16U, 500U, 0); return (status); } } static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw , u16 offset , u16 data ) { s32 status ; struct _ddebug descriptor ; long tmp ; 
s32 tmp___0 ; { status = 0; tmp___0 = (*(hw->mac.ops.acquire_swfw_sync))(hw, 1U); if (tmp___0 == 0) { status = ixgbe_write_ee_hostif_data_X550(hw, (int )offset, (int )data); (*(hw->mac.ops.release_swfw_sync))(hw, 1U); } else { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_write_ee_hostif_X550"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor.format = "write ee hostif failed to get semaphore"; descriptor.lineno = 572U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "write ee hostif failed to get semaphore"); } else { } status = -16; } return (status); } } static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw ) { s32 status ; union ixgbe_hic_hdr2 buffer ; { status = 0; buffer.req.cmd = 54U; buffer.req.buf_lenh = 0U; buffer.req.buf_lenl = 0U; buffer.req.checksum = 255U; status = ixgbe_host_interface_command(hw, (u32 *)(& buffer), 4U, 500U, 0); return (status); } } static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw ) { u32 rxctrl ; u32 pfdtxgswc ; s32 status ; struct ixgbe_hic_disable_rxen fw_cmd ; { rxctrl = ixgbe_read_reg(hw, 12288U); if ((int )rxctrl & 1) { pfdtxgswc = ixgbe_read_reg(hw, 33312U); if ((int )pfdtxgswc & 1) { pfdtxgswc = pfdtxgswc & 4294967294U; ixgbe_write_reg(hw, 33312U, pfdtxgswc); hw->mac.set_lben = 1; } else { hw->mac.set_lben = 0; } fw_cmd.hdr.cmd = 222U; fw_cmd.hdr.buf_len = 1U; fw_cmd.hdr.checksum = 255U; fw_cmd.port_number = (unsigned char )hw->bus.lan_id; status = ixgbe_host_interface_command(hw, (u32 *)(& fw_cmd), 8U, 500U, 1); if (status != 0) { rxctrl = ixgbe_read_reg(hw, 12288U); if ((int )rxctrl & 1) { rxctrl = rxctrl & 4294967294U; ixgbe_write_reg(hw, 
/* Tail of ixgbe_disable_rx_x550(): fallback path clearing RXCTRL bit 0. */
12288U, rxctrl); } else { } } else { } } else { } return; } } 
/* ixgbe_update_eeprom_checksum_X550: recompute and persist the EEPROM checksum.
 * Reads word 0 first purely to verify the EEPROM is reachable (logging "EEPROM
 * read failed" otherwise), computes the checksum, writes it to offset 63, then
 * commits shadow RAM to flash via ixgbe_update_flash_X550. */
static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw ) { s32 status ; u16 checksum ; struct _ddebug descriptor ; long tmp ; { checksum = 0U; status = ixgbe_read_ee_hostif_X550(hw, 0, & checksum); if (status != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_update_eeprom_checksum_X550"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor.format = "EEPROM read failed\n"; descriptor.lineno = 659U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM read failed\n"); } else { } return (status); } else { } status = ixgbe_calc_eeprom_checksum_X550(hw); if (status < 0) { return (status); } else { } checksum = (unsigned short )status; status = ixgbe_write_ee_hostif_X550(hw, 63, (int )checksum); if (status != 0) { return (status); } else { } status = ixgbe_update_flash_X550(hw); return (status); } } 
/* ixgbe_write_ee_hostif_buffer_X550: multi-word EEPROM write.  Takes the EEPROM
 * SWFW semaphore (mask 1) once for the whole burst, then writes `words` words
 * one at a time via ixgbe_write_ee_hostif_data_X550 in the CIL-lowered loop
 * ldv_55661/ldv_55662; on the first failure it logs and jumps to ldv_55660,
 * which releases the semaphore and returns the error status. */
static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw , u16 offset , u16 words , u16 *data ) { s32 status ; u32 i ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; { status = 0; i = 0U; status = (*(hw->mac.ops.acquire_swfw_sync))(hw, 1U); if (status != 0) { descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_write_ee_hostif_buffer_X550"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor.format = "EEPROM write buffer - semaphore failed\n"; descriptor.lineno = 698U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "EEPROM write buffer - semaphore failed\n"); } else { } return (status); } else { } i = 0U; goto ldv_55662; ldv_55661: status = ixgbe_write_ee_hostif_data_X550(hw, (int )((u16 )i) + (int )offset, (int )*(data + (unsigned long )i)); if (status != 0) { descriptor___0.modname = "ixgbe"; descriptor___0.function = "ixgbe_write_ee_hostif_buffer_X550"; descriptor___0.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor___0.format = "Eeprom buffered write failed\n"; descriptor___0.lineno = 706U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Eeprom buffered write failed\n"); } else { } goto ldv_55660; } else { } i = i + 1U; ldv_55662: ; if ((u32 )words > i) { goto ldv_55661; } else { } ldv_55660: (*(hw->mac.ops.release_swfw_sync))(hw, 1U); return (status); } } 
/* ixgbe_write_iosf_sb_reg_x550 (head; continues on the next chunk line): writes
 * an IOSF side-band register.  Takes SWFW sync mask 6, waits for the interface
 * to go idle (ixgbe_iosf_wait with NULL), programs the command register @
 * 0x11144 with (device_type << 28) | reg_addr and the data register @ 0x11148,
 * then waits again and checks the error bits (mask 0xC0000). */
static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw , u32 reg_addr , u32 device_type , u32 data ) { u32 gssr ; u32 command ; u32 error ; s32 ret ; struct _ddebug descriptor ; long tmp ; { gssr = 6U; ret = (*(hw->mac.ops.acquire_swfw_sync))(hw, gssr); if (ret != 0) { return (ret); } else { } ret = ixgbe_iosf_wait(hw, (u32 *)0U); if (ret != 0) { goto out; } else { } command = (device_type << 28) | reg_addr; ixgbe_write_reg(hw, 69956U, command); ixgbe_write_reg(hw, 69960U, data); ret = ixgbe_iosf_wait(hw, & command); if ((command & 786432U) != 0U) { error = (command & 267386880U) >> 20; descriptor.modname = "ixgbe"; descriptor.function = 
"ixgbe_write_iosf_sb_reg_x550"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor.format = "Failed to write, error %x\n"; descriptor.lineno = 753U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Failed to write, error %x\n", error); } else { } return (-3); } else { } out: (*(hw->mac.ops.release_swfw_sync))(hw, gssr); return (ret); } } static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw , ixgbe_link_speed *speed ) { s32 status ; u32 reg_val ; { status = ixgbe_read_iosf_sb_reg_x550(hw, (unsigned int )hw->bus.lan_id == 0U ? 16908U : 33292U, 0U, & reg_val); if (status != 0) { return (status); } else { } reg_val = reg_val & 3758096383U; reg_val = reg_val & 4294965503U; switch (*speed) { case 128U: reg_val = reg_val | 1024U; goto ldv_55683; case 32U: reg_val = reg_val | 512U; goto ldv_55683; default: ; return (-8); } ldv_55683: status = ixgbe_write_iosf_sb_reg_x550(hw, (unsigned int )hw->bus.lan_id == 0U ? 16908U : 33292U, 0U, reg_val); if (status != 0) { return (status); } else { } status = ixgbe_read_iosf_sb_reg_x550(hw, (unsigned int )hw->bus.lan_id == 0U ? 19200U : 35584U, 0U, & reg_val); if (status != 0) { return (status); } else { } reg_val = reg_val | 16U; status = ixgbe_write_iosf_sb_reg_x550(hw, (unsigned int )hw->bus.lan_id == 0U ? 19200U : 35584U, 0U, reg_val); if (status != 0) { return (status); } else { } status = ixgbe_read_iosf_sb_reg_x550(hw, (unsigned int )hw->bus.lan_id == 0U ? 
17972U : 34356U, 0U, & reg_val); if (status != 0) { return (status); } else { } reg_val = reg_val & 4294967231U; reg_val = reg_val & 4294934527U; reg_val = reg_val & 4294901759U; status = ixgbe_write_iosf_sb_reg_x550(hw, (unsigned int )hw->bus.lan_id == 0U ? 17972U : 34356U, 0U, reg_val); if (status != 0) { return (status); } else { } status = ixgbe_read_iosf_sb_reg_x550(hw, (unsigned int )hw->bus.lan_id == 0U ? 17976U : 34360U, 0U, & reg_val); if (status != 0) { return (status); } else { } reg_val = reg_val & 4294967231U; reg_val = reg_val & 4294934527U; reg_val = reg_val & 4294901759U; status = ixgbe_write_iosf_sb_reg_x550(hw, (unsigned int )hw->bus.lan_id == 0U ? 17976U : 34360U, 0U, reg_val); if (status != 0) { return (status); } else { } status = ixgbe_read_iosf_sb_reg_x550(hw, (unsigned int )hw->bus.lan_id == 0U ? 21792U : 38176U, 0U, & reg_val); if (status != 0) { return (status); } else { } reg_val = reg_val | 2147483648U; reg_val = reg_val | 8U; reg_val = reg_val | 4U; reg_val = reg_val | 2U; status = ixgbe_write_iosf_sb_reg_x550(hw, (unsigned int )hw->bus.lan_id == 0U ? 21792U : 38176U, 0U, reg_val); if (status != 0) { return (status); } else { } status = ixgbe_read_iosf_sb_reg_x550(hw, (unsigned int )hw->bus.lan_id == 0U ? 16908U : 33292U, 0U, & reg_val); if (status != 0) { return (status); } else { } reg_val = reg_val | 2147483648U; status = ixgbe_write_iosf_sb_reg_x550(hw, (unsigned int )hw->bus.lan_id == 0U ? 
16908U : 33292U, 0U, reg_val); return (status); } } static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw , ixgbe_link_speed speed , bool autoneg_wait ) { s32 status ; ixgbe_link_speed force_speed ; s32 tmp ; { if ((speed & 128U) != 0U) { force_speed = 128U; } else { force_speed = 32U; } if (((unsigned long )hw->phy.nw_mng_if_sel & 16777216UL) == 0UL) { status = ixgbe_setup_ixfi_x550em(hw, & force_speed); if (status != 0) { return (status); } else { } } else { } tmp = (*(hw->phy.ops.setup_link_speed))(hw, speed, (int )autoneg_wait); return (tmp); } } static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw , ixgbe_link_speed *speed , bool *link_up , bool link_up_wait_to_complete ) { u32 status ; u16 autoneg_status ; enum ixgbe_media_type tmp ; s32 tmp___0 ; s32 tmp___1 ; { tmp = (*(hw->mac.ops.get_media_type))(hw); if ((unsigned int )tmp != 4U) { return (-4); } else { } tmp___0 = ixgbe_check_mac_link_generic(hw, speed, link_up, (int )link_up_wait_to_complete); status = (u32 )tmp___0; if (status != 0U || ! 
/* Tail of ixgbe_check_link_t_X550em(): clears *link_up when the external PHY
 * autoneg status (reg 1, MDIO device 7) does not report link (bit 2). */
*link_up) { return ((s32 )status); } else { } tmp___1 = (*(hw->phy.ops.read_reg))(hw, 1U, 7U, & autoneg_status); status = (u32 )tmp___1; if (status != 0U) { return ((s32 )status); } else { } if (((int )autoneg_status & 4) == 0) { *link_up = 0; } else { } return (0); } } 
/* ixgbe_init_mac_link_ops_X550em: adjusts MAC ops per detected media type —
 * fiber (1) nulls the tx-laser callbacks (no laser control on X550EM SFP+),
 * copper (4) installs the t_X550em setup_link/check_link pair. */
static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw ) { struct ixgbe_mac_info *mac ; enum ixgbe_media_type tmp ; { mac = & hw->mac; tmp = (*(mac->ops.get_media_type))(hw); switch ((unsigned int )tmp) { case 1U: mac->ops.disable_tx_laser = (void (*)(struct ixgbe_hw * ))0; mac->ops.enable_tx_laser = (void (*)(struct ixgbe_hw * ))0; mac->ops.flap_tx_laser = (void (*)(struct ixgbe_hw * ))0; goto ldv_55706; case 4U: mac->ops.setup_link = & ixgbe_setup_mac_link_t_X550em; mac->ops.check_link = & ixgbe_check_link_t_X550em; goto ldv_55706; default: ; goto ldv_55706; } ldv_55706: ; return; } } 
/* ixgbe_setup_sfp_modules_X550em: maps the detected SFP type to an EDC mode —
 * DA-copper types (3,4) select linear mode (5), optical types (5-8,11,12)
 * select SR mode (9); unknown returns -20, unsupported -19.  The chosen mode
 * is written over combined I2C at address 0xBE, falling back to 0x80, into a
 * per-port register slice ((lan_id << 12) + 0x12B0).  Also re-runs the MAC
 * link-op init and disables the PHY reset callback for SFP parts. */
static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw ) { bool setup_linear ; u16 reg_slice ; u16 edc_mode ; s32 ret_val ; { switch ((unsigned int )hw->phy.sfp_type) { case 65535U: ; return (0); case 65534U: ; return (-20); case 3U: ; case 4U: setup_linear = 1; goto ldv_55720; case 5U: ; case 6U: ; case 7U: ; case 8U: ; case 11U: ; case 12U: setup_linear = 0; goto ldv_55720; default: ; return (-19); } ldv_55720: ixgbe_init_mac_link_ops_X550em(hw); hw->phy.ops.reset = (s32 (*)(struct ixgbe_hw * ))0; reg_slice = (unsigned int )((int )hw->bus.lan_id << 12U) + 4784U; if ((int )setup_linear) { edc_mode = 5U; } else { edc_mode = 9U; } ret_val = (*(hw->phy.ops.write_i2c_combined))(hw, 190, (int )reg_slice, (int )edc_mode); if (ret_val != 0) { ret_val = (*(hw->phy.ops.write_i2c_combined))(hw, 128, (int )reg_slice, (int )edc_mode); } else { } return (ret_val); } } 
/* ixgbe_get_link_capabilities_X550em: for fiber media, autoneg is off and the
 * speed is 1G (0x20) for the KR-type SFP variants (11,12), 10G|1G (0xA0) for
 * multispeed modules, else 10G (0x80); for all other media it reports 10G|1G
 * with autoneg enabled. */
static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw , ixgbe_link_speed *speed , bool *autoneg ) { { if ((unsigned int )hw->phy.media_type == 1U) { *autoneg = 0; if ((unsigned int )hw->phy.sfp_type == 11U || (unsigned int )hw->phy.sfp_type == 12U) { *speed = 32U; return (0); } else { } if ((int )hw->phy.multispeed_fiber) { *speed = 160U; } else { *speed = 128U; } } else { *speed = 160U; *autoneg = 1; } return (0); } } 
/* ixgbe_get_lasi_ext_t_x550em: walks the external PHY's chained interrupt
 * status registers (MDIO device 30 global chip, then device 7 autoneg).  If
 * the over-temperature flag (bit 14 of reg 0xCC00, dev 30) is set it powers
 * the copper PHY down and returns -26; otherwise *lsc is set when the
 * link-status-changed bit (bit 0 of reg 0xCC01, dev 7) is pending.  Reading
 * these status registers also clears the pending interrupts. */
static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw , bool *lsc ) { u32 status ; u16 reg ; s32 tmp ; s32 tmp___0 ; s32 tmp___1 ; s32 tmp___2 ; s32 tmp___3 ; { *lsc = 0; tmp = (*(hw->phy.ops.read_reg))(hw, 64512U, 30U, & reg); status = (u32 )tmp; if (status != 0U || ((int )reg & 1) == 0) { return ((s32 )status); } else { } tmp___0 = (*(hw->phy.ops.read_reg))(hw, 64513U, 30U, & reg); status = (u32 )tmp___0; if (status != 0U || ((int )reg & 4100) == 0) { return ((s32 )status); } else { } tmp___1 = (*(hw->phy.ops.read_reg))(hw, 52224U, 30U, & reg); status = (u32 )tmp___1; if (status != 0U) { return ((s32 )status); } else { } if (((int )reg & 16384) != 0) { ixgbe_set_copper_phy_power(hw, 0); return (-26); } else { } tmp___2 = (*(hw->phy.ops.read_reg))(hw, 64512U, 7U, & reg); status = (u32 )tmp___2; if (status != 0U || ((int )reg & 512) == 0) { return ((s32 )status); } else { } tmp___3 = (*(hw->phy.ops.read_reg))(hw, 52225U, 7U, & reg); status = (u32 )tmp___3; if (status != 0U) { return ((s32 )status); } else { } if ((int )reg & 1) { *lsc = 1; } else { } return (0); } } 
/* ixgbe_enable_lasi_ext_t_x550em: arms the external PHY's LASI interrupts.
 * First calls ixgbe_get_lasi_ext_t_x550em to clear anything pending (its
 * status is intentionally discarded — the following read_reg overwrites it,
 * matching the upstream driver), then read-modify-writes the enable bits in
 * the interrupt mask registers (device 7 reg 0xD401 bit 0; device 30 regs
 * 0xD400 bit 14 and 0xFF01 bits 12|2; continues on the next chunk line). */
static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw ) { u32 status ; u16 reg ; bool lsc ; s32 tmp ; s32 tmp___0 ; s32 tmp___1 ; s32 tmp___2 ; s32 tmp___3 ; s32 tmp___4 ; s32 tmp___5 ; s32 tmp___6 ; s32 tmp___7 ; { tmp = ixgbe_get_lasi_ext_t_x550em(hw, & lsc); status = (u32 )tmp; tmp___0 = (*(hw->phy.ops.read_reg))(hw, 54273U, 7U, & reg); status = (u32 )tmp___0; if (status != 0U) { return ((s32 )status); } else { } reg = (u16 )((unsigned int )reg | 1U); tmp___1 = (*(hw->phy.ops.write_reg))(hw, 54273U, 7U, (int )reg); status = (u32 )tmp___1; if (status != 0U) { return ((s32 )status); } else { } tmp___2 = (*(hw->phy.ops.read_reg))(hw, 54272U, 30U, & reg); status = (u32 )tmp___2; if (status != 
16908U : 33292U, 0U, reg_val); return (status); } } static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw ) { s32 status ; u32 reg_val ; { status = ixgbe_read_iosf_sb_reg_x550(hw, 76U, (u32 )((int )hw->bus.lan_id + 2), & reg_val); if (status != 0) { return (status); } else { } reg_val = reg_val & 4294770687U; reg_val = reg_val | 536870912U; if ((hw->phy.autoneg_advertised & 128U) != 0U) { reg_val = reg_val | 131072U; } else { } if ((hw->phy.autoneg_advertised & 32U) != 0U) { reg_val = reg_val | 65536U; } else { } reg_val = reg_val | 2147483648U; status = ixgbe_write_iosf_sb_reg_x550(hw, 76U, (u32 )((int )hw->bus.lan_id + 2), reg_val); return (status); } } static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw ) { s32 tmp ; { tmp = ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); return (tmp); } } static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw , bool *link_up ) { u32 ret ; u16 autoneg_status ; s32 tmp ; s32 tmp___0 ; { *link_up = 0; tmp = (*(hw->phy.ops.read_reg))(hw, 1U, 7U, & autoneg_status); ret = (u32 )tmp; if (ret != 0U) { return ((s32 )ret); } else { } tmp___0 = (*(hw->phy.ops.read_reg))(hw, 1U, 7U, & autoneg_status); ret = (u32 )tmp___0; if (ret != 0U) { return ((s32 )ret); } else { } *link_up = ((int )autoneg_status & 4) != 0; return (0); } } static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw ) { ixgbe_link_speed force_speed ; bool link_up ; u32 status ; u16 speed ; enum ixgbe_media_type tmp ; s32 tmp___0 ; s32 tmp___1 ; s32 tmp___2 ; s32 tmp___3 ; { tmp = (*(hw->mac.ops.get_media_type))(hw); if ((unsigned int )tmp != 4U) { return (-4); } else { } tmp___0 = ixgbe_ext_phy_t_x550em_get_link(hw, & link_up); status = (u32 )tmp___0; if (status != 0U) { return ((s32 )status); } else { } if (! 
link_up) { return (0); } else { } tmp___1 = (*(hw->phy.ops.read_reg))(hw, 51200U, 7U, & speed); status = (u32 )tmp___1; if (status != 0U) { return ((s32 )status); } else { } tmp___2 = ixgbe_ext_phy_t_x550em_get_link(hw, & link_up); status = (u32 )tmp___2; if (status != 0U) { return ((s32 )status); } else { } if (! link_up) { return (0); } else { } speed = (unsigned int )speed & 7U; switch ((int )speed) { case 7: force_speed = 128U; goto ldv_55779; case 5: force_speed = 32U; goto ldv_55779; default: ; return (-13); } ldv_55779: tmp___3 = ixgbe_setup_ixfi_x550em(hw, & force_speed); return (tmp___3); } } static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw ) { s32 status ; s32 tmp ; { status = ixgbe_reset_phy_generic(hw); if (status != 0) { return (status); } else { } tmp = ixgbe_enable_lasi_ext_t_x550em(hw); return (tmp); } } static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw ) { struct ixgbe_phy_info *phy ; ixgbe_link_speed speed ; s32 ret_val ; enum ixgbe_media_type tmp ; { phy = & hw->phy; (*(hw->mac.ops.set_lan_id))(hw); tmp = (*(hw->mac.ops.get_media_type))(hw); if ((unsigned int )tmp == 1U) { phy->phy_semaphore_mask = 6150U; ixgbe_setup_mux_ctl(hw); phy->nw_mng_if_sel = ixgbe_read_reg(hw, 70008U); if (((unsigned long )phy->nw_mng_if_sel & 16777216UL) != 0UL) { speed = 160U; ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); } else { } } else { } ret_val = (*(phy->ops.identify))(hw); ixgbe_init_mac_link_ops_X550em(hw); if ((unsigned int )phy->sfp_type != 65535U) { phy->ops.reset = (s32 (*)(struct ixgbe_hw * ))0; } else { } switch ((unsigned int )hw->phy.type) { case 5U: phy->ops.setup_link = & ixgbe_setup_kx4_x550em; phy->ops.read_reg = & ixgbe_read_phy_reg_x550em; phy->ops.write_reg = & ixgbe_write_phy_reg_x550em; goto ldv_55793; case 4U: phy->ops.setup_link = & ixgbe_setup_kr_x550em; phy->ops.read_reg = & ixgbe_read_phy_reg_x550em; phy->ops.write_reg = & ixgbe_write_phy_reg_x550em; goto ldv_55793; case 6U: phy->nw_mng_if_sel = ixgbe_read_reg(hw, 
70008U); if (((unsigned long )phy->nw_mng_if_sel & 16777216UL) == 0UL) { phy->ops.setup_internal_link = & ixgbe_setup_internal_phy_t_x550em; } else { speed = 160U; ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); } phy->ops.handle_lasi = & ixgbe_handle_lasi_ext_t_x550em; phy->ops.reset = & ixgbe_reset_phy_t_X550em; goto ldv_55793; default: ; goto ldv_55793; } ldv_55793: ; return (ret_val); } } static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw ) { enum ixgbe_media_type media_type ; { switch ((int )hw->device_id) { case 5547: ; case 5546: media_type = 5; goto ldv_55803; case 5548: media_type = 1; goto ldv_55803; case 5550: ; case 5549: media_type = 4; goto ldv_55803; default: media_type = 0; goto ldv_55803; } ldv_55803: ; return (media_type); } } static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw ) { s32 status ; u16 reg ; { status = (*(hw->phy.ops.read_reg))(hw, 52226U, 1U, & reg); if (status != 0) { return (status); } else { } if (((int )reg & 3) != 0) { status = (*(hw->phy.ops.read_reg))(hw, 50297U, 30U, & reg); if (status != 0) { return (status); } else { } reg = (unsigned int )reg & 32767U; status = (*(hw->phy.ops.write_reg))(hw, 50297U, 30U, (int )reg); if (status != 0) { return (status); } else { } } else { } return (status); } } static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw ) { ixgbe_link_speed link_speed ; s32 status ; u32 ctrl ; u32 i ; u32 hlreg0 ; bool link_up ; u32 tmp ; struct _ddebug descriptor ; long tmp___0 ; { ctrl = 0U; link_up = 0; status = (*(hw->mac.ops.stop_adapter))(hw); if (status != 0) { return (status); } else { } ixgbe_clear_tx_pending(hw); status = (*(hw->phy.ops.init))(hw); if ((unsigned int )hw->phy.type == 6U) { status = ixgbe_init_ext_t_x550em(hw); if (status != 0) { return (status); } else { } } else { } if ((int )hw->phy.sfp_setup_needed) { status = (*(hw->mac.ops.setup_sfp))(hw); hw->phy.sfp_setup_needed = 0; } else { } if (! 
/* Tail of ixgbe_reset_hw_X550em() (header is on the previous chunk line):
 * performs the PHY reset when permitted, then the MAC reset sequence —
 * request CTRL.RST (0x8) or CTRL.LNK_RST (0x4000000, chosen when link is up
 * and force_full_reset is clear), poll up to 10 x ~1us for the reset bits to
 * self-clear (else status -15 and a "Reset polling failed" debug message),
 * sleep 50 ms, repeat once for the double-reset workaround flag, then restore
 * the permanent MAC address, set num_rar_entries to 128, re-init RX addresses,
 * and apply device-specific fixups (clear an HLREG0 bit on device 0x15AD,
 * reprogram the I2C mux on 0x15AC). */
hw->phy.reset_disable && (unsigned long )hw->phy.ops.reset != (unsigned long )((s32 (*)(struct ixgbe_hw * ))0)) { (*(hw->phy.ops.reset))(hw); } else { } mac_reset_top: ctrl = 8U; if (! hw->force_full_reset) { (*(hw->mac.ops.check_link))(hw, & link_speed, & link_up, 0); if ((int )link_up) { ctrl = 67108864U; } else { } } else { } tmp = ixgbe_read_reg(hw, 0U); ctrl = tmp | ctrl; ixgbe_write_reg(hw, 0U, ctrl); ixgbe_read_reg(hw, 8U); i = 0U; goto ldv_55825; ldv_55824: __const_udelay(4295UL); ctrl = ixgbe_read_reg(hw, 0U); if ((ctrl & 67108872U) == 0U) { goto ldv_55823; } else { } i = i + 1U; ldv_55825: ; if (i <= 9U) { goto ldv_55824; } else { } ldv_55823: ; if ((ctrl & 67108872U) != 0U) { status = -15; descriptor.modname = "ixgbe"; descriptor.function = "ixgbe_reset_hw_X550em"; descriptor.filename = "/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10467/dscv_tempdir/dscv/ri/43_2a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c"; descriptor.format = "Reset polling failed to complete.\n"; descriptor.lineno = 1670U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_netdev_dbg(& descriptor, (struct net_device const *)((struct ixgbe_adapter *)hw->back)->netdev, "Reset polling failed to complete.\n"); } else { } } else { } msleep(50U); if ((int )hw->mac.flags & 1) { hw->mac.flags = (unsigned int )hw->mac.flags & 254U; goto mac_reset_top; } else { } (*(hw->mac.ops.get_mac_addr))(hw, (u8 *)(& hw->mac.perm_addr)); hw->mac.num_rar_entries = 128U; (*(hw->mac.ops.init_rx_addrs))(hw); if ((unsigned int )hw->device_id == 5549U) { hlreg0 = ixgbe_read_reg(hw, 16960U); hlreg0 = hlreg0 & 4294901759U; ixgbe_write_reg(hw, 16960U, hlreg0); } else { } if ((unsigned int )hw->device_id == 5548U) { ixgbe_setup_mux_ctl(hw); } else { } return (status); } } 
/* ixgbe_set_ethertype_anti_spoofing_X550: toggles the per-VF ethertype
 * anti-spoof bit in the PFVFSPOOF register bank (8 VFs per 32-bit register at
 * (0x2080 + vf/8) * 4; the ethertype bits live at position (vf % 8) + 16). */
static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw , bool enable , int vf ) { int vf_target_reg ; int vf_target_shift ; u32 pfvfspoof ; { vf_target_reg = vf >> 3; vf_target_shift = vf % 8 + 16; pfvfspoof = ixgbe_read_reg(hw, (u32 )((vf_target_reg + 8320) * 4)); if ((int )enable) { pfvfspoof = (u32 )(1 << vf_target_shift) | pfvfspoof; } else { pfvfspoof = (u32 )(~ (1 << vf_target_shift)) & pfvfspoof; } ixgbe_write_reg(hw, (u32 )((vf_target_reg + 8320) * 4), pfvfspoof); return; } } 
/* ixgbe_set_source_address_pruning_X550: sets/clears one bit of the 64-bit
 * source-address-pruning pool bitmap, stored as a low/high register pair at
 * 0x50B0/0x50B4; pools > 63 are ignored. */
static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw , bool enable , unsigned int pool ) { u64 pfflp ; u32 tmp ; u32 tmp___0 ; { if (pool > 63U) { return; } else { } tmp = ixgbe_read_reg(hw, 20656U); pfflp = (unsigned long long )tmp; tmp___0 = ixgbe_read_reg(hw, 20660U); pfflp = ((unsigned long long )tmp___0 << 32) | pfflp; if ((int )enable) { pfflp = (1ULL << (int )pool) | pfflp; } else { pfflp = ~ (1ULL << (int )pool) & pfflp; } ixgbe_write_reg(hw, 20656U, (unsigned int )pfflp); ixgbe_write_reg(hw, 20660U, (unsigned int )(pfflp >> 32)); return; } } 
/* mac_ops_X550: MAC operations table for X550 — mostly generic/X540 handlers
 * plus the X550-specific disable_rx, source-address pruning and ethertype
 * anti-spoofing entries.  Field order must match struct ixgbe_mac_operations
 * exactly; do not reorder. */
static struct ixgbe_mac_operations mac_ops_X550 = {& ixgbe_init_hw_generic, & ixgbe_reset_hw_X540, & ixgbe_start_hw_X540, & ixgbe_clear_hw_cntrs_generic, & ixgbe_get_media_type_X540, & ixgbe_get_mac_addr_generic, & ixgbe_get_san_mac_addr_generic, & ixgbe_get_device_caps_generic, & ixgbe_get_wwn_prefix_generic, & ixgbe_stop_adapter_generic, & ixgbe_get_bus_info_generic, & ixgbe_set_lan_id_multi_port_pcie, (s32 (*)(struct ixgbe_hw * , u32 , u8 * ))0, (s32 (*)(struct ixgbe_hw * , u32 , u8 ))0, (s32 (*)(struct ixgbe_hw * ))0, & ixgbe_disable_rx_buff_generic, & ixgbe_enable_rx_buff_generic, & ixgbe_enable_rx_dma_generic, & ixgbe_acquire_swfw_sync_X540, & ixgbe_release_swfw_sync_X540, & prot_autoc_read_generic, & prot_autoc_write_generic, 0, 0, 0, 0, & ixgbe_setup_mac_link_X540, & ixgbe_check_mac_link_generic, & ixgbe_get_copper_link_capabilities_generic, & ixgbe_set_rxpba_generic, & ixgbe_led_on_generic, & ixgbe_led_off_generic, & ixgbe_blink_led_start_X540, & ixgbe_blink_led_stop_X540, & ixgbe_set_rar_generic, & ixgbe_clear_rar_generic, & ixgbe_set_vmdq_generic, & ixgbe_set_vmdq_san_mac_generic, & ixgbe_clear_vmdq_generic, & ixgbe_init_rx_addrs_generic, & ixgbe_update_mc_addr_list_generic, & ixgbe_enable_mc_generic, & ixgbe_disable_mc_generic, & ixgbe_clear_vfta_generic, & ixgbe_set_vfta_generic, & ixgbe_init_uta_tables_generic, & ixgbe_set_mac_anti_spoofing, & ixgbe_set_vlan_anti_spoofing, & ixgbe_fc_enable_generic, & ixgbe_set_fw_drv_ver_generic, (s32 (*)(struct ixgbe_hw * ))0, (s32 (*)(struct ixgbe_hw * ))0, & ixgbe_disable_rx_x550, & ixgbe_enable_rx_generic, & ixgbe_set_source_address_pruning_X550, & ixgbe_set_ethertype_anti_spoofing_X550, 0, 0, 0}; 
/* mac_ops_X550EM_x: MAC ops for X550EM_x — differs from X550 mainly in
 * reset_hw/get_media_type/link-capability handlers, the SFP setup hook, and
 * nulled SAN/WWN entries (initializer continues on the next chunk line). */
static struct ixgbe_mac_operations mac_ops_X550EM_x = {& ixgbe_init_hw_generic, & ixgbe_reset_hw_X550em, & ixgbe_start_hw_X540, & ixgbe_clear_hw_cntrs_generic, & ixgbe_get_media_type_X550em, & ixgbe_get_mac_addr_generic, (s32 (*)(struct ixgbe_hw * , u8 * ))0, & ixgbe_get_device_caps_generic, (s32 (*)(struct ixgbe_hw * , u16 * , u16 * ))0, & ixgbe_stop_adapter_generic, & ixgbe_get_bus_info_generic, & ixgbe_set_lan_id_multi_port_pcie, (s32 (*)(struct ixgbe_hw * , u32 , u8 * ))0, (s32 (*)(struct ixgbe_hw * , u32 , u8 ))0, & ixgbe_setup_sfp_modules_X550em, & ixgbe_disable_rx_buff_generic, & ixgbe_enable_rx_buff_generic, & ixgbe_enable_rx_dma_generic, & ixgbe_acquire_swfw_sync_X540, & ixgbe_release_swfw_sync_X540, & prot_autoc_read_generic, & prot_autoc_write_generic, 0, 0, 0, 0, (s32 (*)(struct ixgbe_hw * , ixgbe_link_speed , bool ))0, & ixgbe_check_mac_link_generic, & ixgbe_get_link_capabilities_X550em, & ixgbe_set_rxpba_generic, & ixgbe_led_on_generic, & ixgbe_led_off_generic, & ixgbe_blink_led_start_X540, & ixgbe_blink_led_stop_X540, & ixgbe_set_rar_generic, & ixgbe_clear_rar_generic, & ixgbe_set_vmdq_generic, & ixgbe_set_vmdq_san_mac_generic, & ixgbe_clear_vmdq_generic, & ixgbe_init_rx_addrs_generic, & ixgbe_update_mc_addr_list_generic, & ixgbe_enable_mc_generic, & 
/* Tail of the mac_ops_X550EM_x initializer (begins on the previous chunk line). */
ixgbe_disable_mc_generic, & ixgbe_clear_vfta_generic, & ixgbe_set_vfta_generic, & ixgbe_init_uta_tables_generic, & ixgbe_set_mac_anti_spoofing, & ixgbe_set_vlan_anti_spoofing, & ixgbe_fc_enable_generic, & ixgbe_set_fw_drv_ver_generic, (s32 (*)(struct ixgbe_hw * ))0, (s32 (*)(struct ixgbe_hw * ))0, & ixgbe_disable_rx_x550, & ixgbe_enable_rx_generic, & ixgbe_set_source_address_pruning_X550, & ixgbe_set_ethertype_anti_spoofing_X550, 0, 0, 0}; 
/* EEPROM op tables: both variants route all reads/writes through the FW host
 * interface helpers above; they differ only in init_eeprom_params (X550 vs
 * X540 flavor). */
static struct ixgbe_eeprom_operations eeprom_ops_X550 = {& ixgbe_init_eeprom_params_X550, & ixgbe_read_ee_hostif_X550, & ixgbe_read_ee_hostif_buffer_X550, & ixgbe_write_ee_hostif_X550, & ixgbe_write_ee_hostif_buffer_X550, & ixgbe_validate_eeprom_checksum_X550, & ixgbe_update_eeprom_checksum_X550, & ixgbe_calc_eeprom_checksum_X550}; static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {& ixgbe_init_eeprom_params_X540, & ixgbe_read_ee_hostif_X550, & ixgbe_read_ee_hostif_buffer_X550, & ixgbe_write_ee_hostif_X550, & ixgbe_write_ee_hostif_buffer_X550, & ixgbe_validate_eeprom_checksum_X550, & ixgbe_update_eeprom_checksum_X550, & ixgbe_calc_eeprom_checksum_X550}; 
/* PHY op tables: X550 uses the generic identify/MDIO/I2C handlers (combined
 * I2C included); X550EM_x swaps in the x550em identify and init hooks and
 * drops the combined-I2C entries. */
static struct ixgbe_phy_operations phy_ops_X550 = {& ixgbe_identify_phy_generic, & ixgbe_identify_module_generic, (s32 (*)(struct ixgbe_hw * ))0, (s32 (*)(struct ixgbe_hw * ))0, & ixgbe_read_phy_reg_generic, & ixgbe_write_phy_reg_generic, 0, 0, & ixgbe_setup_phy_link_generic, 0, & ixgbe_setup_phy_link_speed_generic, 0, & ixgbe_get_phy_firmware_version_generic, & ixgbe_read_i2c_byte_generic, & ixgbe_write_i2c_byte_generic, & ixgbe_read_i2c_sff8472_generic, & ixgbe_read_i2c_eeprom_generic, & ixgbe_write_i2c_eeprom_generic, & ixgbe_read_i2c_combined_generic, & ixgbe_write_i2c_combined_generic, & ixgbe_tn_check_overtemp, & ixgbe_set_copper_phy_power, 0}; static struct ixgbe_phy_operations phy_ops_X550EM_x = {& ixgbe_identify_phy_x550em, & ixgbe_identify_module_generic, & ixgbe_init_phy_ops_X550em, (s32 (*)(struct ixgbe_hw * ))0, & ixgbe_read_phy_reg_generic, & ixgbe_write_phy_reg_generic, 0, 0, & ixgbe_setup_phy_link_generic, 0, & ixgbe_setup_phy_link_speed_generic, 0, & ixgbe_get_phy_firmware_version_generic, & ixgbe_read_i2c_byte_generic, & ixgbe_write_i2c_byte_generic, & ixgbe_read_i2c_sff8472_generic, & ixgbe_read_i2c_eeprom_generic, & ixgbe_write_i2c_eeprom_generic, 0, 0, & ixgbe_tn_check_overtemp, & ixgbe_set_copper_phy_power, 0}; 
/* Register-offset/constant tables (MVALS) for the two MAC variants — identical
 * contents here; consumed through the ixgbe_info.mvals pointer. */
static u32 const ixgbe_mvals_X550[24U] = { 65552U, 65564U, 66048U, 66064U, 65872U, 65856U, 65888U, 65864U, 2U, 4U, 8U, 33554432U, 67108864U, 134217728U, 70920U, 70928U, 16384U, 512U, 4096U, 1024U, 2048U, 256U, 8192U, 89948U}; static u32 const ixgbe_mvals_X550EM_x[24U] = { 65552U, 65564U, 66048U, 66064U, 65872U, 65856U, 65888U, 65864U, 2U, 4U, 8U, 33554432U, 67108864U, 134217728U, 70920U, 70928U, 16384U, 512U, 4096U, 1024U, 2048U, 256U, 8192U, 89948U}; 
/* Top-level device descriptors binding MAC type (4 = X550, 5 = X550EM_x) to
 * the op tables above; referenced by the ixgbe probe code. */
struct ixgbe_info ixgbe_X550_info = {4, & ixgbe_get_invariants_X540, & mac_ops_X550, & eeprom_ops_X550, & phy_ops_X550, & mbx_ops_generic, (u32 const *)(& ixgbe_mvals_X550)}; struct ixgbe_info ixgbe_X550EM_x_info = {5, & ixgbe_get_invariants_X540, & mac_ops_X550EM_x, & eeprom_ops_X550EM_x, & phy_ops_X550EM_x, & mbx_ops_generic, (u32 const *)(& ixgbe_mvals_X550EM_x)}; 
/* ---- LDV verification harness below: externs for the per-ops-group state
 * machines and ldv_initialize_* stubs that allocate a dummy 1696-byte
 * struct ixgbe_hw for each ops group.  Harness-only code — not driver logic. */
extern int ldv_release_19(void) ; extern int ldv_probe_19(void) ; extern int ldv_release_20(void) ; extern int ldv_probe_20(void) ; extern int ldv_probe_17(void) ; extern int ldv_release_17(void) ; extern int ldv_probe_21(void) ; extern int ldv_release_21(void) ; extern int ldv_setup_16(void) ; extern int ldv_release_16(void) ; extern int ldv_release_18(void) ; extern int ldv_probe_18(void) ; void ldv_initialize_ixgbe_phy_operations_16(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); phy_ops_X550EM_x_group0 = (struct ixgbe_hw *)tmp; return; } } void ldv_initialize_ixgbe_eeprom_operations_18(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); eeprom_ops_X550EM_x_group0 = (struct ixgbe_hw *)tmp; return; } } void ldv_initialize_ixgbe_eeprom_operations_19(void) { void *tmp ; { 
tmp = ldv_init_zalloc(1696UL); eeprom_ops_X550_group0 = (struct ixgbe_hw *)tmp; return; } } void ldv_initialize_ixgbe_phy_operations_17(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); phy_ops_X550_group0 = (struct ixgbe_hw *)tmp; return; } } void ldv_initialize_ixgbe_mac_operations_21(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); mac_ops_X550_group0 = (struct ixgbe_hw *)tmp; return; } } void ldv_initialize_ixgbe_mac_operations_20(void) { void *tmp ; { tmp = ldv_init_zalloc(1696UL); mac_ops_X550EM_x_group0 = (struct ixgbe_hw *)tmp; return; } } void ldv_main_exported_21(void) { u32 *ldvarg75 ; void *tmp ; ixgbe_link_speed ldvarg52 ; u32 ldvarg74 ; bool *ldvarg76 ; void *tmp___0 ; u32 ldvarg82 ; bool ldvarg61 ; u16 *ldvarg54 ; void *tmp___1 ; bool ldvarg68 ; bool ldvarg78 ; u32 ldvarg70 ; bool ldvarg73 ; u32 ldvarg63 ; bool *ldvarg45 ; void *tmp___2 ; u32 ldvarg81 ; bool ldvarg40 ; struct net_device *ldvarg55 ; void *tmp___3 ; u8 ldvarg36 ; bool ldvarg66 ; u32 ldvarg79 ; u32 ldvarg57 ; int ldvarg65 ; int ldvarg62 ; unsigned int ldvarg49 ; u32 ldvarg59 ; u32 ldvarg80 ; int ldvarg60 ; bool *ldvarg41 ; void *tmp___4 ; u16 *ldvarg31 ; void *tmp___5 ; u8 ldvarg39 ; u8 *ldvarg47 ; void *tmp___6 ; u32 ldvarg34 ; u32 ldvarg48 ; u8 *ldvarg33 ; void *tmp___7 ; u32 ldvarg69 ; u8 ldvarg38 ; u32 ldvarg35 ; u8 *ldvarg44 ; void *tmp___8 ; int ldvarg64 ; u16 *ldvarg53 ; void *tmp___9 ; u8 ldvarg37 ; u32 ldvarg58 ; ixgbe_link_speed *ldvarg46 ; void *tmp___10 ; bool ldvarg50 ; u32 ldvarg56 ; u32 ldvarg72 ; u32 ldvarg71 ; ixgbe_link_speed *ldvarg42 ; void *tmp___11 ; u32 ldvarg43 ; int ldvarg67 ; u32 ldvarg32 ; bool ldvarg51 ; u32 ldvarg77 ; int tmp___12 ; { tmp = ldv_init_zalloc(4UL); ldvarg75 = (u32 *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg76 = (bool *)tmp___0; tmp___1 = ldv_init_zalloc(2UL); ldvarg54 = (u16 *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg45 = (bool *)tmp___2; tmp___3 = ldv_init_zalloc(3008UL); ldvarg55 = (struct net_device *)tmp___3; tmp___4 = 
ldv_init_zalloc(1UL); ldvarg41 = (bool *)tmp___4; tmp___5 = ldv_init_zalloc(2UL); ldvarg31 = (u16 *)tmp___5; tmp___6 = ldv_init_zalloc(1UL); ldvarg47 = (u8 *)tmp___6; tmp___7 = ldv_init_zalloc(1UL); ldvarg33 = (u8 *)tmp___7; tmp___8 = ldv_init_zalloc(1UL); ldvarg44 = (u8 *)tmp___8; tmp___9 = ldv_init_zalloc(2UL); ldvarg53 = (u16 *)tmp___9; tmp___10 = ldv_init_zalloc(4UL); ldvarg46 = (ixgbe_link_speed *)tmp___10; tmp___11 = ldv_init_zalloc(4UL); ldvarg42 = (ixgbe_link_speed *)tmp___11; ldv_memset((void *)(& ldvarg52), 0, 4UL); ldv_memset((void *)(& ldvarg74), 0, 4UL); ldv_memset((void *)(& ldvarg82), 0, 4UL); ldv_memset((void *)(& ldvarg61), 0, 1UL); ldv_memset((void *)(& ldvarg68), 0, 1UL); ldv_memset((void *)(& ldvarg78), 0, 1UL); ldv_memset((void *)(& ldvarg70), 0, 4UL); ldv_memset((void *)(& ldvarg73), 0, 1UL); ldv_memset((void *)(& ldvarg63), 0, 4UL); ldv_memset((void *)(& ldvarg81), 0, 4UL); ldv_memset((void *)(& ldvarg40), 0, 1UL); ldv_memset((void *)(& ldvarg36), 0, 1UL); ldv_memset((void *)(& ldvarg66), 0, 1UL); ldv_memset((void *)(& ldvarg79), 0, 4UL); ldv_memset((void *)(& ldvarg57), 0, 4UL); ldv_memset((void *)(& ldvarg65), 0, 4UL); ldv_memset((void *)(& ldvarg62), 0, 4UL); ldv_memset((void *)(& ldvarg49), 0, 4UL); ldv_memset((void *)(& ldvarg59), 0, 4UL); ldv_memset((void *)(& ldvarg80), 0, 4UL); ldv_memset((void *)(& ldvarg60), 0, 4UL); ldv_memset((void *)(& ldvarg39), 0, 1UL); ldv_memset((void *)(& ldvarg34), 0, 4UL); ldv_memset((void *)(& ldvarg48), 0, 4UL); ldv_memset((void *)(& ldvarg69), 0, 4UL); ldv_memset((void *)(& ldvarg38), 0, 1UL); ldv_memset((void *)(& ldvarg35), 0, 4UL); ldv_memset((void *)(& ldvarg64), 0, 4UL); ldv_memset((void *)(& ldvarg37), 0, 1UL); ldv_memset((void *)(& ldvarg58), 0, 4UL); ldv_memset((void *)(& ldvarg50), 0, 1UL); ldv_memset((void *)(& ldvarg56), 0, 4UL); ldv_memset((void *)(& ldvarg72), 0, 4UL); ldv_memset((void *)(& ldvarg71), 0, 4UL); ldv_memset((void *)(& ldvarg43), 0, 4UL); ldv_memset((void *)(& ldvarg67), 0, 
4UL); ldv_memset((void *)(& ldvarg32), 0, 4UL); ldv_memset((void *)(& ldvarg51), 0, 1UL); ldv_memset((void *)(& ldvarg77), 0, 4UL); tmp___12 = __VERIFIER_nondet_int(); switch (tmp___12) { case 0: ; if (ldv_state_variable_21 == 1) { ixgbe_stop_adapter_generic(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_stop_adapter_generic(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 1: ; if (ldv_state_variable_21 == 1) { ixgbe_set_vmdq_san_mac_generic(mac_ops_X550_group0, ldvarg82); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_set_vmdq_san_mac_generic(mac_ops_X550_group0, ldvarg82); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 2: ; if (ldv_state_variable_21 == 1) { ixgbe_led_off_generic(mac_ops_X550_group0, ldvarg81); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_led_off_generic(mac_ops_X550_group0, ldvarg81); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 3: ; if (ldv_state_variable_21 == 1) { ixgbe_set_vfta_generic(mac_ops_X550_group0, ldvarg80, ldvarg79, (int )ldvarg78); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_set_vfta_generic(mac_ops_X550_group0, ldvarg80, ldvarg79, (int )ldvarg78); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 4: ; if (ldv_state_variable_21 == 1) { ixgbe_enable_rx_dma_generic(mac_ops_X550_group0, ldvarg77); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_enable_rx_dma_generic(mac_ops_X550_group0, ldvarg77); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 5: ; if (ldv_state_variable_21 == 2) { prot_autoc_read_generic(mac_ops_X550_group0, ldvarg76, ldvarg75); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 6: ; if (ldv_state_variable_21 == 1) { ixgbe_enable_rx_buff_generic(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { 
ixgbe_enable_rx_buff_generic(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 7: ; if (ldv_state_variable_21 == 2) { prot_autoc_write_generic(mac_ops_X550_group0, ldvarg74, (int )ldvarg73); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 8: ; if (ldv_state_variable_21 == 1) { ixgbe_led_on_generic(mac_ops_X550_group0, ldvarg72); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_led_on_generic(mac_ops_X550_group0, ldvarg72); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 9: ; if (ldv_state_variable_21 == 1) { ixgbe_blink_led_stop_X540(mac_ops_X550_group0, ldvarg71); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_blink_led_stop_X540(mac_ops_X550_group0, ldvarg71); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 10: ; if (ldv_state_variable_21 == 1) { ixgbe_clear_rar_generic(mac_ops_X550_group0, ldvarg70); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_clear_rar_generic(mac_ops_X550_group0, ldvarg70); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 11: ; if (ldv_state_variable_21 == 1) { ixgbe_enable_rx_generic(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_enable_rx_generic(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 12: ; if (ldv_state_variable_21 == 1) { ixgbe_get_bus_info_generic(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_get_bus_info_generic(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 13: ; if (ldv_state_variable_21 == 1) { ixgbe_blink_led_start_X540(mac_ops_X550_group0, ldvarg69); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_blink_led_start_X540(mac_ops_X550_group0, ldvarg69); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 14: ; if (ldv_state_variable_21 == 1) { 
ixgbe_set_ethertype_anti_spoofing_X550(mac_ops_X550_group0, (int )ldvarg68, ldvarg67); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_set_ethertype_anti_spoofing_X550(mac_ops_X550_group0, (int )ldvarg68, ldvarg67); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 15: ; if (ldv_state_variable_21 == 1) { ixgbe_disable_mc_generic(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_disable_mc_generic(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 16: ; if (ldv_state_variable_21 == 1) { ixgbe_set_vlan_anti_spoofing(mac_ops_X550_group0, (int )ldvarg66, ldvarg65); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_set_vlan_anti_spoofing(mac_ops_X550_group0, (int )ldvarg66, ldvarg65); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 17: ; if (ldv_state_variable_21 == 1) { ixgbe_set_rxpba_generic(mac_ops_X550_group0, ldvarg64, ldvarg63, ldvarg62); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_set_rxpba_generic(mac_ops_X550_group0, ldvarg64, ldvarg63, ldvarg62); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 18: ; if (ldv_state_variable_21 == 1) { ixgbe_init_uta_tables_generic(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_init_uta_tables_generic(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 19: ; if (ldv_state_variable_21 == 1) { ixgbe_set_mac_anti_spoofing(mac_ops_X550_group0, (int )ldvarg61, ldvarg60); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_set_mac_anti_spoofing(mac_ops_X550_group0, (int )ldvarg61, ldvarg60); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 20: ; if (ldv_state_variable_21 == 1) { ixgbe_set_vmdq_generic(mac_ops_X550_group0, ldvarg59, ldvarg58); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { 
ixgbe_set_vmdq_generic(mac_ops_X550_group0, ldvarg59, ldvarg58); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 21: ; if (ldv_state_variable_21 == 1) { ixgbe_clear_vmdq_generic(mac_ops_X550_group0, ldvarg57, ldvarg56); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_clear_vmdq_generic(mac_ops_X550_group0, ldvarg57, ldvarg56); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 22: ; if (ldv_state_variable_21 == 1) { ixgbe_clear_vfta_generic(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_clear_vfta_generic(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 23: ; if (ldv_state_variable_21 == 1) { ixgbe_get_media_type_X540(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_get_media_type_X540(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 24: ; if (ldv_state_variable_21 == 1) { ixgbe_update_mc_addr_list_generic(mac_ops_X550_group0, ldvarg55); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_update_mc_addr_list_generic(mac_ops_X550_group0, ldvarg55); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 25: ; if (ldv_state_variable_21 == 1) { ixgbe_init_rx_addrs_generic(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_init_rx_addrs_generic(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 26: ; if (ldv_state_variable_21 == 1) { ixgbe_fc_enable_generic(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_fc_enable_generic(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 27: ; if (ldv_state_variable_21 == 1) { ixgbe_get_wwn_prefix_generic(mac_ops_X550_group0, ldvarg54, ldvarg53); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { 
ixgbe_get_wwn_prefix_generic(mac_ops_X550_group0, ldvarg54, ldvarg53); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 28: ; if (ldv_state_variable_21 == 1) { ixgbe_clear_hw_cntrs_generic(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_clear_hw_cntrs_generic(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 29: ; if (ldv_state_variable_21 == 1) { ixgbe_setup_mac_link_X540(mac_ops_X550_group0, ldvarg52, (int )ldvarg51); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_setup_mac_link_X540(mac_ops_X550_group0, ldvarg52, (int )ldvarg51); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 30: ; if (ldv_state_variable_21 == 1) { ixgbe_disable_rx_x550(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_disable_rx_x550(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 31: ; if (ldv_state_variable_21 == 1) { ixgbe_set_source_address_pruning_X550(mac_ops_X550_group0, (int )ldvarg50, ldvarg49); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_set_source_address_pruning_X550(mac_ops_X550_group0, (int )ldvarg50, ldvarg49); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 32: ; if (ldv_state_variable_21 == 1) { ixgbe_set_lan_id_multi_port_pcie(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_set_lan_id_multi_port_pcie(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 33: ; if (ldv_state_variable_21 == 1) { ixgbe_acquire_swfw_sync_X540(mac_ops_X550_group0, ldvarg48); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_acquire_swfw_sync_X540(mac_ops_X550_group0, ldvarg48); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 34: ; if (ldv_state_variable_21 == 1) { ixgbe_start_hw_X540(mac_ops_X550_group0); 
ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_start_hw_X540(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 35: ; if (ldv_state_variable_21 == 1) { ixgbe_enable_mc_generic(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_enable_mc_generic(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 36: ; if (ldv_state_variable_21 == 1) { ixgbe_get_mac_addr_generic(mac_ops_X550_group0, ldvarg47); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_get_mac_addr_generic(mac_ops_X550_group0, ldvarg47); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 37: ; if (ldv_state_variable_21 == 1) { ixgbe_get_copper_link_capabilities_generic(mac_ops_X550_group0, ldvarg46, ldvarg45); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_get_copper_link_capabilities_generic(mac_ops_X550_group0, ldvarg46, ldvarg45); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 38: ; if (ldv_state_variable_21 == 1) { ixgbe_get_san_mac_addr_generic(mac_ops_X550_group0, ldvarg44); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_get_san_mac_addr_generic(mac_ops_X550_group0, ldvarg44); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 39: ; if (ldv_state_variable_21 == 1) { ixgbe_init_hw_generic(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_init_hw_generic(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 40: ; if (ldv_state_variable_21 == 1) { ixgbe_reset_hw_X540(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_reset_hw_X540(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 41: ; if (ldv_state_variable_21 == 1) { ixgbe_release_swfw_sync_X540(mac_ops_X550_group0, ldvarg43); ldv_state_variable_21 
= 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_release_swfw_sync_X540(mac_ops_X550_group0, ldvarg43); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 42: ; if (ldv_state_variable_21 == 1) { ixgbe_check_mac_link_generic(mac_ops_X550_group0, ldvarg42, ldvarg41, (int )ldvarg40); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_check_mac_link_generic(mac_ops_X550_group0, ldvarg42, ldvarg41, (int )ldvarg40); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 43: ; if (ldv_state_variable_21 == 1) { ixgbe_set_fw_drv_ver_generic(mac_ops_X550_group0, (int )ldvarg38, (int )ldvarg37, (int )ldvarg36, (int )ldvarg39); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_set_fw_drv_ver_generic(mac_ops_X550_group0, (int )ldvarg38, (int )ldvarg37, (int )ldvarg36, (int )ldvarg39); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 44: ; if (ldv_state_variable_21 == 1) { ixgbe_set_rar_generic(mac_ops_X550_group0, ldvarg34, ldvarg33, ldvarg32, ldvarg35); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_set_rar_generic(mac_ops_X550_group0, ldvarg34, ldvarg33, ldvarg32, ldvarg35); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 45: ; if (ldv_state_variable_21 == 1) { ixgbe_get_device_caps_generic(mac_ops_X550_group0, ldvarg31); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_get_device_caps_generic(mac_ops_X550_group0, ldvarg31); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 46: ; if (ldv_state_variable_21 == 1) { ixgbe_disable_rx_buff_generic(mac_ops_X550_group0); ldv_state_variable_21 = 1; } else { } if (ldv_state_variable_21 == 2) { ixgbe_disable_rx_buff_generic(mac_ops_X550_group0); ldv_state_variable_21 = 2; } else { } goto ldv_55950; case 47: ; if (ldv_state_variable_21 == 2) { ldv_release_21(); ldv_state_variable_21 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_55950; case 48: ; if (ldv_state_variable_21 == 
1) { ldv_probe_21(); ldv_state_variable_21 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_55950; default: ldv_stop(); } ldv_55950: ; return; } } void ldv_main_exported_17(void) { bool ldvarg107 ; u8 ldvarg117 ; u8 ldvarg110 ; ixgbe_link_speed ldvarg108 ; u32 ldvarg105 ; u16 ldvarg124 ; u8 *ldvarg116 ; void *tmp ; u32 ldvarg103 ; u8 ldvarg99 ; u8 *ldvarg114 ; void *tmp___0 ; bool ldvarg119 ; u8 ldvarg125 ; u8 ldvarg113 ; u16 *ldvarg120 ; void *tmp___1 ; u8 ldvarg112 ; u32 ldvarg102 ; u8 *ldvarg98 ; void *tmp___2 ; u8 ldvarg111 ; u16 ldvarg123 ; u8 ldvarg109 ; u8 ldvarg115 ; u16 *ldvarg118 ; void *tmp___3 ; u8 ldvarg100 ; u8 ldvarg122 ; u16 *ldvarg101 ; void *tmp___4 ; u32 ldvarg106 ; u16 ldvarg104 ; u16 ldvarg121 ; int tmp___5 ; { tmp = ldv_init_zalloc(1UL); ldvarg116 = (u8 *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg114 = (u8 *)tmp___0; tmp___1 = ldv_init_zalloc(2UL); ldvarg120 = (u16 *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg98 = (u8 *)tmp___2; tmp___3 = ldv_init_zalloc(2UL); ldvarg118 = (u16 *)tmp___3; tmp___4 = ldv_init_zalloc(2UL); ldvarg101 = (u16 *)tmp___4; ldv_memset((void *)(& ldvarg107), 0, 1UL); ldv_memset((void *)(& ldvarg117), 0, 1UL); ldv_memset((void *)(& ldvarg110), 0, 1UL); ldv_memset((void *)(& ldvarg108), 0, 4UL); ldv_memset((void *)(& ldvarg105), 0, 4UL); ldv_memset((void *)(& ldvarg124), 0, 2UL); ldv_memset((void *)(& ldvarg103), 0, 4UL); ldv_memset((void *)(& ldvarg99), 0, 1UL); ldv_memset((void *)(& ldvarg119), 0, 1UL); ldv_memset((void *)(& ldvarg125), 0, 1UL); ldv_memset((void *)(& ldvarg113), 0, 1UL); ldv_memset((void *)(& ldvarg112), 0, 1UL); ldv_memset((void *)(& ldvarg102), 0, 4UL); ldv_memset((void *)(& ldvarg111), 0, 1UL); ldv_memset((void *)(& ldvarg123), 0, 2UL); ldv_memset((void *)(& ldvarg109), 0, 1UL); ldv_memset((void *)(& ldvarg115), 0, 1UL); ldv_memset((void *)(& ldvarg100), 0, 1UL); ldv_memset((void *)(& ldvarg122), 0, 1UL); ldv_memset((void *)(& ldvarg106), 0, 4UL); ldv_memset((void *)(& ldvarg104), 0, 2UL); 
ldv_memset((void *)(& ldvarg121), 0, 2UL); tmp___5 = __VERIFIER_nondet_int(); switch (tmp___5) { case 0: ; if (ldv_state_variable_17 == 2) { ixgbe_write_i2c_combined_generic(phy_ops_X550_group0, (int )ldvarg125, (int )ldvarg124, (int )ldvarg123); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 1: ; if (ldv_state_variable_17 == 2) { ixgbe_read_i2c_combined_generic(phy_ops_X550_group0, (int )ldvarg122, (int )ldvarg121, ldvarg120); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 2: ; if (ldv_state_variable_17 == 1) { ixgbe_set_copper_phy_power(phy_ops_X550_group0, (int )ldvarg119); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { ixgbe_set_copper_phy_power(phy_ops_X550_group0, (int )ldvarg119); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 3: ; if (ldv_state_variable_17 == 1) { ixgbe_get_phy_firmware_version_generic(phy_ops_X550_group0, ldvarg118); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { ixgbe_get_phy_firmware_version_generic(phy_ops_X550_group0, ldvarg118); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 4: ; if (ldv_state_variable_17 == 1) { ixgbe_read_i2c_eeprom_generic(phy_ops_X550_group0, (int )ldvarg117, ldvarg116); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { ixgbe_read_i2c_eeprom_generic(phy_ops_X550_group0, (int )ldvarg117, ldvarg116); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 5: ; if (ldv_state_variable_17 == 1) { ixgbe_read_i2c_sff8472_generic(phy_ops_X550_group0, (int )ldvarg115, ldvarg114); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { ixgbe_read_i2c_sff8472_generic(phy_ops_X550_group0, (int )ldvarg115, ldvarg114); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 6: ; if (ldv_state_variable_17 == 1) { ixgbe_write_i2c_byte_generic(phy_ops_X550_group0, (int )ldvarg113, (int )ldvarg112, (int )ldvarg111); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { 
ixgbe_write_i2c_byte_generic(phy_ops_X550_group0, (int )ldvarg113, (int )ldvarg112, (int )ldvarg111); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 7: ; if (ldv_state_variable_17 == 1) { ixgbe_setup_phy_link_generic(phy_ops_X550_group0); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { ixgbe_setup_phy_link_generic(phy_ops_X550_group0); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 8: ; if (ldv_state_variable_17 == 1) { ixgbe_identify_phy_generic(phy_ops_X550_group0); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { ixgbe_identify_phy_generic(phy_ops_X550_group0); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 9: ; if (ldv_state_variable_17 == 1) { ixgbe_write_i2c_eeprom_generic(phy_ops_X550_group0, (int )ldvarg110, (int )ldvarg109); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { ixgbe_write_i2c_eeprom_generic(phy_ops_X550_group0, (int )ldvarg110, (int )ldvarg109); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 10: ; if (ldv_state_variable_17 == 1) { ixgbe_setup_phy_link_speed_generic(phy_ops_X550_group0, ldvarg108, (int )ldvarg107); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { ixgbe_setup_phy_link_speed_generic(phy_ops_X550_group0, ldvarg108, (int )ldvarg107); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 11: ; if (ldv_state_variable_17 == 1) { ixgbe_write_phy_reg_generic(phy_ops_X550_group0, ldvarg106, ldvarg105, (int )ldvarg104); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { ixgbe_write_phy_reg_generic(phy_ops_X550_group0, ldvarg106, ldvarg105, (int )ldvarg104); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 12: ; if (ldv_state_variable_17 == 1) { ixgbe_identify_module_generic(phy_ops_X550_group0); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { ixgbe_identify_module_generic(phy_ops_X550_group0); ldv_state_variable_17 = 2; } else { } goto 
ldv_56032; case 13: ; if (ldv_state_variable_17 == 1) { ixgbe_read_phy_reg_generic(phy_ops_X550_group0, ldvarg103, ldvarg102, ldvarg101); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { ixgbe_read_phy_reg_generic(phy_ops_X550_group0, ldvarg103, ldvarg102, ldvarg101); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 14: ; if (ldv_state_variable_17 == 1) { ixgbe_tn_check_overtemp(phy_ops_X550_group0); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { ixgbe_tn_check_overtemp(phy_ops_X550_group0); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 15: ; if (ldv_state_variable_17 == 1) { ixgbe_read_i2c_byte_generic(phy_ops_X550_group0, (int )ldvarg100, (int )ldvarg99, ldvarg98); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { ixgbe_read_i2c_byte_generic(phy_ops_X550_group0, (int )ldvarg100, (int )ldvarg99, ldvarg98); ldv_state_variable_17 = 2; } else { } goto ldv_56032; case 16: ; if (ldv_state_variable_17 == 2) { ldv_release_17(); ldv_state_variable_17 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_56032; case 17: ; if (ldv_state_variable_17 == 1) { ldv_probe_17(); ldv_state_variable_17 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_56032; default: ldv_stop(); } ldv_56032: ; return; } } void ldv_main_exported_20(void) { int ldvarg294 ; u32 ldvarg292 ; bool ldvarg283 ; u8 ldvarg273 ; ixgbe_link_speed *ldvarg279 ; void *tmp ; u32 ldvarg298 ; u8 *ldvarg267 ; void *tmp___0 ; bool ldvarg297 ; u16 *ldvarg265 ; void *tmp___1 ; ixgbe_link_speed *ldvarg276 ; void *tmp___2 ; u32 ldvarg287 ; u32 ldvarg286 ; u8 ldvarg270 ; u32 ldvarg301 ; unsigned int ldvarg282 ; u32 ldvarg306 ; int ldvarg289 ; int ldvarg291 ; u32 ldvarg288 ; u32 ldvarg277 ; bool *ldvarg275 ; void *tmp___3 ; int ldvarg293 ; u8 ldvarg272 ; bool ldvarg274 ; u32 *ldvarg304 ; void *tmp___4 ; u8 *ldvarg280 ; void *tmp___5 ; struct net_device *ldvarg284 ; void *tmp___6 ; bool ldvarg302 ; u32 ldvarg303 ; u8 ldvarg271 ; u32 ldvarg281 
; int ldvarg296 ; bool ldvarg295 ; u32 ldvarg310 ; u32 ldvarg300 ; u32 ldvarg285 ; u32 ldvarg309 ; u32 ldvarg311 ; u32 ldvarg266 ; bool *ldvarg278 ; void *tmp___7 ; u32 ldvarg299 ; bool ldvarg307 ; bool *ldvarg305 ; void *tmp___8 ; u32 ldvarg268 ; u32 ldvarg308 ; u32 ldvarg269 ; bool ldvarg290 ; int tmp___9 ; { tmp = ldv_init_zalloc(4UL); ldvarg279 = (ixgbe_link_speed *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg267 = (u8 *)tmp___0; tmp___1 = ldv_init_zalloc(2UL); ldvarg265 = (u16 *)tmp___1; tmp___2 = ldv_init_zalloc(4UL); ldvarg276 = (ixgbe_link_speed *)tmp___2; tmp___3 = ldv_init_zalloc(1UL); ldvarg275 = (bool *)tmp___3; tmp___4 = ldv_init_zalloc(4UL); ldvarg304 = (u32 *)tmp___4; tmp___5 = ldv_init_zalloc(1UL); ldvarg280 = (u8 *)tmp___5; tmp___6 = ldv_init_zalloc(3008UL); ldvarg284 = (struct net_device *)tmp___6; tmp___7 = ldv_init_zalloc(1UL); ldvarg278 = (bool *)tmp___7; tmp___8 = ldv_init_zalloc(1UL); ldvarg305 = (bool *)tmp___8; ldv_memset((void *)(& ldvarg294), 0, 4UL); ldv_memset((void *)(& ldvarg292), 0, 4UL); ldv_memset((void *)(& ldvarg283), 0, 1UL); ldv_memset((void *)(& ldvarg273), 0, 1UL); ldv_memset((void *)(& ldvarg298), 0, 4UL); ldv_memset((void *)(& ldvarg297), 0, 1UL); ldv_memset((void *)(& ldvarg287), 0, 4UL); ldv_memset((void *)(& ldvarg286), 0, 4UL); ldv_memset((void *)(& ldvarg270), 0, 1UL); ldv_memset((void *)(& ldvarg301), 0, 4UL); ldv_memset((void *)(& ldvarg282), 0, 4UL); ldv_memset((void *)(& ldvarg306), 0, 4UL); ldv_memset((void *)(& ldvarg289), 0, 4UL); ldv_memset((void *)(& ldvarg291), 0, 4UL); ldv_memset((void *)(& ldvarg288), 0, 4UL); ldv_memset((void *)(& ldvarg277), 0, 4UL); ldv_memset((void *)(& ldvarg293), 0, 4UL); ldv_memset((void *)(& ldvarg272), 0, 1UL); ldv_memset((void *)(& ldvarg274), 0, 1UL); ldv_memset((void *)(& ldvarg302), 0, 1UL); ldv_memset((void *)(& ldvarg303), 0, 4UL); ldv_memset((void *)(& ldvarg271), 0, 1UL); ldv_memset((void *)(& ldvarg281), 0, 4UL); ldv_memset((void *)(& ldvarg296), 0, 4UL); ldv_memset((void 
*)(& ldvarg295), 0, 1UL); ldv_memset((void *)(& ldvarg310), 0, 4UL); ldv_memset((void *)(& ldvarg300), 0, 4UL); ldv_memset((void *)(& ldvarg285), 0, 4UL); ldv_memset((void *)(& ldvarg309), 0, 4UL); ldv_memset((void *)(& ldvarg311), 0, 4UL); ldv_memset((void *)(& ldvarg266), 0, 4UL); ldv_memset((void *)(& ldvarg299), 0, 4UL); ldv_memset((void *)(& ldvarg307), 0, 1UL); ldv_memset((void *)(& ldvarg268), 0, 4UL); ldv_memset((void *)(& ldvarg308), 0, 4UL); ldv_memset((void *)(& ldvarg269), 0, 4UL); ldv_memset((void *)(& ldvarg290), 0, 1UL); tmp___9 = __VERIFIER_nondet_int(); switch (tmp___9) { case 0: ; if (ldv_state_variable_20 == 1) { ixgbe_stop_adapter_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_stop_adapter_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 1: ; if (ldv_state_variable_20 == 1) { ixgbe_set_vmdq_san_mac_generic(mac_ops_X550EM_x_group0, ldvarg311); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_set_vmdq_san_mac_generic(mac_ops_X550EM_x_group0, ldvarg311); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 2: ; if (ldv_state_variable_20 == 1) { ixgbe_led_off_generic(mac_ops_X550EM_x_group0, ldvarg310); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_led_off_generic(mac_ops_X550EM_x_group0, ldvarg310); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 3: ; if (ldv_state_variable_20 == 1) { ixgbe_set_vfta_generic(mac_ops_X550EM_x_group0, ldvarg309, ldvarg308, (int )ldvarg307); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_set_vfta_generic(mac_ops_X550EM_x_group0, ldvarg309, ldvarg308, (int )ldvarg307); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 4: ; if (ldv_state_variable_20 == 1) { ixgbe_enable_rx_dma_generic(mac_ops_X550EM_x_group0, ldvarg306); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { 
ixgbe_enable_rx_dma_generic(mac_ops_X550EM_x_group0, ldvarg306); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 5: ; if (ldv_state_variable_20 == 2) { prot_autoc_read_generic(mac_ops_X550EM_x_group0, ldvarg305, ldvarg304); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 6: ; if (ldv_state_variable_20 == 1) { ixgbe_enable_rx_buff_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_enable_rx_buff_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 7: ; if (ldv_state_variable_20 == 2) { prot_autoc_write_generic(mac_ops_X550EM_x_group0, ldvarg303, (int )ldvarg302); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 8: ; if (ldv_state_variable_20 == 1) { ixgbe_led_on_generic(mac_ops_X550EM_x_group0, ldvarg301); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_led_on_generic(mac_ops_X550EM_x_group0, ldvarg301); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 9: ; if (ldv_state_variable_20 == 1) { ixgbe_blink_led_stop_X540(mac_ops_X550EM_x_group0, ldvarg300); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_blink_led_stop_X540(mac_ops_X550EM_x_group0, ldvarg300); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 10: ; if (ldv_state_variable_20 == 1) { ixgbe_clear_rar_generic(mac_ops_X550EM_x_group0, ldvarg299); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_clear_rar_generic(mac_ops_X550EM_x_group0, ldvarg299); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 11: ; if (ldv_state_variable_20 == 1) { ixgbe_enable_rx_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_enable_rx_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 12: ; if (ldv_state_variable_20 == 1) { ixgbe_get_bus_info_generic(mac_ops_X550EM_x_group0); 
ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_get_bus_info_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 13: ; if (ldv_state_variable_20 == 1) { ixgbe_blink_led_start_X540(mac_ops_X550EM_x_group0, ldvarg298); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_blink_led_start_X540(mac_ops_X550EM_x_group0, ldvarg298); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 14: ; if (ldv_state_variable_20 == 1) { ixgbe_set_ethertype_anti_spoofing_X550(mac_ops_X550EM_x_group0, (int )ldvarg297, ldvarg296); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_set_ethertype_anti_spoofing_X550(mac_ops_X550EM_x_group0, (int )ldvarg297, ldvarg296); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 15: ; if (ldv_state_variable_20 == 1) { ixgbe_disable_mc_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_disable_mc_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 16: ; if (ldv_state_variable_20 == 1) { ixgbe_set_vlan_anti_spoofing(mac_ops_X550EM_x_group0, (int )ldvarg295, ldvarg294); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_set_vlan_anti_spoofing(mac_ops_X550EM_x_group0, (int )ldvarg295, ldvarg294); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 17: ; if (ldv_state_variable_20 == 1) { ixgbe_set_rxpba_generic(mac_ops_X550EM_x_group0, ldvarg293, ldvarg292, ldvarg291); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_set_rxpba_generic(mac_ops_X550EM_x_group0, ldvarg293, ldvarg292, ldvarg291); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 18: ; if (ldv_state_variable_20 == 1) { ixgbe_init_uta_tables_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { 
ixgbe_init_uta_tables_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 19: ; if (ldv_state_variable_20 == 1) { ixgbe_set_mac_anti_spoofing(mac_ops_X550EM_x_group0, (int )ldvarg290, ldvarg289); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_set_mac_anti_spoofing(mac_ops_X550EM_x_group0, (int )ldvarg290, ldvarg289); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 20: ; if (ldv_state_variable_20 == 1) { ixgbe_set_vmdq_generic(mac_ops_X550EM_x_group0, ldvarg288, ldvarg287); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_set_vmdq_generic(mac_ops_X550EM_x_group0, ldvarg288, ldvarg287); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 21: ; if (ldv_state_variable_20 == 1) { ixgbe_clear_vmdq_generic(mac_ops_X550EM_x_group0, ldvarg286, ldvarg285); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_clear_vmdq_generic(mac_ops_X550EM_x_group0, ldvarg286, ldvarg285); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 22: ; if (ldv_state_variable_20 == 1) { ixgbe_clear_vfta_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_clear_vfta_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 23: ; if (ldv_state_variable_20 == 1) { ixgbe_get_media_type_X550em(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_get_media_type_X550em(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 24: ; if (ldv_state_variable_20 == 1) { ixgbe_update_mc_addr_list_generic(mac_ops_X550EM_x_group0, ldvarg284); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_update_mc_addr_list_generic(mac_ops_X550EM_x_group0, ldvarg284); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 25: ; if (ldv_state_variable_20 == 1) { 
ixgbe_init_rx_addrs_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_init_rx_addrs_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 26: ; if (ldv_state_variable_20 == 1) { ixgbe_fc_enable_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_fc_enable_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 27: ; if (ldv_state_variable_20 == 1) { ixgbe_clear_hw_cntrs_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_clear_hw_cntrs_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 28: ; if (ldv_state_variable_20 == 1) { ixgbe_disable_rx_x550(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_disable_rx_x550(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 29: ; if (ldv_state_variable_20 == 1) { ixgbe_set_source_address_pruning_X550(mac_ops_X550EM_x_group0, (int )ldvarg283, ldvarg282); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_set_source_address_pruning_X550(mac_ops_X550EM_x_group0, (int )ldvarg283, ldvarg282); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 30: ; if (ldv_state_variable_20 == 1) { ixgbe_set_lan_id_multi_port_pcie(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_set_lan_id_multi_port_pcie(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 31: ; if (ldv_state_variable_20 == 1) { ixgbe_acquire_swfw_sync_X540(mac_ops_X550EM_x_group0, ldvarg281); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_acquire_swfw_sync_X540(mac_ops_X550EM_x_group0, ldvarg281); ldv_state_variable_20 = 2; } else { } goto ldv_56102; 
case 32: ; if (ldv_state_variable_20 == 1) { ixgbe_start_hw_X540(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_start_hw_X540(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 33: ; if (ldv_state_variable_20 == 1) { ixgbe_enable_mc_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_enable_mc_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 34: ; if (ldv_state_variable_20 == 1) { ixgbe_get_mac_addr_generic(mac_ops_X550EM_x_group0, ldvarg280); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_get_mac_addr_generic(mac_ops_X550EM_x_group0, ldvarg280); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 35: ; if (ldv_state_variable_20 == 1) { ixgbe_get_link_capabilities_X550em(mac_ops_X550EM_x_group0, ldvarg279, ldvarg278); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_get_link_capabilities_X550em(mac_ops_X550EM_x_group0, ldvarg279, ldvarg278); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 36: ; if (ldv_state_variable_20 == 1) { ixgbe_init_hw_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_init_hw_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 37: ; if (ldv_state_variable_20 == 1) { ixgbe_reset_hw_X550em(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_reset_hw_X550em(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 38: ; if (ldv_state_variable_20 == 1) { ixgbe_release_swfw_sync_X540(mac_ops_X550EM_x_group0, ldvarg277); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_release_swfw_sync_X540(mac_ops_X550EM_x_group0, ldvarg277); ldv_state_variable_20 = 2; } else { } goto 
ldv_56102; case 39: ; if (ldv_state_variable_20 == 1) { ixgbe_check_mac_link_generic(mac_ops_X550EM_x_group0, ldvarg276, ldvarg275, (int )ldvarg274); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_check_mac_link_generic(mac_ops_X550EM_x_group0, ldvarg276, ldvarg275, (int )ldvarg274); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 40: ; if (ldv_state_variable_20 == 1) { ixgbe_set_fw_drv_ver_generic(mac_ops_X550EM_x_group0, (int )ldvarg272, (int )ldvarg271, (int )ldvarg270, (int )ldvarg273); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_set_fw_drv_ver_generic(mac_ops_X550EM_x_group0, (int )ldvarg272, (int )ldvarg271, (int )ldvarg270, (int )ldvarg273); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 41: ; if (ldv_state_variable_20 == 1) { ixgbe_set_rar_generic(mac_ops_X550EM_x_group0, ldvarg268, ldvarg267, ldvarg266, ldvarg269); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_set_rar_generic(mac_ops_X550EM_x_group0, ldvarg268, ldvarg267, ldvarg266, ldvarg269); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 42: ; if (ldv_state_variable_20 == 1) { ixgbe_setup_sfp_modules_X550em(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_setup_sfp_modules_X550em(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 43: ; if (ldv_state_variable_20 == 1) { ixgbe_get_device_caps_generic(mac_ops_X550EM_x_group0, ldvarg265); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_get_device_caps_generic(mac_ops_X550EM_x_group0, ldvarg265); ldv_state_variable_20 = 2; } else { } goto ldv_56102; case 44: ; if (ldv_state_variable_20 == 1) { ixgbe_disable_rx_buff_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 2) { ixgbe_disable_rx_buff_generic(mac_ops_X550EM_x_group0); ldv_state_variable_20 = 2; } else { } 
/* Machine-generated LDV/CIL verification harness -- do not hand-edit; regenerate instead.
 * Tail of the ldv_state_variable_20 FSM driver: case 45 models release (only legal in
 * state 2: back to state 1, ref_cnt decremented), case 46 models probe (state 1 -> 2,
 * ref_cnt incremented), any other choice halts exploration via ldv_stop(). */
goto ldv_56102; case 45: ; if (ldv_state_variable_20 == 2) { ldv_release_20(); ldv_state_variable_20 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_56102; case 46: ; if (ldv_state_variable_20 == 1) { ldv_probe_20(); ldv_state_variable_20 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_56102; default: ldv_stop(); } ldv_56102: ; return; } }
/* FSM driver for a one-entry ops table: allocates a zeroed 1696-byte struct ixgbe_hw and
 * nondeterministically invokes ixgbe_get_invariants_X540 on it (state 15 never changes). */
void ldv_main_exported_15(void) { struct ixgbe_hw *ldvarg442 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1696UL); ldvarg442 = (struct ixgbe_hw *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_15 == 1) { ixgbe_get_invariants_X540(ldvarg442); ldv_state_variable_15 = 1; } else { } goto ldv_56155; default: ldv_stop(); } ldv_56155: ; return; } }
/* Identical shape to ldv_main_exported_15, driving a second instance (state variable 14). */
void ldv_main_exported_14(void) { struct ixgbe_hw *ldvarg312 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1696UL); ldvarg312 = (struct ixgbe_hw *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_14 == 1) { ixgbe_get_invariants_X540(ldvarg312); ldv_state_variable_14 = 1; } else { } goto ldv_56162; default: ldv_stop(); } ldv_56162: ; return; } }
/* FSM driver for the X550EM-x EEPROM ops table (state variable 18): allocates 2-byte
 * zeroed buffers for the u16* arguments, zeroes the scalar u16 arguments, then picks one
 * op nondeterministically.  This line ends mid-statement ("switch"); the controlling
 * expression continues on the next physical line. */
void ldv_main_exported_18(void) { u16 ldvarg130 ; u16 ldvarg135 ; u16 ldvarg129 ; u16 ldvarg136 ; u16 *ldvarg126 ; void *tmp ; u16 ldvarg132 ; u16 ldvarg128 ; u16 ldvarg133 ; u16 *ldvarg134 ; void *tmp___0 ; u16 *ldvarg127 ; void *tmp___1 ; u16 *ldvarg131 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(2UL); ldvarg126 = (u16 *)tmp; tmp___0 = ldv_init_zalloc(2UL); ldvarg134 = (u16 *)tmp___0; tmp___1 = ldv_init_zalloc(2UL); ldvarg127 = (u16 *)tmp___1; tmp___2 = ldv_init_zalloc(2UL); ldvarg131 = (u16 *)tmp___2; ldv_memset((void *)(& ldvarg130), 0, 2UL); ldv_memset((void *)(& ldvarg135), 0, 2UL); ldv_memset((void *)(& ldvarg129), 0, 2UL); ldv_memset((void *)(& ldvarg136), 0, 2UL); ldv_memset((void *)(& ldvarg132), 0, 2UL); ldv_memset((void *)(& ldvarg128), 0, 2UL); ldv_memset((void *)(& ldvarg133), 0, 2UL); tmp___3 = __VERIFIER_nondet_int(); switch 
/* Dispatch of the exported_18 EEPROM-ops FSM.  Buffered read/write (cases 0-1) are only
 * modeled in state 2 (after probe); checksum update/calc, single-word read/write, param
 * init and checksum validation (cases 2-7) are callable from either state and leave the
 * state unchanged. */
(tmp___3) { case 0: ; if (ldv_state_variable_18 == 2) { ixgbe_write_ee_hostif_buffer_X550(eeprom_ops_X550EM_x_group0, (int )ldvarg136, (int )ldvarg135, ldvarg134); ldv_state_variable_18 = 2; } else { } goto ldv_56179; case 1: ; if (ldv_state_variable_18 == 2) { ixgbe_read_ee_hostif_buffer_X550(eeprom_ops_X550EM_x_group0, (int )ldvarg133, (int )ldvarg132, ldvarg131); ldv_state_variable_18 = 2; } else { } goto ldv_56179; case 2: ; if (ldv_state_variable_18 == 1) { ixgbe_update_eeprom_checksum_X550(eeprom_ops_X550EM_x_group0); ldv_state_variable_18 = 1; } else { } if (ldv_state_variable_18 == 2) { ixgbe_update_eeprom_checksum_X550(eeprom_ops_X550EM_x_group0); ldv_state_variable_18 = 2; } else { } goto ldv_56179; case 3: ; if (ldv_state_variable_18 == 1) { ixgbe_calc_eeprom_checksum_X550(eeprom_ops_X550EM_x_group0); ldv_state_variable_18 = 1; } else { } if (ldv_state_variable_18 == 2) { ixgbe_calc_eeprom_checksum_X550(eeprom_ops_X550EM_x_group0); ldv_state_variable_18 = 2; } else { } goto ldv_56179; case 4: ; if (ldv_state_variable_18 == 1) { ixgbe_write_ee_hostif_X550(eeprom_ops_X550EM_x_group0, (int )ldvarg130, (int )ldvarg129); ldv_state_variable_18 = 1; } else { } if (ldv_state_variable_18 == 2) { ixgbe_write_ee_hostif_X550(eeprom_ops_X550EM_x_group0, (int )ldvarg130, (int )ldvarg129); ldv_state_variable_18 = 2; } else { } goto ldv_56179; case 5: ; if (ldv_state_variable_18 == 1) { ixgbe_read_ee_hostif_X550(eeprom_ops_X550EM_x_group0, (int )ldvarg128, ldvarg127); ldv_state_variable_18 = 1; } else { } if (ldv_state_variable_18 == 2) { ixgbe_read_ee_hostif_X550(eeprom_ops_X550EM_x_group0, (int )ldvarg128, ldvarg127); ldv_state_variable_18 = 2; } else { } goto ldv_56179; case 6: ; if (ldv_state_variable_18 == 1) { ixgbe_init_eeprom_params_X540(eeprom_ops_X550EM_x_group0); ldv_state_variable_18 = 1; } else { } if (ldv_state_variable_18 == 2) { ixgbe_init_eeprom_params_X540(eeprom_ops_X550EM_x_group0); ldv_state_variable_18 = 2; } else { } goto ldv_56179; case 7: ; if (ldv_state_variable_18 == 1) { ixgbe_validate_eeprom_checksum_X550(eeprom_ops_X550EM_x_group0, ldvarg126); ldv_state_variable_18 = 1; } else { } if (ldv_state_variable_18 == 2) { ixgbe_validate_eeprom_checksum_X550(eeprom_ops_X550EM_x_group0, ldvarg126); ldv_state_variable_18 = 2; } else { } goto ldv_56179; case 8: ; if (ldv_state_variable_18 == 2) { ldv_release_18(); ldv_state_variable_18 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_56179; case 9: ; if (ldv_state_variable_18 == 1) { ldv_probe_18(); ldv_state_variable_18 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_56179; default: ldv_stop(); } ldv_56179: ; return; } }
/* FSM driver for the X550EM-x PHY ops table (state variable 16, three states).  Allocates
 * 1- and 2-byte zeroed buffers for the pointer arguments and zeroes all scalar arguments
 * before the nondet dispatch (which begins on the next physical lines). */
void ldv_main_exported_16(void) { u8 ldvarg189 ; u16 ldvarg182 ; u32 ldvarg180 ; bool ldvarg197 ; u16 *ldvarg179 ; void *tmp ; u8 *ldvarg194 ; void *tmp___0 ; u16 *ldvarg196 ; void *tmp___1 ; u8 *ldvarg176 ; void *tmp___2 ; u32 ldvarg183 ; u8 ldvarg191 ; u8 *ldvarg192 ; void *tmp___3 ; bool ldvarg185 ; u8 ldvarg188 ; u8 ldvarg193 ; u32 ldvarg181 ; u8 ldvarg177 ; u8 ldvarg190 ; u32 ldvarg184 ; ixgbe_link_speed ldvarg186 ; u8 ldvarg178 ; u8 ldvarg187 ; u8 ldvarg195 ; int tmp___4 ; { tmp = ldv_init_zalloc(2UL); ldvarg179 = (u16 *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg194 = (u8 *)tmp___0; tmp___1 = ldv_init_zalloc(2UL); ldvarg196 = (u16 *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg176 = (u8 *)tmp___2; tmp___3 = ldv_init_zalloc(1UL); ldvarg192 = (u8 *)tmp___3; ldv_memset((void *)(& ldvarg189), 0, 1UL); ldv_memset((void *)(& ldvarg182), 0, 2UL); ldv_memset((void *)(& ldvarg180), 0, 4UL); ldv_memset((void *)(& ldvarg197), 0, 1UL); ldv_memset((void *)(& ldvarg183), 0, 4UL); ldv_memset((void *)(& ldvarg191), 0, 1UL); ldv_memset((void *)(& ldvarg185), 0, 1UL); ldv_memset((void *)(& ldvarg188), 0, 1UL); ldv_memset((void *)(& ldvarg193), 0, 1UL); ldv_memset((void *)(& ldvarg181), 0, 4UL); ldv_memset((void *)(& ldvarg177), 0, 1UL); ldv_memset((void *)(& ldvarg190), 0, 1UL); ldv_memset((void *)(& ldvarg184), 0, 4UL); ldv_memset((void *)(& 
/* Continuation of ldv_main_exported_16 argument zeroing, then cases 0-13 of the PHY-ops
 * FSM.  Note the asymmetry visible below: cases 2 (read_i2c_eeprom) and 4
 * (write_i2c_byte) are only modeled in state 3, while the remaining ops are allowed in
 * any of states 1/2/3 and never change the state. */
ldvarg186), 0, 4UL); ldv_memset((void *)(& ldvarg178), 0, 1UL); ldv_memset((void *)(& ldvarg187), 0, 1UL); ldv_memset((void *)(& ldvarg195), 0, 1UL); tmp___4 = __VERIFIER_nondet_int(); switch (tmp___4) { case 0: ; if (ldv_state_variable_16 == 1) { ixgbe_set_copper_phy_power(phy_ops_X550EM_x_group0, (int )ldvarg197); ldv_state_variable_16 = 1; } else { } if (ldv_state_variable_16 == 3) { ixgbe_set_copper_phy_power(phy_ops_X550EM_x_group0, (int )ldvarg197); ldv_state_variable_16 = 3; } else { } if (ldv_state_variable_16 == 2) { ixgbe_set_copper_phy_power(phy_ops_X550EM_x_group0, (int )ldvarg197); ldv_state_variable_16 = 2; } else { } goto ldv_56216; case 1: ; if (ldv_state_variable_16 == 1) { ixgbe_get_phy_firmware_version_generic(phy_ops_X550EM_x_group0, ldvarg196); ldv_state_variable_16 = 1; } else { } if (ldv_state_variable_16 == 3) { ixgbe_get_phy_firmware_version_generic(phy_ops_X550EM_x_group0, ldvarg196); ldv_state_variable_16 = 3; } else { } if (ldv_state_variable_16 == 2) { ixgbe_get_phy_firmware_version_generic(phy_ops_X550EM_x_group0, ldvarg196); ldv_state_variable_16 = 2; } else { } goto ldv_56216; case 2: ; if (ldv_state_variable_16 == 3) { ixgbe_read_i2c_eeprom_generic(phy_ops_X550EM_x_group0, (int )ldvarg195, ldvarg194); ldv_state_variable_16 = 3; } else { } goto ldv_56216; case 3: ; if (ldv_state_variable_16 == 1) { ixgbe_read_i2c_sff8472_generic(phy_ops_X550EM_x_group0, (int )ldvarg193, ldvarg192); ldv_state_variable_16 = 1; } else { } if (ldv_state_variable_16 == 3) { ixgbe_read_i2c_sff8472_generic(phy_ops_X550EM_x_group0, (int )ldvarg193, ldvarg192); ldv_state_variable_16 = 3; } else { } if (ldv_state_variable_16 == 2) { ixgbe_read_i2c_sff8472_generic(phy_ops_X550EM_x_group0, (int )ldvarg193, ldvarg192); ldv_state_variable_16 = 2; } else { } goto ldv_56216; case 4: ; if (ldv_state_variable_16 == 3) { ixgbe_write_i2c_byte_generic(phy_ops_X550EM_x_group0, (int )ldvarg191, (int )ldvarg190, (int )ldvarg189); ldv_state_variable_16 = 3; } else { } goto ldv_56216; case 5: ; if (ldv_state_variable_16 == 1) { ixgbe_identify_phy_x550em(phy_ops_X550EM_x_group0); ldv_state_variable_16 = 1; } else { } if (ldv_state_variable_16 == 3) { ixgbe_identify_phy_x550em(phy_ops_X550EM_x_group0); ldv_state_variable_16 = 3; } else { } if (ldv_state_variable_16 == 2) { ixgbe_identify_phy_x550em(phy_ops_X550EM_x_group0); ldv_state_variable_16 = 2; } else { } goto ldv_56216; case 6: ; if (ldv_state_variable_16 == 1) { ixgbe_setup_phy_link_generic(phy_ops_X550EM_x_group0); ldv_state_variable_16 = 1; } else { } if (ldv_state_variable_16 == 3) { ixgbe_setup_phy_link_generic(phy_ops_X550EM_x_group0); ldv_state_variable_16 = 3; } else { } if (ldv_state_variable_16 == 2) { ixgbe_setup_phy_link_generic(phy_ops_X550EM_x_group0); ldv_state_variable_16 = 2; } else { } goto ldv_56216; case 7: ; if (ldv_state_variable_16 == 1) { ixgbe_write_i2c_eeprom_generic(phy_ops_X550EM_x_group0, (int )ldvarg188, (int )ldvarg187); ldv_state_variable_16 = 1; } else { } if (ldv_state_variable_16 == 3) { ixgbe_write_i2c_eeprom_generic(phy_ops_X550EM_x_group0, (int )ldvarg188, (int )ldvarg187); ldv_state_variable_16 = 3; } else { } if (ldv_state_variable_16 == 2) { ixgbe_write_i2c_eeprom_generic(phy_ops_X550EM_x_group0, (int )ldvarg188, (int )ldvarg187); ldv_state_variable_16 = 2; } else { } goto ldv_56216; case 8: ; if (ldv_state_variable_16 == 1) { ixgbe_setup_phy_link_speed_generic(phy_ops_X550EM_x_group0, ldvarg186, (int )ldvarg185); ldv_state_variable_16 = 1; } else { } if (ldv_state_variable_16 == 3) { ixgbe_setup_phy_link_speed_generic(phy_ops_X550EM_x_group0, ldvarg186, (int )ldvarg185); ldv_state_variable_16 = 3; } else { } if (ldv_state_variable_16 == 2) { ixgbe_setup_phy_link_speed_generic(phy_ops_X550EM_x_group0, ldvarg186, (int )ldvarg185); ldv_state_variable_16 = 2; } else { } goto ldv_56216; case 9: ; if (ldv_state_variable_16 == 1) { ixgbe_write_phy_reg_generic(phy_ops_X550EM_x_group0, ldvarg184, ldvarg183, (int )ldvarg182); ldv_state_variable_16 = 1; } else { } if (ldv_state_variable_16 == 3) { ixgbe_write_phy_reg_generic(phy_ops_X550EM_x_group0, ldvarg184, ldvarg183, (int )ldvarg182); ldv_state_variable_16 = 3; } else { } if (ldv_state_variable_16 == 2) { ixgbe_write_phy_reg_generic(phy_ops_X550EM_x_group0, ldvarg184, ldvarg183, (int )ldvarg182); ldv_state_variable_16 = 2; } else { } goto ldv_56216; case 10: ; if (ldv_state_variable_16 == 1) { ixgbe_identify_module_generic(phy_ops_X550EM_x_group0); ldv_state_variable_16 = 1; } else { } if (ldv_state_variable_16 == 3) { ixgbe_identify_module_generic(phy_ops_X550EM_x_group0); ldv_state_variable_16 = 3; } else { } if (ldv_state_variable_16 == 2) { ixgbe_identify_module_generic(phy_ops_X550EM_x_group0); ldv_state_variable_16 = 2; } else { } goto ldv_56216; case 11: ; if (ldv_state_variable_16 == 1) { ixgbe_read_phy_reg_generic(phy_ops_X550EM_x_group0, ldvarg181, ldvarg180, ldvarg179); ldv_state_variable_16 = 1; } else { } if (ldv_state_variable_16 == 3) { ixgbe_read_phy_reg_generic(phy_ops_X550EM_x_group0, ldvarg181, ldvarg180, ldvarg179); ldv_state_variable_16 = 3; } else { } if (ldv_state_variable_16 == 2) { ixgbe_read_phy_reg_generic(phy_ops_X550EM_x_group0, ldvarg181, ldvarg180, ldvarg179); ldv_state_variable_16 = 2; } else { } goto ldv_56216; case 12: ; if (ldv_state_variable_16 == 1) { ixgbe_tn_check_overtemp(phy_ops_X550EM_x_group0); ldv_state_variable_16 = 1; } else { } if (ldv_state_variable_16 == 3) { ixgbe_tn_check_overtemp(phy_ops_X550EM_x_group0); ldv_state_variable_16 = 3; } else { } if (ldv_state_variable_16 == 2) { ixgbe_tn_check_overtemp(phy_ops_X550EM_x_group0); ldv_state_variable_16 = 2; } else { } goto ldv_56216; case 13: ; if (ldv_state_variable_16 == 1) { ixgbe_read_i2c_byte_generic(phy_ops_X550EM_x_group0, (int )ldvarg178, (int )ldvarg177, ldvarg176); ldv_state_variable_16 = 1; } else { } if (ldv_state_variable_16 == 3) { ixgbe_read_i2c_byte_generic(phy_ops_X550EM_x_group0, (int )ldvarg178, (int )ldvarg177, ldvarg176); 
/* Tail of the exported_16 PHY-ops FSM: case 14 (init_phy_ops) is the 2 -> 3 transition,
 * case 15 (ldv_setup_16) is 1 -> 2 with ref_cnt++, case 16 (ldv_release_16) returns to
 * state 1 from either 2 or 3 with ref_cnt--. */
ldv_state_variable_16 = 3; } else { } if (ldv_state_variable_16 == 2) { ixgbe_read_i2c_byte_generic(phy_ops_X550EM_x_group0, (int )ldvarg178, (int )ldvarg177, ldvarg176); ldv_state_variable_16 = 2; } else { } goto ldv_56216; case 14: ; if (ldv_state_variable_16 == 2) { ixgbe_init_phy_ops_X550em(phy_ops_X550EM_x_group0); ldv_state_variable_16 = 3; } else { } goto ldv_56216; case 15: ; if (ldv_state_variable_16 == 1) { ldv_setup_16(); ldv_state_variable_16 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_56216; case 16: ; if (ldv_state_variable_16 == 3) { ldv_release_16(); ldv_state_variable_16 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_16 == 2) { ldv_release_16(); ldv_state_variable_16 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_56216; default: ldv_stop(); } ldv_56216: ; return; } }
/* FSM driver for the plain-X550 EEPROM ops table (state variable 19).  Same shape as
 * ldv_main_exported_18 above, but dispatches on eeprom_ops_X550_group0 and uses
 * ixgbe_init_eeprom_params_X550 rather than the X540 variant. */
void ldv_main_exported_19(void) { u16 ldvarg558 ; u16 *ldvarg562 ; void *tmp ; u16 ldvarg561 ; u16 *ldvarg554 ; void *tmp___0 ; u16 ldvarg557 ; u16 ldvarg563 ; u16 ldvarg564 ; u16 ldvarg560 ; u16 ldvarg556 ; u16 *ldvarg559 ; void *tmp___1 ; u16 *ldvarg555 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(2UL); ldvarg562 = (u16 *)tmp; tmp___0 = ldv_init_zalloc(2UL); ldvarg554 = (u16 *)tmp___0; tmp___1 = ldv_init_zalloc(2UL); ldvarg559 = (u16 *)tmp___1; tmp___2 = ldv_init_zalloc(2UL); ldvarg555 = (u16 *)tmp___2; ldv_memset((void *)(& ldvarg558), 0, 2UL); ldv_memset((void *)(& ldvarg561), 0, 2UL); ldv_memset((void *)(& ldvarg557), 0, 2UL); ldv_memset((void *)(& ldvarg563), 0, 2UL); ldv_memset((void *)(& ldvarg564), 0, 2UL); ldv_memset((void *)(& ldvarg560), 0, 2UL); ldv_memset((void *)(& ldvarg556), 0, 2UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_19 == 2) { ixgbe_write_ee_hostif_buffer_X550(eeprom_ops_X550_group0, (int )ldvarg564, (int )ldvarg563, ldvarg562); ldv_state_variable_19 = 2; } else { } goto ldv_56249; case 1: ; if (ldv_state_variable_19 == 2) { ixgbe_read_ee_hostif_buffer_X550(eeprom_ops_X550_group0, (int )ldvarg561, (int )ldvarg560, ldvarg559); ldv_state_variable_19 = 2; } else { } goto ldv_56249; case 2: ; if (ldv_state_variable_19 == 1) { ixgbe_update_eeprom_checksum_X550(eeprom_ops_X550_group0); ldv_state_variable_19 = 1; } else { } if (ldv_state_variable_19 == 2) { ixgbe_update_eeprom_checksum_X550(eeprom_ops_X550_group0); ldv_state_variable_19 = 2; } else { } goto ldv_56249; case 3: ; if (ldv_state_variable_19 == 1) { ixgbe_calc_eeprom_checksum_X550(eeprom_ops_X550_group0); ldv_state_variable_19 = 1; } else { } if (ldv_state_variable_19 == 2) { ixgbe_calc_eeprom_checksum_X550(eeprom_ops_X550_group0); ldv_state_variable_19 = 2; } else { } goto ldv_56249; case 4: ; if (ldv_state_variable_19 == 1) { ixgbe_write_ee_hostif_X550(eeprom_ops_X550_group0, (int )ldvarg558, (int )ldvarg557); ldv_state_variable_19 = 1; } else { } if (ldv_state_variable_19 == 2) { ixgbe_write_ee_hostif_X550(eeprom_ops_X550_group0, (int )ldvarg558, (int )ldvarg557); ldv_state_variable_19 = 2; } else { } goto ldv_56249; case 5: ; if (ldv_state_variable_19 == 1) { ixgbe_read_ee_hostif_X550(eeprom_ops_X550_group0, (int )ldvarg556, ldvarg555); ldv_state_variable_19 = 1; } else { } if (ldv_state_variable_19 == 2) { ixgbe_read_ee_hostif_X550(eeprom_ops_X550_group0, (int )ldvarg556, ldvarg555); ldv_state_variable_19 = 2; } else { } goto ldv_56249; case 6: ; if (ldv_state_variable_19 == 1) { ixgbe_init_eeprom_params_X550(eeprom_ops_X550_group0); ldv_state_variable_19 = 1; } else { } if (ldv_state_variable_19 == 2) { ixgbe_init_eeprom_params_X550(eeprom_ops_X550_group0); ldv_state_variable_19 = 2; } else { } goto ldv_56249; case 7: ; if (ldv_state_variable_19 == 1) { ixgbe_validate_eeprom_checksum_X550(eeprom_ops_X550_group0, ldvarg554); ldv_state_variable_19 = 1; } else { } if (ldv_state_variable_19 == 2) { ixgbe_validate_eeprom_checksum_X550(eeprom_ops_X550_group0, ldvarg554); ldv_state_variable_19 = 2; } else { } goto ldv_56249; case 8: ; if (ldv_state_variable_19 == 2) { ldv_release_19(); ldv_state_variable_19 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_56249; case 9: ; if (ldv_state_variable_19 == 1) { ldv_probe_19(); ldv_state_variable_19 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_56249; default: ldv_stop(); } ldv_56249: ; return; } }
/* LDV interception wrappers for workqueue APIs: each forwards to the real kernel call
 * and then records the queued work in the LDV work model via activate_work_9(..., 2)
 * (or call_and_disable_all_9 for flush). */
bool ldv_queue_work_on_489(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_490(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_491(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_492(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } } bool ldv_queue_delayed_work_on_493(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_499(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return 
/* LDV allocation stubs: each checks the gfp flags against the current context
 * (ldv_check_alloc_flags) and returns an unconstrained pointer/int from
 * ldv_undef_ptr() -- modeling that allocation may return anything, including failure.
 * They replace pskb_expand_head, skb_clone, skb_copy, __netdev_alloc_skb and
 * kmem_cache_alloc call sites for verification. */
(tmp); } } int ldv_pskb_expand_head_505(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_507(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_509(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_510(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_511(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_512(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_513(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_514(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_515(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_516(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } }
/* CIL-lowered inline cpumask helpers and forward declarations for the next translation
 * unit section (more LDV wrappers, kmalloc-family inlines, PCI MSI APIs). */
__inline static long ldv__builtin_expect(long exp , long c ) ; __inline static void cpumask_set_cpu(unsigned int cpu , struct cpumask *dstp ) { unsigned int tmp ; { tmp = cpumask_check(cpu); set_bit((long )tmp, (unsigned long volatile *)(& dstp->bits)); return; } } __inline static int cpumask_test_cpu(int cpu , struct cpumask const *cpumask ) { unsigned int tmp ; int tmp___0 ; { tmp = cpumask_check((unsigned int )cpu); tmp___0 = variable_test_bit((long )tmp, (unsigned long const volatile *)(& cpumask->bits)); return (tmp___0); } } extern void kfree_call_rcu(struct callback_head * , void (*)(struct callback_head * ) ) ; bool ldv_queue_work_on_536(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_538(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_537(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_540(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_539(struct workqueue_struct *ldv_func_arg1 ) ; extern int __cpu_to_node(int ) ; void *ldv_kmem_cache_alloc_546(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_563(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; __inline static void *kcalloc(size_t n , size_t size , gfp_t flags ) ; __inline static void *kzalloc(size_t size , gfp_t flags ) ; __inline static void *kzalloc_node(size_t size , gfp_t flags , int node ) ; extern void pci_disable_msi(struct pci_dev * ) ; extern int pci_enable_msi_range(struct pci_dev * , int , int ) ; __inline static int pci_enable_msi_exact(struct pci_dev *dev , int nvec ) { int rc ; int tmp ; { tmp = pci_enable_msi_range(dev, 
/* Tail of pci_enable_msi_exact: requests exactly nvec vectors via
 * pci_enable_msi_range(dev, nvec, nvec) and collapses any non-negative result to 0.
 * Followed by skb/napi declarations and the inline netdev_set_tc_queue
 * (returns -22 == -EINVAL when tc is out of range). */
nvec, nvec); rc = tmp; if (rc < 0) { return (rc); } else { } return (0); } } extern int pci_enable_msix_range(struct pci_dev * , struct msix_entry * , int , int ) ; struct sk_buff *ldv_skb_clone_554(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_562(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_556(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_552(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_560(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_561(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_557(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_558(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_559(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; extern void napi_hash_add(struct napi_struct * ) ; extern void napi_hash_del(struct napi_struct * ) ; __inline static int netdev_set_tc_queue(struct net_device *dev , u8 tc , u16 count , u16 offset ) { { if ((int )dev->num_tc <= (int )tc) { return (-22); } else { } dev->tc_to_txq[(int )tc].count = count; dev->tc_to_txq[(int )tc].offset = offset; return (0); } } extern void netif_napi_add(struct net_device * , struct napi_struct * , int (*)(struct napi_struct * , int ) , int ) ; extern void netif_napi_del(struct napi_struct * ) ;
/* ixgbe_cache_ring_dcb_sriov: assigns hardware register indices (reg_idx) for all rx/tx
 * rings when both DCB (>1 traffic class) and the pool feature checked via
 * adapter->flags & 8388608U (0x800000) are active; returns 0 (false) to let the next
 * caching strategy run otherwise.  Loops are CIL-lowered to goto/label form
 * (ldv_55398..ldv_55410).  reg_idx strides through per-pool queue blocks derived from
 * vmdq->mask; when the low bits reach the TC count it advances to the next pool.
 * NOTE(review): the 0x800000 / 0x200000 flag constants presumably correspond to the
 * SR-IOV-enabled and FCoE-enabled adapter flags -- confirm against ixgbe.h. */
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter ) { struct ixgbe_ring_feature *fcoe ; struct ixgbe_ring_feature *vmdq ; int i ; u16 reg_idx ; u8 tcs ; int tmp ; u16 queues_per_pool ; u8 fcoe_tc ; u8 tmp___0 ; { fcoe = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 4UL; vmdq = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 1UL; tmp = netdev_get_num_tc(adapter->netdev); tcs = (u8 )tmp; if ((unsigned int )tcs <= 1U) { return (0); } else { } if ((adapter->flags & 8388608U) == 0U) { return (0); } else { } reg_idx = (int )vmdq->offset * (int )((u16 )(- ((int )vmdq->mask) & (int )vmdq->mask)); i = 0; goto ldv_55399; ldv_55398: ; if (((int )reg_idx & ~ ((int )vmdq->mask)) >= (int )tcs) { reg_idx = (u16 )((int )((short )(~ ((int )vmdq->mask) + (int )reg_idx)) & (int )((short )vmdq->mask)); } else { } (adapter->rx_ring[i])->reg_idx = (u8 )reg_idx; i = i + 1; reg_idx = (u16 )((int )reg_idx + 1); ldv_55399: ; if (adapter->num_rx_queues > i) { goto ldv_55398; } else { } reg_idx = (int )vmdq->offset * (int )((u16 )(- ((int )vmdq->mask) & (int )vmdq->mask)); i = 0; goto ldv_55402; ldv_55401: ; if (((int )reg_idx & ~ ((int )vmdq->mask)) >= (int )tcs) { reg_idx = (u16 )((int )((short )(~ ((int )vmdq->mask) + (int )reg_idx)) & (int )((short )vmdq->mask)); } else { } (adapter->tx_ring[i])->reg_idx = (u8 )reg_idx; i = i + 1; reg_idx = (u16 )((int )reg_idx + 1); ldv_55402: ; if (adapter->num_tx_queues > i) { goto ldv_55401; } else { } if ((adapter->flags & 2097152U) == 0U) { return (1); } else { } if ((int )fcoe->offset < (int )((unsigned short )tcs)) { return (1); } else { } if ((unsigned int )fcoe->indices != 0U) { queues_per_pool = (u16 )((int )((short )(- ((int )vmdq->mask))) & (int )((short )vmdq->mask)); tmp___0 = ixgbe_fcoe_get_tc(adapter); fcoe_tc = tmp___0; reg_idx = (int )((u16 )((int )vmdq->offset + (int )vmdq->indices)) * (int )queues_per_pool; i = (int )fcoe->offset; goto ldv_55407; ldv_55406: reg_idx = (int )((u16 )((int )((short )(~ ((int )vmdq->mask) + (int )reg_idx)) & (int )((short )vmdq->mask))) + (int )((u16 )fcoe_tc); (adapter->rx_ring[i])->reg_idx = (u8 )reg_idx; reg_idx = (u16 )((int )reg_idx + 1); i = i + 1; ldv_55407: ; if (adapter->num_rx_queues > i) { goto ldv_55406; } else { } reg_idx = (int )((u16 )((int )vmdq->offset + (int 
)vmdq->indices)) * (int )queues_per_pool; i = (int )fcoe->offset; goto ldv_55410; ldv_55409: reg_idx = (int )((u16 )((int )((short )(~ ((int )vmdq->mask) + (int )reg_idx)) & (int )((short )vmdq->mask))) + (int )((u16 )fcoe_tc); (adapter->tx_ring[i])->reg_idx = (u8 )reg_idx; reg_idx = (u16 )((int )reg_idx + 1); i = i + 1; ldv_55410: ; if (adapter->num_tx_queues > i) { goto ldv_55409; } else { } } else { } return (1); } } static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter , u8 tc , unsigned int *tx , unsigned int *rx ) { struct net_device *dev ; struct ixgbe_hw *hw ; u8 num_tcs ; int tmp ; { dev = adapter->netdev; hw = & adapter->hw; tmp = netdev_get_num_tc(dev); num_tcs = (u8 )tmp; *tx = 0U; *rx = 0U; switch ((unsigned int )hw->mac.type) { case 1U: *tx = (unsigned int )((int )tc << 2); *rx = (unsigned int )((int )tc << 3); goto ldv_55422; case 2U: ; case 3U: ; case 4U: ; case 5U: ; if ((unsigned int )num_tcs > 4U) { *rx = (unsigned int )((int )tc << 4); if ((unsigned int )tc <= 2U) { *tx = (unsigned int )((int )tc << 5); } else if ((unsigned int )tc <= 4U) { *tx = (unsigned int )(((int )tc + 2) << 4); } else { *tx = (unsigned int )(((int )tc + 8) << 3); } } else { *rx = (unsigned int )((int )tc << 5); if ((unsigned int )tc <= 1U) { *tx = (unsigned int )((int )tc << 6); } else { *tx = (unsigned int )(((int )tc + 4) << 4); } } default: ; goto ldv_55422; } ldv_55422: ; return; } } static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter ) { struct net_device *dev ; unsigned int tx_idx ; unsigned int rx_idx ; int tc ; int offset ; int rss_i ; int i ; u8 num_tcs ; int tmp ; { dev = adapter->netdev; tmp = netdev_get_num_tc(dev); num_tcs = (u8 )tmp; if ((unsigned int )num_tcs <= 1U) { return (0); } else { } rss_i = (int )adapter->ring_feature[2].indices; tc = 0; offset = 0; goto ldv_55443; ldv_55442: ixgbe_get_first_reg_idx(adapter, (int )((u8 )tc), & tx_idx, & rx_idx); i = 0; goto ldv_55440; ldv_55439: (adapter->tx_ring[offset + i])->reg_idx = (u8 
)tx_idx; (adapter->rx_ring[offset + i])->reg_idx = (u8 )rx_idx; (adapter->tx_ring[offset + i])->dcb_tc = (u8 )tc; (adapter->rx_ring[offset + i])->dcb_tc = (u8 )tc; i = i + 1; tx_idx = tx_idx + 1U; rx_idx = rx_idx + 1U; ldv_55440: ; if (i < rss_i) { goto ldv_55439; } else { } tc = tc + 1; offset = offset + rss_i; ldv_55443: ; if ((int )num_tcs > tc) { goto ldv_55442; } else { } return (1); } } static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter ) { struct ixgbe_ring_feature *fcoe ; struct ixgbe_ring_feature *vmdq ; struct ixgbe_ring_feature *rss ; int i ; u16 reg_idx ; { fcoe = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 4UL; vmdq = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 1UL; rss = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 2UL; if ((adapter->flags & 16384U) == 0U) { return (0); } else { } reg_idx = (int )vmdq->offset * (int )((u16 )(- ((int )vmdq->mask) & (int )vmdq->mask)); i = 0; goto ldv_55455; ldv_55454: ; if ((unsigned int )fcoe->offset != 0U && (int )fcoe->offset < i) { goto ldv_55453; } else { } if (((int )reg_idx & ~ ((int )vmdq->mask)) >= (int )rss->indices) { reg_idx = (u16 )((int )((short )(~ ((int )vmdq->mask) + (int )reg_idx)) & (int )((short )vmdq->mask)); } else { } (adapter->rx_ring[i])->reg_idx = (u8 )reg_idx; i = i + 1; reg_idx = (u16 )((int )reg_idx + 1); ldv_55455: ; if (adapter->num_rx_queues > i) { goto ldv_55454; } else { } ldv_55453: ; goto ldv_55457; ldv_55456: (adapter->rx_ring[i])->reg_idx = (u8 )reg_idx; i = i + 1; reg_idx = (u16 )((int )reg_idx + 1); ldv_55457: ; if (adapter->num_rx_queues > i) { goto ldv_55456; } else { } reg_idx = (int )vmdq->offset * (int )((u16 )(- ((int )vmdq->mask) & (int )vmdq->mask)); i = 0; goto ldv_55461; ldv_55460: ; if ((unsigned int )fcoe->offset != 0U && (int )fcoe->offset < i) { goto ldv_55459; } else { } if (((int )rss->mask & (int )reg_idx) >= (int )rss->indices) { reg_idx = (u16 )((int )((short )(~ ((int )vmdq->mask) + (int )reg_idx)) & (int 
/* NOTE(review): CIL emitted whole functions on single physical lines; the
 * fragment below is the tail of ixgbe_cache_ring_sriov() begun on the
 * previous line. The ldv_* labels/gotos are CIL's lowering of the original
 * for-loops. */
)((short )vmdq->mask)); } else { } (adapter->tx_ring[i])->reg_idx = (u8 )reg_idx; i = i + 1; reg_idx = (u16 )((int )reg_idx + 1); ldv_55461: ; if (adapter->num_tx_queues > i) { goto ldv_55460; } else { } ldv_55459: ;
/* Remaining Tx rings (from the FCoE break onward) keep consuming
 * consecutive register indices. */
goto ldv_55463; ldv_55462: (adapter->tx_ring[i])->reg_idx = (u8 )reg_idx; i = i + 1; reg_idx = (u16 )((int )reg_idx + 1); ldv_55463: ; if (adapter->num_tx_queues > i) { goto ldv_55462; } else { } return (1); } }
/* ixgbe_cache_ring_rss - identity mapping: ring i uses register index i for
 * both the Rx and the Tx ring arrays. Always reports success (1). */
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter ) { int i ; { i = 0; goto ldv_55470; ldv_55469: (adapter->rx_ring[i])->reg_idx = (u8 )i; i = i + 1; ldv_55470: ; if (adapter->num_rx_queues > i) { goto ldv_55469; } else { } i = 0; goto ldv_55473; ldv_55472: (adapter->tx_ring[i])->reg_idx = (u8 )i; i = i + 1; ldv_55473: ; if (adapter->num_tx_queues > i) { goto ldv_55472; } else { } return (1); } }
/* ixgbe_cache_ring_register - pick the ring->register-index mapping by
 * trying each feature-specific scheme in priority order (DCB+SR-IOV, then
 * DCB, then SR-IOV) and falling back to plain RSS; ring 0 is pre-seeded
 * with register index 0 before any scheme runs. */
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter ) { bool tmp ; bool tmp___0 ; bool tmp___1 ; { (adapter->rx_ring[0])->reg_idx = 0U; (adapter->tx_ring[0])->reg_idx = 0U; tmp = ixgbe_cache_ring_dcb_sriov(adapter); if ((int )tmp) { return; } else { } tmp___0 = ixgbe_cache_ring_dcb(adapter); if ((int )tmp___0) { return; } else { } tmp___1 = ixgbe_cache_ring_sriov(adapter); if ((int )tmp___1) { return; } else { } ixgbe_cache_ring_rss(adapter); return; } }
/* ixgbe_set_dcb_sriov_queues - begins here and continues on the next
 * physical line. Bails out (returns 0) unless more than one traffic class
 * is configured and adapter->flags bit 8388608U (0x800000) is set. */
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter ) { int i ; u16 vmdq_i ; u16 vmdq_m ; u16 fcoe_i ; u8 tcs ; int tmp ; u16 __min1 ; u16 __min2 ; u16 __min1___0 ; u16 __min2___0 ; struct ixgbe_ring_feature *fcoe ; u16 __min1___1 ; u16 __min2___1 ; u8 tmp___0 ; { vmdq_i = adapter->ring_feature[1].limit; vmdq_m = 0U; fcoe_i = 0U; tmp = netdev_get_num_tc(adapter->netdev); tcs = (u8 )tmp; if ((unsigned int )tcs <= 1U) { return (0); } else { } if ((adapter->flags & 8388608U) == 0U) { return (0); } else { } vmdq_i = (int )adapter->ring_feature[1].offset + (int )vmdq_i; if ((unsigned int )tcs > 4U) { __min1 = vmdq_i; __min2 = 16U; vmdq_i = (u16 )((int )__min1 < 
(int )__min2 ? __min1 : __min2); vmdq_m = 120U; } else { __min1___0 = vmdq_i; __min2___0 = 32U; vmdq_i = (u16 )((int )__min1___0 < (int )__min2___0 ? __min1___0 : __min2___0); vmdq_m = 124U; } fcoe_i = (int )((u16 )(128 / (- ((int )vmdq_m) & (int )vmdq_m))) - (int )vmdq_i; vmdq_i = (int )vmdq_i - (int )adapter->ring_feature[1].offset; adapter->ring_feature[1].indices = vmdq_i; adapter->ring_feature[1].mask = vmdq_m; adapter->ring_feature[2].indices = 1U; adapter->ring_feature[2].mask = 0U; adapter->flags = adapter->flags & 4294705151U; adapter->num_rx_pools = (int )vmdq_i; adapter->num_rx_queues_per_pool = (int )tcs; adapter->num_tx_queues = (int )vmdq_i * (int )tcs; adapter->num_rx_queues = (int )vmdq_i * (int )tcs; if ((adapter->flags & 2097152U) != 0U) { fcoe = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 4UL; __min1___1 = fcoe_i; __min2___1 = fcoe->limit; fcoe_i = (u16 )((int )__min1___1 < (int )__min2___1 ? __min1___1 : __min2___1); if ((unsigned int )fcoe_i != 0U) { fcoe->indices = fcoe_i; fcoe->offset = (int )((u16 )tcs) * (int )vmdq_i; adapter->num_tx_queues = adapter->num_tx_queues + (int )fcoe_i; adapter->num_rx_queues = adapter->num_rx_queues + (int )fcoe_i; } else if ((unsigned int )tcs > 1U) { fcoe->indices = 1U; tmp___0 = ixgbe_fcoe_get_tc(adapter); fcoe->offset = (u16 )tmp___0; } else { adapter->flags = adapter->flags & 4292870143U; fcoe->indices = 0U; fcoe->offset = 0U; } } else { } i = 0; goto ldv_55497; ldv_55496: netdev_set_tc_queue(adapter->netdev, (int )((u8 )i), 1, (int )((u16 )i)); i = i + 1; ldv_55497: ; if ((int )tcs > i) { goto ldv_55496; } else { } return (1); } } static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter ) { struct net_device *dev ; struct ixgbe_ring_feature *f ; int rss_i ; int rss_m ; int i ; int tcs ; u16 __min1 ; u16 __min2 ; u16 __min1___0 ; u16 __min2___0 ; u16 __min1___1 ; u16 __min2___1 ; int __min1___2 ; int __min2___2 ; u8 tc ; u8 tmp ; u16 __min1___3 ; u16 __min2___3 ; { dev = adapter->netdev; 
tcs = netdev_get_num_tc(dev); if (tcs <= 1) { return (0); } else { } rss_i = (int )(dev->num_tx_queues / (unsigned int )tcs); if ((unsigned int )adapter->hw.mac.type == 1U) { __min1 = (u16 )rss_i; __min2 = 4U; rss_i = (int )__min1 < (int )__min2 ? __min1 : __min2; rss_m = 3; } else if (tcs > 4) { __min1___0 = (u16 )rss_i; __min2___0 = 8U; rss_i = (int )__min1___0 < (int )__min2___0 ? __min1___0 : __min2___0; rss_m = 7; } else { __min1___1 = (u16 )rss_i; __min2___1 = 16U; rss_i = (int )__min1___1 < (int )__min2___1 ? __min1___1 : __min2___1; rss_m = 15; } f = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 2UL; __min1___2 = rss_i; __min2___2 = (int )f->limit; rss_i = __min1___2 < __min2___2 ? __min1___2 : __min2___2; f->indices = (u16 )rss_i; f->mask = (u16 )rss_m; adapter->flags = adapter->flags & 4294705151U; if ((adapter->flags & 2097152U) != 0U) { tmp = ixgbe_fcoe_get_tc(adapter); tc = tmp; f = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 4UL; __min1___3 = (u16 )rss_i; __min2___3 = f->limit; f->indices = (u16 )((int )__min1___3 < (int )__min2___3 ? 
__min1___3 : __min2___3); f->offset = (int )((u16 )tc) * (int )((u16 )rss_i); } else { } i = 0; goto ldv_55525; ldv_55524: netdev_set_tc_queue(dev, (int )((u8 )i), (int )((u16 )rss_i), (int )((u16 )rss_i) * (int )((u16 )i)); i = i + 1; ldv_55525: ; if (i < tcs) { goto ldv_55524; } else { } adapter->num_tx_queues = rss_i * tcs; adapter->num_rx_queues = rss_i * tcs; return (1); } } static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter ) { u16 vmdq_i ; u16 vmdq_m ; u16 rss_i ; u16 rss_m ; u16 fcoe_i ; bool pools ; unsigned long tmp ; u16 __min1 ; u16 __min2 ; u16 __min1___0 ; u16 __min2___0 ; struct ixgbe_ring_feature *fcoe ; u16 __min1___1 ; u16 __min2___1 ; u16 __min1___2 ; u16 __min2___2 ; unsigned int tmp___0 ; u16 __min1___3 ; u16 __min2___3 ; { vmdq_i = adapter->ring_feature[1].limit; vmdq_m = 0U; rss_i = adapter->ring_feature[2].limit; rss_m = 0U; fcoe_i = 0U; tmp = find_first_zero_bit((unsigned long const *)(& adapter->fwd_bitmask), 32UL); pools = tmp > 1UL; if ((adapter->flags & 8388608U) == 0U) { return (0); } else { } vmdq_i = (int )adapter->ring_feature[1].offset + (int )vmdq_i; __min1 = 64U; __min2 = vmdq_i; vmdq_i = (u16 )((int )__min1 < (int )__min2 ? __min1 : __min2); if (((unsigned int )vmdq_i > 32U || (unsigned int )rss_i <= 3U) || ((unsigned int )vmdq_i > 16U && (int )pools)) { vmdq_m = 126U; rss_m = 1U; __min1___0 = rss_i; __min2___0 = 2U; rss_i = (u16 )((int )__min1___0 < (int )__min2___0 ? 
__min1___0 : __min2___0); } else { vmdq_m = 124U; rss_m = 3U; rss_i = 4U; } fcoe_i = 128U - (unsigned int )((int )((u16 )(- ((int )vmdq_m) & (int )vmdq_m)) * (int )vmdq_i); vmdq_i = (int )vmdq_i - (int )adapter->ring_feature[1].offset; adapter->ring_feature[1].indices = vmdq_i; adapter->ring_feature[1].mask = vmdq_m; adapter->ring_feature[2].indices = rss_i; adapter->ring_feature[2].mask = rss_m; adapter->num_rx_pools = (int )vmdq_i; adapter->num_rx_queues_per_pool = (int )rss_i; adapter->num_rx_queues = (int )vmdq_i * (int )rss_i; adapter->num_tx_queues = (int )vmdq_i * (int )rss_i; adapter->flags = adapter->flags & 4294705151U; if ((adapter->flags & 2097152U) != 0U) { fcoe = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 4UL; __min1___1 = fcoe_i; __min2___1 = fcoe->limit; fcoe_i = (u16 )((int )__min1___1 < (int )__min2___1 ? __min1___1 : __min2___1); if ((unsigned int )vmdq_i > 1U && (unsigned int )fcoe_i != 0U) { fcoe->indices = fcoe_i; fcoe->offset = (int )vmdq_i * (int )rss_i; } else { __min1___2 = (int )fcoe_i + (int )rss_i; tmp___0 = cpumask_weight(cpu_online_mask); __min2___2 = (u16 )tmp___0; fcoe_i = (u16 )((int )__min1___2 < (int )__min2___2 ? __min1___2 : __min2___2); if ((adapter->flags & 8U) == 0U) { fcoe_i = rss_i; } else { } __min1___3 = fcoe_i; __min2___3 = fcoe->limit; fcoe->indices = (u16 )((int )__min1___3 < (int )__min2___3 ? 
__min1___3 : __min2___3); fcoe->offset = (int )fcoe_i - (int )fcoe->indices; fcoe_i = (int )fcoe_i - (int )rss_i; } adapter->num_tx_queues = adapter->num_tx_queues + (int )fcoe_i; adapter->num_rx_queues = adapter->num_rx_queues + (int )fcoe_i; } else { } return (1); } } static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter ) { struct ixgbe_ring_feature *f ; u16 rss_i ; u16 tmp ; struct net_device *dev ; u16 fcoe_i ; u16 __min1 ; u16 __min2 ; unsigned int tmp___0 ; u16 __min1___0 ; u16 __min2___0 ; u16 __min1___1 ; u16 __min2___1 ; u16 __max1 ; u16 __max2 ; { f = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 2UL; rss_i = f->limit; f->indices = rss_i; f->mask = 15U; adapter->flags = adapter->flags & 4294705151U; if ((unsigned int )rss_i > 1U && adapter->atr_sample_rate != 0U) { f = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 3UL; tmp = f->limit; f->indices = tmp; rss_i = tmp; if ((adapter->flags & 524288U) == 0U) { adapter->flags = adapter->flags | 262144U; } else { } } else { } if ((adapter->flags & 2097152U) != 0U) { dev = adapter->netdev; f = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 4UL; __min1 = (int )f->limit + (int )rss_i; tmp___0 = cpumask_weight(cpu_online_mask); __min2 = (u16 )tmp___0; fcoe_i = (u16 )((int )__min1 < (int )__min2 ? __min1 : __min2); __min1___0 = fcoe_i; __min2___0 = (u16 )dev->num_tx_queues; fcoe_i = (u16 )((int )__min1___0 < (int )__min2___0 ? __min1___0 : __min2___0); if ((adapter->flags & 8U) == 0U) { fcoe_i = rss_i; } else { } __min1___1 = fcoe_i; __min2___1 = f->limit; f->indices = (u16 )((int )__min1___1 < (int )__min2___1 ? __min1___1 : __min2___1); f->offset = (int )fcoe_i - (int )f->indices; __max1 = fcoe_i; __max2 = rss_i; rss_i = (u16 )((int )__max1 > (int )__max2 ? 
/* NOTE(review): the opening fragment is the tail of ixgbe_set_rss_queues()
 * from the previous physical line; it publishes the final rss_i as both
 * the Rx and Tx queue counts. */
__max1 : __max2); } else { } adapter->num_rx_queues = (int )rss_i; adapter->num_tx_queues = (int )rss_i; return (1); } }
/* ixgbe_set_num_queues - choose the adapter's queue layout. Starts from a
 * safe single-queue default, then lets each feature-specific helper claim
 * the configuration in priority order: DCB+SR-IOV, DCB, SR-IOV, and
 * finally plain RSS. The first helper that returns true wins. */
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter ) { bool tmp ; bool tmp___0 ; bool tmp___1 ; { adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; adapter->num_rx_pools = adapter->num_rx_queues; adapter->num_rx_queues_per_pool = 1; tmp = ixgbe_set_dcb_sriov_queues(adapter); if ((int )tmp) { return; } else { } tmp___0 = ixgbe_set_dcb_queues(adapter); if ((int )tmp___0) { return; } else { } tmp___1 = ixgbe_set_sriov_queues(adapter); if ((int )tmp___1) { return; } else { } ixgbe_set_rss_queues(adapter); return; } }
/* ixgbe_acquire_msix_vectors - request MSI-X vectors. Computes
 *   vectors = min(max(num_rx_queues, num_tx_queues), online CPUs) + 1,
 * then caps that at hw->mac.max_msix_vectors. Allocates the msix_entries
 * table with kcalloc (8 bytes per entry -- presumably
 * sizeof(struct msix_entry), TODO confirm), numbers the entries
 * 0..vectors-1 (the ldv_* labels are CIL's lowering of a for-loop), and
 * asks the PCI core for between vector_threshold (2) and 'vectors'
 * interrupts. Returns -12 (-ENOMEM) when the table allocation fails; the
 * pci_enable_msix_range() failure path continues on the next physical
 * line (note the string literal below is split across physical lines by
 * the CIL output). */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int i ; int vectors ; int vector_threshold ; int _max1 ; int _max2 ; int __min1 ; int __min2 ; unsigned int tmp ; int __min1___0 ; int __min2___0 ; void *tmp___0 ; int __min1___1 ; int __min2___1 ; { hw = & adapter->hw; _max1 = adapter->num_rx_queues; _max2 = adapter->num_tx_queues; vectors = _max1 > _max2 ? _max1 : _max2; __min1 = vectors; tmp = cpumask_weight(cpu_online_mask); __min2 = (int )tmp; vectors = __min1 < __min2 ? __min1 : __min2; vectors = vectors + 1; __min1___0 = vectors; __min2___0 = (int )hw->mac.max_msix_vectors; vectors = __min1___0 < __min2___0 ? __min1___0 : __min2___0; vector_threshold = 2; tmp___0 = kcalloc((size_t )vectors, 8UL, 208U); adapter->msix_entries = (struct msix_entry *)tmp___0; if ((unsigned long )adapter->msix_entries == (unsigned long )((struct msix_entry *)0)) { return (-12); } else { } i = 0; goto ldv_55591; ldv_55590: (adapter->msix_entries + (unsigned long )i)->entry = (u16 )i; i = i + 1; ldv_55591: ; if (i < vectors) { goto ldv_55590; } else { } vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, vector_threshold, vectors); if (vectors < 0) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "Failed to allocate MSI-X interrupts. 
Err: %d\n", vectors); adapter->flags = adapter->flags & 4294967287U; kfree((void const *)adapter->msix_entries); adapter->msix_entries = (struct msix_entry *)0; return (vectors); } else { } adapter->flags = adapter->flags | 8U; vectors = vectors + -1; __min1___1 = vectors; __min2___1 = adapter->max_q_vectors; adapter->num_q_vectors = __min1___1 < __min2___1 ? __min1___1 : __min2___1; return (0); } } static void ixgbe_add_ring(struct ixgbe_ring *ring , struct ixgbe_ring_container *head ) { { ring->next = head->ring; head->ring = ring; head->count = (u8 )((int )head->count + 1); return; } } static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter , int v_count , int v_idx , int txr_count , int txr_idx , int rxr_count , int rxr_idx ) { struct ixgbe_q_vector *q_vector ; struct ixgbe_ring *ring ; int node ; int cpu ; int ring_count ; int size ; u8 tcs ; int tmp ; u16 rss_i ; int tmp___0 ; void *tmp___1 ; void *tmp___2 ; struct ixgbe_ring_feature *f ; { node = -1; cpu = -1; tmp = netdev_get_num_tc(adapter->netdev); tcs = (u8 )tmp; ring_count = txr_count + rxr_count; size = (int )((unsigned int )((unsigned long )ring_count + 1UL) * 4096U); if ((unsigned int )tcs <= 1U && (adapter->flags & 8388608U) == 0U) { rss_i = adapter->ring_feature[2].indices; if ((unsigned int )rss_i > 1U && adapter->atr_sample_rate != 0U) { tmp___0 = cpumask_test_cpu(v_idx, cpu_online_mask); if (tmp___0 != 0) { cpu = v_idx; node = __cpu_to_node(cpu); } else { } } else { } } else { } tmp___1 = kzalloc_node((size_t )size, 208U, node); q_vector = (struct ixgbe_q_vector *)tmp___1; if ((unsigned long )q_vector == (unsigned long )((struct ixgbe_q_vector *)0)) { tmp___2 = kzalloc((size_t )size, 208U); q_vector = (struct ixgbe_q_vector *)tmp___2; } else { } if ((unsigned long )q_vector == (unsigned long )((struct ixgbe_q_vector *)0)) { return (-12); } else { } if (cpu != -1) { cpumask_set_cpu((unsigned int )cpu, & q_vector->affinity_mask); } else { } q_vector->numa_node = node; q_vector->cpu = -1; 
netif_napi_add(adapter->netdev, & q_vector->napi, & ixgbe_poll, 64); napi_hash_add(& q_vector->napi); atomic_set(& q_vector->state, 3); adapter->q_vector[v_idx] = q_vector; q_vector->adapter = adapter; q_vector->v_idx = (u16 )v_idx; q_vector->tx.work_limit = adapter->tx_work_limit; ring = (struct ixgbe_ring *)(& q_vector->ring); if (txr_count != 0 && rxr_count == 0) { if ((unsigned int )adapter->tx_itr_setting == 1U) { q_vector->itr = 400U; } else { q_vector->itr = adapter->tx_itr_setting; } } else if ((unsigned int )adapter->rx_itr_setting == 1U) { q_vector->itr = 200U; } else { q_vector->itr = adapter->rx_itr_setting; } goto ldv_55618; ldv_55617: ring->dev = & (adapter->pdev)->dev; ring->netdev = adapter->netdev; ring->q_vector = q_vector; ixgbe_add_ring(ring, & q_vector->tx); ring->count = (u16 )adapter->tx_ring_count; if (adapter->num_rx_pools > 1) { ring->queue_index = (u8 )(txr_idx % adapter->num_rx_queues_per_pool); } else { ring->queue_index = (u8 )txr_idx; } adapter->tx_ring[txr_idx] = ring; txr_count = txr_count - 1; txr_idx = txr_idx + v_count; ring = ring + 1; ldv_55618: ; if (txr_count != 0) { goto ldv_55617; } else { } goto ldv_55622; ldv_55621: ring->dev = & (adapter->pdev)->dev; ring->netdev = adapter->netdev; ring->q_vector = q_vector; ixgbe_add_ring(ring, & q_vector->rx); if ((unsigned int )adapter->hw.mac.type == 2U) { set_bit(5L, (unsigned long volatile *)(& ring->state)); } else { } if (((adapter->netdev)->features & 2147483648ULL) != 0ULL) { f = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 4UL; if ((int )f->offset <= rxr_idx && (int )f->offset + (int )f->indices > rxr_idx) { set_bit(6L, (unsigned long volatile *)(& ring->state)); } else { } } else { } ring->count = (u16 )adapter->rx_ring_count; if (adapter->num_rx_pools > 1) { ring->queue_index = (u8 )(rxr_idx % adapter->num_rx_queues_per_pool); } else { ring->queue_index = (u8 )rxr_idx; } adapter->rx_ring[rxr_idx] = ring; rxr_count = rxr_count - 1; rxr_idx = rxr_idx + v_count; 
/* NOTE(review): fragment below is the tail of ixgbe_alloc_q_vector() begun
 * on the previous physical line (advance to the next embedded ring and
 * repeat the Rx-ring setup loop until rxr_count is exhausted). */
ring = ring + 1; ldv_55622: ; if (rxr_count != 0) { goto ldv_55621; } else { } return (0); } }
/* ixgbe_free_q_vector - detach and release the q_vector at v_idx: walk its
 * Tx then Rx ring lists clearing the matching adapter->tx_ring[] /
 * adapter->rx_ring[] slots, drop the NAPI registration, and free the
 * vector through an RCU callback. The (void (*)(struct callback_head *))1376
 * constant is an LDV/CIL model artifact standing in for the real callback
 * pointer -- do not read it literally. */
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter , int v_idx ) { struct ixgbe_q_vector *q_vector ; struct ixgbe_ring *ring ; { q_vector = adapter->q_vector[v_idx]; ring = q_vector->tx.ring; goto ldv_55631; ldv_55630: adapter->tx_ring[(int )ring->queue_index] = (struct ixgbe_ring *)0; ring = ring->next; ldv_55631: ; if ((unsigned long )ring != (unsigned long )((struct ixgbe_ring *)0)) { goto ldv_55630; } else { } ring = q_vector->rx.ring; goto ldv_55634; ldv_55633: adapter->rx_ring[(int )ring->queue_index] = (struct ixgbe_ring *)0; ring = ring->next; ldv_55634: ; if ((unsigned long )ring != (unsigned long )((struct ixgbe_ring *)0)) { goto ldv_55633; } else { } adapter->q_vector[v_idx] = (struct ixgbe_q_vector *)0; napi_hash_del(& q_vector->napi); netif_napi_del(& q_vector->napi); kfree_call_rcu(& q_vector->rcu, (void (*)(struct callback_head * ))1376); return; } }
/* ixgbe_alloc_q_vectors - distribute all Rx/Tx rings over the available
 * interrupt vectors. With MSI-X disabled (adapter->flags bit 8U clear)
 * everything shares a single vector. When rx+tx rings fit in q_vectors,
 * each Rx ring first gets a vector of its own; the second loop then
 * spreads the remaining rings with ceiling division (rqpv/tqpv =
 * remaining rings / remaining vectors, rounded up). Any allocation
 * failure unwinds via err_out, which continues on the next physical
 * line. */
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter ) { int q_vectors ; int rxr_remaining ; int txr_remaining ; int rxr_idx ; int txr_idx ; int v_idx ; int err ; int rqpv ; int tqpv ; int tmp ; { q_vectors = adapter->num_q_vectors; rxr_remaining = adapter->num_rx_queues; txr_remaining = adapter->num_tx_queues; rxr_idx = 0; txr_idx = 0; v_idx = 0; if ((adapter->flags & 8U) == 0U) { q_vectors = 1; } else { } if (rxr_remaining + txr_remaining <= q_vectors) { goto ldv_55649; ldv_55648: err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, 0, 0, 1, rxr_idx); if (err != 0) { goto err_out; } else { } rxr_remaining = rxr_remaining - 1; rxr_idx = rxr_idx + 1; v_idx = v_idx + 1; ldv_55649: ; if (rxr_remaining != 0) { goto ldv_55648; } else { } } else { } goto ldv_55654; ldv_55653: rqpv = (((q_vectors - v_idx) + rxr_remaining) + -1) / (q_vectors - v_idx); tqpv = (((q_vectors - v_idx) + txr_remaining) + -1) / (q_vectors - v_idx); err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, tqpv, txr_idx, 
rqpv, rxr_idx); if (err != 0) { goto err_out; } else { } rxr_remaining = rxr_remaining - rqpv; txr_remaining = txr_remaining - tqpv; rxr_idx = rxr_idx + 1; txr_idx = txr_idx + 1; v_idx = v_idx + 1; ldv_55654: ; if (v_idx < q_vectors) { goto ldv_55653; } else { } return (0); err_out: adapter->num_tx_queues = 0; adapter->num_rx_queues = 0; adapter->num_q_vectors = 0; goto ldv_55657; ldv_55656: ixgbe_free_q_vector(adapter, v_idx); ldv_55657: tmp = v_idx; v_idx = v_idx - 1; if (tmp != 0) { goto ldv_55656; } else { } return (-12); } } static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter ) { int v_idx ; int tmp ; { v_idx = adapter->num_q_vectors; adapter->num_tx_queues = 0; adapter->num_rx_queues = 0; adapter->num_q_vectors = 0; goto ldv_55664; ldv_55663: ixgbe_free_q_vector(adapter, v_idx); ldv_55664: tmp = v_idx; v_idx = v_idx - 1; if (tmp != 0) { goto ldv_55663; } else { } return; } } static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter ) { { if ((adapter->flags & 8U) != 0U) { adapter->flags = adapter->flags & 4294967287U; pci_disable_msix(adapter->pdev); kfree((void const *)adapter->msix_entries); adapter->msix_entries = (struct msix_entry *)0; } else if ((adapter->flags & 2U) != 0U) { adapter->flags = adapter->flags & 4294967293U; pci_disable_msi(adapter->pdev); } else { } return; } } static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter ) { int err ; int tmp ; int tmp___0 ; { tmp = ixgbe_acquire_msix_vectors(adapter); if (tmp == 0) { return; } else { } tmp___0 = netdev_get_num_tc(adapter->netdev); if (tmp___0 > 1) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "Number of DCB TCs exceeds number of available queues. 
Disabling DCB support.\n"); netdev_reset_tc(adapter->netdev); if ((unsigned int )adapter->hw.mac.type == 1U) { adapter->hw.fc.requested_mode = adapter->last_lfc_mode; } else { } adapter->flags = adapter->flags & 4294963199U; adapter->temp_dcb_cfg.pfc_mode_enable = 0; adapter->dcb_cfg.pfc_mode_enable = 0; } else { } adapter->dcb_cfg.num_tcs.pg_tcs = 1U; adapter->dcb_cfg.num_tcs.pfc_tcs = 1U; dev_warn((struct device const *)(& (adapter->pdev)->dev), "Disabling SR-IOV support\n"); ixgbe_disable_sriov(adapter); dev_warn((struct device const *)(& (adapter->pdev)->dev), "Disabling RSS support\n"); adapter->ring_feature[2].limit = 1U; ixgbe_set_num_queues(adapter); adapter->num_q_vectors = 1; err = pci_enable_msi_exact(adapter->pdev, 1); if (err != 0) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n", err); } else { adapter->flags = adapter->flags | 2U; } return; } } int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter ) { int err ; { ixgbe_set_num_queues(adapter); ixgbe_set_interrupt_capability(adapter); err = ixgbe_alloc_q_vectors(adapter); if (err != 0) { dev_err((struct device const *)(& (adapter->pdev)->dev), "Unable to allocate memory for queue vectors\n"); goto err_alloc_q_vectors; } else { } ixgbe_cache_ring_register(adapter); _dev_info((struct device const *)(& (adapter->pdev)->dev), "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", adapter->num_rx_queues > 1 ? 
/* NOTE(review): fragment below finishes ixgbe_init_interrupt_scheme() from
 * the previous physical line: it completes the multiqueue _dev_info()
 * arguments, sets bit 2 of adapter->state on success, and on q_vector
 * allocation failure releases interrupt capability before returning the
 * error. */
(char *)"Enabled" : (char *)"Disabled", adapter->num_rx_queues, adapter->num_tx_queues); set_bit(2L, (unsigned long volatile *)(& adapter->state)); return (0); err_alloc_q_vectors: ixgbe_reset_interrupt_capability(adapter); return (err); } }
/* ixgbe_clear_interrupt_scheme - undo ixgbe_init_interrupt_scheme(): zero
 * the queue counts, free all q_vectors, then release MSI-X/MSI resources. */
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter ) { { adapter->num_tx_queues = 0; adapter->num_rx_queues = 0; ixgbe_free_q_vectors(adapter); ixgbe_reset_interrupt_capability(adapter); return; } }
/* ixgbe_tx_ctxtdesc - write an advanced Tx context descriptor at
 * tx_ring->next_to_use and advance the index, wrapping to 0 when it
 * reaches tx_ring->count. 538968064U (0x20200000) is OR-ed into
 * type_tucmd -- presumably the descriptor type/extension bits, TODO
 * confirm against the controller datasheet. */
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring , u32 vlan_macip_lens , u32 fcoe_sof_eof , u32 type_tucmd , u32 mss_l4len_idx ) { struct ixgbe_adv_tx_context_desc *context_desc ; u16 i ; { i = tx_ring->next_to_use; context_desc = (struct ixgbe_adv_tx_context_desc *)tx_ring->desc + (unsigned long )i; i = (u16 )((int )i + 1); tx_ring->next_to_use = (int )tx_ring->count > (int )i ? i : 0U; type_tucmd = type_tucmd | 538968064U; context_desc->vlan_macip_lens = vlan_macip_lens; context_desc->seqnum_seed = fcoe_sof_eof; context_desc->type_tucmd_mlhl = type_tucmd; context_desc->mss_l4len_idx = mss_l4len_idx; return; } }
/* The ldv_* wrappers below are LDV environment-model shims: each forwards
 * to the real kernel workqueue primitive and additionally notifies the
 * verifier's work-item model via activate_work_9(). */
bool ldv_queue_work_on_536(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_537(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_538(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = 
queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_539(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } } bool ldv_queue_delayed_work_on_540(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_546(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static void *kzalloc_node(size_t size , gfp_t flags , int node ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_552(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_554(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_556(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_557(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_558(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = 
ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_559(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_560(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_561(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_562(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_563(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static void clear_bit_unlock(long nr , unsigned long volatile *addr ) { { __asm__ volatile ("": : : "memory"); clear_bit(nr, addr); return; } } extern void __might_fault(char const * , int ) ; bool ldv_is_err(void const *ptr ) ; bool ldv_is_err_or_null(void const *ptr ) ; long ldv_ptr_err(void const *ptr ) ; __inline static u64 div_u64_rem(u64 dividend , u32 divisor , u32 *remainder ) { { *remainder = (u32 )(dividend % (u64 )divisor); return (dividend / (u64 )divisor); } } __inline static u64 div_u64(u64 dividend , u32 divisor ) { u32 remainder ; u64 tmp ; { tmp = div_u64_rem(dividend, divisor, & remainder); return (tmp); } } __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; __inline static bool IS_ERR_OR_NULL(void const *ptr ) ; extern void _raw_spin_unlock_irqrestore(raw_spinlock_t * , unsigned long ) 
; __inline static void ldv_spin_unlock_irqrestore_580(spinlock_t *lock , unsigned long flags ) { { _raw_spin_unlock_irqrestore(& lock->__annonCompField18.rlock, flags); return; } } __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) ; __inline static s64 timespec_to_ns(struct timespec const *ts ) { { return ((long long )ts->tv_sec * 1000000000LL + (long long )ts->tv_nsec); } } extern struct timespec ns_to_timespec(s64 const ) ; __inline static ktime_t ns_to_ktime(u64 ns ) { ktime_t ktime_zero ; ktime_t __constr_expr_0 ; { ktime_zero.tv64 = 0LL; __constr_expr_0.tv64 = (long long )((unsigned long long )ktime_zero.tv64 + ns); return (__constr_expr_0); } } extern ktime_t ktime_get_with_offset(enum tk_offsets ) ; __inline static ktime_t ktime_get_real(void) { ktime_t tmp ; { tmp = ktime_get_with_offset(0); return (tmp); } } bool ldv_queue_work_on_583(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_585(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_584(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_587(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_586(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_cancel_work_sync_611(struct work_struct *ldv_func_arg1 ) ; __inline static bool queue_work___0(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_583(8192, wq, work); return (tmp); } } __inline static bool schedule_work___0(struct work_struct *work ) { bool tmp ; { tmp = queue_work___0(system_wq, work); return (tmp); } } void *ldv_kmem_cache_alloc_593(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void 
*ldv_kmem_cache_alloc_610(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void invoke_work_9(void) ; void call_and_disable_work_9(struct work_struct *work ) ; extern unsigned long _copy_from_user(void * , void const * , unsigned int ) ; extern unsigned long _copy_to_user(void * , void const * , unsigned int ) ; extern void __copy_from_user_overflow(void) ; extern void __copy_to_user_overflow(void) ; __inline static unsigned long copy_from_user(void *to , void const *from , unsigned long n ) { int sz ; unsigned long tmp ; long tmp___0 ; { tmp = __builtin_object_size((void const *)to, 0); sz = (int )tmp; __might_fault("./arch/x86/include/asm/uaccess.h", 697); tmp___0 = ldv__builtin_expect((long )(sz < 0 || (unsigned long )sz >= n), 1L); if (tmp___0 != 0L) { n = _copy_from_user(to, from, (unsigned int )n); } else { __copy_from_user_overflow(); } return (n); } } __inline static unsigned long copy_to_user(void *to , void const *from , unsigned long n ) { int sz ; unsigned long tmp ; long tmp___0 ; { tmp = __builtin_object_size(from, 0); sz = (int )tmp; __might_fault("./arch/x86/include/asm/uaccess.h", 732); tmp___0 = ldv__builtin_expect((long )(sz < 0 || (unsigned long )sz >= n), 1L); if (tmp___0 != 0L) { n = _copy_to_user(to, from, (unsigned int )n); } else { __copy_to_user_overflow(); } return (n); } } struct sk_buff *ldv_skb_clone_601(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_609(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_603(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_599(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_607(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_608(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; __inline static struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff 
*skb ) { unsigned char *tmp ; { tmp = skb_end_pointer((struct sk_buff const *)skb); return (& ((struct skb_shared_info *)tmp)->hwtstamps); } } struct sk_buff *ldv___netdev_alloc_skb_604(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_605(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_606(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; __inline static void timecounter_adjtime(struct timecounter *tc , s64 delta ) { { tc->nsec = tc->nsec + (unsigned long long )delta; return; } } extern void timecounter_init(struct timecounter * , struct cyclecounter const * , u64 ) ; extern u64 timecounter_read(struct timecounter * ) ; extern u64 timecounter_cyc2time(struct timecounter * , cycle_t ) ; extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info * , struct device * ) ; extern int ptp_clock_unregister(struct ptp_clock * ) ; extern void ptp_clock_event(struct ptp_clock * , struct ptp_clock_event * ) ; static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; int shift ; u32 esdp ; u32 tsauxc ; u32 clktiml ; u32 clktimh ; u32 trgttiml ; u32 trgttimh ; u32 rem ; u64 ns ; u64 clock_edge ; u32 tmp ; u32 tmp___0 ; { hw = & adapter->hw; shift = (int )adapter->cc.shift; ns = 0ULL; clock_edge = 0ULL; if ((adapter->flags2 & 1024U) != 0U && (unsigned int )hw->mac.type == 3U) { ixgbe_write_reg(hw, 35872U, 0U); ixgbe_read_reg(hw, 8U); esdp = ixgbe_read_reg(hw, 32U); esdp = esdp | 65792U; tsauxc = 76U; clktiml = (unsigned int )(500000000ULL << shift); clktimh = (unsigned int )((500000000ULL << shift) >> 32); tmp = ixgbe_read_reg(hw, 35852U); clock_edge = (unsigned long long )tmp | clock_edge; tmp___0 = ixgbe_read_reg(hw, 35856U); clock_edge = ((unsigned long long )tmp___0 << 32) | clock_edge; ns = timecounter_cyc2time(& adapter->tc, clock_edge); div_u64_rem(ns, 500000000U, & 
/* --- tail of ixgbe_ptp_setup_sdp() (function head is on the previous line):
 * for X540 with the PPS flag set this programs a clock-out signal on SDP0;
 * otherwise TSAUXC is cleared to disable it. Register offsets appear to match
 * the 82599/X540 map -- NOTE(review): confirm against the datasheet. */
rem); /* rem = ns % 500000000 from the div_u64_rem() call on the previous line */
/* round up to the next 500 ms boundary, converted to cycle units via << shift */
clock_edge = ((500000000ULL - (unsigned long long )rem) << shift) + clock_edge;
trgttiml = (unsigned int )clock_edge;
trgttimh = (unsigned int )(clock_edge >> 32);
ixgbe_write_reg(hw, 35892U, clktiml); /* 0x8C34 CLKTIML: half period, low  */
ixgbe_write_reg(hw, 35896U, clktimh); /* 0x8C38 CLKTIMH: half period, high */
ixgbe_write_reg(hw, 35876U, trgttiml); /* 0x8C24 TRGTTIML0: first edge, low  */
ixgbe_write_reg(hw, 35880U, trgttimh); /* 0x8C28 TRGTTIMH0: first edge, high */
ixgbe_write_reg(hw, 32U, esdp); /* 0x20 ESDP: SDP0 configured as output */
ixgbe_write_reg(hw, 35872U, tsauxc); /* 0x8C20 TSAUXC: enable clock out */
} else {
ixgbe_write_reg(hw, 35872U, 0U); /* disable the SDP clock-out */
}
ixgbe_read_reg(hw, 8U); /* STATUS read: flush posted register writes */
return;
}
}
/*
 * ixgbe_ptp_read - cyclecounter read callback.
 * Recovers the adapter from the embedded cyclecounter (container_of,
 * expanded by CIL to the fixed 0xfffffffffffddf18UL offset) and returns
 * the raw 64-bit SYSTIM value (low word 0x8C0C first, then high 0x8C10).
 */
static cycle_t ixgbe_ptp_read(struct cyclecounter const *cc ) { struct ixgbe_adapter *adapter ; struct cyclecounter const *__mptr ; struct ixgbe_hw *hw ; u64 stamp ; u32 tmp ; u32 tmp___0 ;
{
__mptr = cc;
adapter = (struct ixgbe_adapter *)__mptr + 0xfffffffffffddf18UL;
hw = & adapter->hw;
stamp = 0ULL;
tmp = ixgbe_read_reg(hw, 35852U); /* SYSTIML */
stamp = (unsigned long long )tmp | stamp;
tmp___0 = ixgbe_read_reg(hw, 35856U); /* SYSTIMH */
stamp = ((unsigned long long )tmp___0 << 32) | stamp;
return (stamp);
}
}
/*
 * ixgbe_ptp_adjfreq - adjust the clock frequency by `ppb` parts per billion
 * (head of the function; the tail continues on the next line).
 * Computes diff = base_incval * |ppb| / 1e9 and applies it to the increment
 * value written to TIMINCA.
 */
static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp , s32 ppb ) { struct ixgbe_adapter *adapter ; struct ptp_clock_info const *__mptr ; struct ixgbe_hw *hw ; u64 freq ; u32 diff ; u32 incval ; int neg_adj ; u32 __var ; u64 tmp ;
{
__mptr = (struct ptp_clock_info const *)ptp;
adapter = (struct ixgbe_adapter *)__mptr + 0xfffffffffffde050UL; /* container_of */
hw = & adapter->hw;
neg_adj = 0;
if (ppb < 0) { neg_adj = 1; ppb = - ppb; } else { }
__asm__ volatile ("mfence": : : "memory"); /* barrier before the volatile read below */
__var = 0U;
incval = *((u32 volatile *)(& adapter->base_incval)); /* ACCESS_ONCE(base_incval) */
freq = (u64 )incval;
freq = (u64 )ppb * freq;
tmp = div_u64(freq, 1000000000U);
diff = (u32 )tmp;
incval = neg_adj != 0 ?
/* --- tail of ixgbe_ptp_adjfreq(): write the adjusted increment --- */
incval - diff : incval + diff;
switch ((unsigned int )hw->mac.type) {
case 3U: /* X540 */
ixgbe_write_reg(hw, 35860U, incval); /* 0x8C14 TIMINCA */
goto ldv_55381;
case 2U: /* 82599: OR in the 1<<24 increment period */
ixgbe_write_reg(hw, 35860U, incval | 16777216U);
goto ldv_55381;
default: ;
goto ldv_55381;
}
ldv_55381: ;
return (0);
}
}
/*
 * ixgbe_ptp_adjtime - shift the software timecounter by a signed ns delta.
 * NOTE(review): ldv_spin_lock() is the LDV model of
 * spin_lock_irqsave(&adapter->tmreg_lock, flags); `flags` is intentionally
 * never written here -- the verifier tracks lock state separately.
 */
static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp , s64 delta ) { struct ixgbe_adapter *adapter ; struct ptp_clock_info const *__mptr ; unsigned long flags ;
{
__mptr = (struct ptp_clock_info const *)ptp;
adapter = (struct ixgbe_adapter *)__mptr + 0xfffffffffffde050UL; /* container_of */
ldv_spin_lock();
timecounter_adjtime(& adapter->tc, delta);
spin_unlock_irqrestore(& adapter->tmreg_lock, flags);
ixgbe_ptp_setup_sdp(adapter); /* re-align the SDP clock-out to the new time */
return (0);
}
}
/*
 * ixgbe_ptp_gettime - read the current timecounter value under the model
 * lock and convert it to a timespec for the PTP core.
 */
static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp , struct timespec *ts ) { struct ixgbe_adapter *adapter ; struct ptp_clock_info const *__mptr ; u64 ns ; unsigned long flags ;
{
__mptr = (struct ptp_clock_info const *)ptp;
adapter = (struct ixgbe_adapter *)__mptr + 0xfffffffffffde050UL;
ldv_spin_lock();
ns = timecounter_read(& adapter->tc);
spin_unlock_irqrestore(& adapter->tmreg_lock, flags);
*ts = ns_to_timespec((s64 const )ns);
return (0);
}
}
/*
 * ixgbe_ptp_settime - reset the timecounter to an absolute timespec.
 */
static int ixgbe_ptp_settime(struct ptp_clock_info *ptp , struct timespec const *ts ) { struct ixgbe_adapter *adapter ; struct ptp_clock_info const *__mptr ; u64 ns ; unsigned long flags ; s64 tmp ;
{
__mptr = (struct ptp_clock_info const *)ptp;
adapter = (struct ixgbe_adapter *)__mptr + 0xfffffffffffde050UL;
tmp = timespec_to_ns(ts);
ns = (u64 )tmp;
ldv_spin_lock();
timecounter_init(& adapter->tc, (struct cyclecounter const *)(& adapter->cc), ns);
spin_unlock_irqrestore(& adapter->tmreg_lock, flags);
ixgbe_ptp_setup_sdp(adapter);
return (0);
}
}
/*
 * ixgbe_ptp_feature_enable - head only (body continues on the next line):
 * handles the PPS enable/disable request from the PTP core.
 */
static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp , struct ptp_clock_request *rq , int on ) { struct ixgbe_adapter *adapter ; struct ptp_clock_info const *__mptr ;
{
__mptr = (struct ptp_clock_info const *)ptp;
adapter = (struct ixgbe_adapter *)__mptr + 0xfffffffffffde050UL;
/* --- tail of ixgbe_ptp_feature_enable() --- */
if ((unsigned int )rq->type == 2U) { /* PTP_CLK_REQ_PPS */
switch ((unsigned int )adapter->hw.mac.type) {
case 3U: ; /* X540 is the only MAC handled for PPS here */
if (on != 0) {
adapter->flags2 = adapter->flags2 | 1024U; /* set PPS-enabled flag (bit 10) */
} else {
adapter->flags2 = adapter->flags2 & 4294966271U; /* clear bit 10 */
}
ixgbe_ptp_setup_sdp(adapter);
return (0);
default: ;
goto ldv_55420;
}
ldv_55420: ;
} else {
}
return (-524); /* -ENOTSUPP */
}
}
/*
 * ixgbe_ptp_check_pps_event - forward a PPS interrupt to the PTP core as a
 * clock event (event.type = 2, i.e. PTP_CLOCK_PPS). Only X540
 * (mac.type == 3U) raises the event; no-op when no PHC is registered.
 * `eicr` is accepted but unused in this build.
 */
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter , u32 eicr ) { struct ixgbe_hw *hw ; struct ptp_clock_event event ;
{
hw = & adapter->hw;
event.type = 2;
if ((unsigned long )adapter->ptp_clock == (unsigned long )((struct ptp_clock *)0)) {
return;
} else {
}
switch ((unsigned int )hw->mac.type) {
case 3U:
ptp_clock_event(adapter->ptp_clock, & event);
goto ldv_55428;
default: ;
goto ldv_55428;
}
ldv_55428: ;
return;
}
}
/*
 * ixgbe_ptp_overflow_check - periodically read the clock so the software
 * timecounter caches a fresh snapshot before the hardware counter can wrap.
 * The 7500-jiffy window is the overflow period -- NOTE(review): looks like
 * 30 s at HZ=250, confirm the HZ assumption.
 */
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter ) { bool timeout ; struct timespec ts ;
{
timeout = (bool )((long )((adapter->last_overflow_check - (unsigned long )jiffies) + 7500UL) < 0L); /* time_after() expansion */
if ((int )timeout) {
ixgbe_ptp_gettime(& adapter->ptp_caps, & ts);
adapter->last_overflow_check = jiffies;
} else {
}
return;
}
}
/*
 * ixgbe_ptp_rx_hang - detect a stalled RX timestamp latch. If TSYNCRXCTL
 * (0x5188) reports a valid latched timestamp but no RX timestamp has been
 * consumed within 1250 jiffies, a read of RXSTMPH (0x51A4) unlatches the
 * register and the event is logged.
 */
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 tsyncrxctl ; u32 tmp ; unsigned long rx_event ;
{
hw = & adapter->hw;
tmp = ixgbe_read_reg(hw, 20872U); /* TSYNCRXCTL */
tsyncrxctl = tmp;
if ((tsyncrxctl & 1U) == 0U) { /* valid bit clear: nothing latched */
adapter->last_rx_ptp_check = jiffies;
return;
} else {
}
rx_event = adapter->last_rx_ptp_check;
if ((long )(rx_event - adapter->last_rx_timestamp) < 0L) { /* use the later of the two */
rx_event = adapter->last_rx_timestamp;
} else {
}
if ((long )((rx_event - (unsigned long )jiffies) + 1250UL) < 0L) {
ixgbe_read_reg(hw, 20900U); /* RXSTMPH read clears the latch */
adapter->last_rx_ptp_check = jiffies;
if ((int )adapter->msg_enable & 1) {
netdev_warn((struct net_device const *)adapter->netdev, "clearing RX Timestamp hang\n");
} else {
}
} else {
}
return;
}
}
/*
 * ixgbe_ptp_tx_hwtstamp - head only (tail on the next line): convert the
 * latched TX timestamp and deliver it to the stack.
 */
static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; struct skb_shared_hwtstamps shhwtstamps ; u64 regval ; u64 ns ; unsigned long
/* --- tail of ixgbe_ptp_tx_hwtstamp() --- */
flags ; u32 tmp ; u32 tmp___0 ;
{
hw = & adapter->hw;
regval = 0ULL;
tmp = ixgbe_read_reg(hw, 35844U); /* TXSTMPL */
regval = (unsigned long long )tmp | regval;
tmp___0 = ixgbe_read_reg(hw, 35848U); /* TXSTMPH */
regval = ((unsigned long long )tmp___0 << 32) | regval;
ldv_spin_lock(); /* LDV model of spin_lock_irqsave(&tmreg_lock, flags) */
ns = timecounter_cyc2time(& adapter->tc, regval);
spin_unlock_irqrestore(& adapter->tmreg_lock, flags);
memset((void *)(& shhwtstamps), 0, 8UL); /* sizeof(shhwtstamps) per CIL -- presumably a single ktime_t; confirm */
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(adapter->ptp_tx_skb, & shhwtstamps);
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = (struct sk_buff *)0;
clear_bit_unlock(9L, (unsigned long volatile *)(& adapter->state)); /* release the TX-timestamp-in-progress bit */
return;
}
}
/*
 * ixgbe_ptp_tx_hwtstamp_work - deferred poll for the TX timestamp.
 * Gives up (freeing the skb and releasing state bit 9) after 3750 jiffies;
 * otherwise delivers the timestamp when TSYNCTXCTL (0x8C00) bit 0 reports
 * it valid, else re-queues itself.
 */
static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work ) { struct ixgbe_adapter *adapter ; struct work_struct const *__mptr ; struct ixgbe_hw *hw ; bool timeout ; u32 tsynctxctl ;
{
__mptr = (struct work_struct const *)work;
adapter = (struct ixgbe_adapter *)__mptr + 0xfffffffffffddfe8UL; /* container_of(work, ..., ptp_tx_work) */
hw = & adapter->hw;
timeout = (bool )((long )((adapter->ptp_tx_start - (unsigned long )jiffies) + 3750UL) < 0L);
if ((int )timeout) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = (struct sk_buff *)0;
clear_bit_unlock(9L, (unsigned long volatile *)(& adapter->state));
if ((int )adapter->msg_enable & 1) {
netdev_warn((struct net_device const *)adapter->netdev, "clearing Tx Timestamp hang\n");
} else {
}
return;
} else {
}
tsynctxctl = ixgbe_read_reg(hw, 35840U); /* TSYNCTXCTL */
if ((int )tsynctxctl & 1) { /* timestamp valid */
ixgbe_ptp_tx_hwtstamp(adapter);
} else {
schedule_work___0(& adapter->ptp_tx_work); /* not ready yet: poll again */
}
return;
}
}
/*
 * ixgbe_ptp_rx_hwtstamp - head only (tail on the next line): attach the
 * latched RX hardware timestamp to `skb`.
 */
void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter , struct sk_buff *skb ) { struct ixgbe_hw *hw ; struct skb_shared_hwtstamps *shhwtstamps ; u64 regval ; u64 ns ; u32 tsyncrxctl ; unsigned long flags ; u32 tmp ; u32 tmp___0 ;
{
hw = & adapter->hw;
regval = 0ULL;
tsyncrxctl = ixgbe_read_reg(hw, 20872U); /* TSYNCRXCTL */
if ((tsyncrxctl & 1U) == 0U) { /* no RX timestamp latched */
return;
} else {
}
tmp = ixgbe_read_reg(hw, 20968U); /* RXSTMPL */
regval = (unsigned long long )tmp |
regval; tmp___0 = ixgbe_read_reg(hw, 20900U); regval = ((unsigned long long )tmp___0 << 32) | regval; ldv_spin_lock(); ns = timecounter_cyc2time(& adapter->tc, regval); spin_unlock_irqrestore(& adapter->tmreg_lock, flags); shhwtstamps = skb_hwtstamps(skb); shhwtstamps->hwtstamp = ns_to_ktime(ns); adapter->last_rx_timestamp = jiffies; return; } } int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter , struct ifreq *ifr ) { struct hwtstamp_config *config ; unsigned long tmp ; { config = & adapter->tstamp_config; tmp = copy_to_user(ifr->ifr_ifru.ifru_data, (void const *)config, 12UL); return (tmp != 0UL ? -14 : 0); } } static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter , struct hwtstamp_config *config ) { struct ixgbe_hw *hw ; u32 tsync_tx_ctl ; u32 tsync_rx_ctl ; u32 tsync_rx_mtrl ; bool is_l2 ; u32 regval ; { hw = & adapter->hw; tsync_tx_ctl = 16U; tsync_rx_ctl = 16U; tsync_rx_mtrl = 20905984U; is_l2 = 0; if (config->flags != 0) { return (-22); } else { } switch (config->tx_type) { case 0: tsync_tx_ctl = 0U; case 1: ; goto ldv_55509; default: ; return (-34); } ldv_55509: ; switch (config->rx_filter) { case 0: tsync_rx_ctl = 0U; tsync_rx_mtrl = 0U; goto ldv_55512; case 4: tsync_rx_ctl = tsync_rx_ctl | 2U; tsync_rx_mtrl = tsync_rx_mtrl; goto ldv_55512; case 5: tsync_rx_ctl = tsync_rx_ctl | 2U; tsync_rx_mtrl = tsync_rx_mtrl | 1U; goto ldv_55512; case 12: ; case 9: ; case 6: ; case 13: ; case 10: ; case 7: ; case 14: ; case 11: ; case 8: tsync_rx_ctl = tsync_rx_ctl | 10U; is_l2 = 1; config->rx_filter = 12; goto ldv_55512; case 3: ; case 1: ; default: config->rx_filter = 0; return (-34); } ldv_55512: ; if ((unsigned int )hw->mac.type == 1U) { if ((tsync_rx_ctl | tsync_tx_ctl) != 0U) { return (-34); } else { } return (0); } else { } if ((int )is_l2) { ixgbe_write_reg(hw, 20788U, 3221260535U); } else { ixgbe_write_reg(hw, 20788U, 0U); } regval = ixgbe_read_reg(hw, 35840U); regval = regval & 4294967279U; regval = regval | tsync_tx_ctl; 
ixgbe_write_reg(hw, 35840U, regval); regval = ixgbe_read_reg(hw, 20872U); regval = regval & 4294967265U; regval = regval | tsync_rx_ctl; ixgbe_write_reg(hw, 20872U, regval); ixgbe_write_reg(hw, 20768U, tsync_rx_mtrl); ixgbe_read_reg(hw, 8U); regval = ixgbe_read_reg(hw, 35848U); regval = ixgbe_read_reg(hw, 20900U); return (0); } } int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter , struct ifreq *ifr ) { struct hwtstamp_config config ; int err ; unsigned long tmp ; unsigned long tmp___0 ; { tmp = copy_from_user((void *)(& config), (void const *)ifr->ifr_ifru.ifru_data, 12UL); if (tmp != 0UL) { return (-14); } else { } err = ixgbe_ptp_set_timestamp_mode(adapter, & config); if (err != 0) { return (err); } else { } memcpy((void *)(& adapter->tstamp_config), (void const *)(& config), 12UL); tmp___0 = copy_to_user(ifr->ifr_ifru.ifru_data, (void const *)(& config), 12UL); return (tmp___0 != 0UL ? -14 : 0); } } void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; u32 incval ; u32 shift ; unsigned long flags ; u32 __var ; { hw = & adapter->hw; incval = 0U; shift = 0U; switch (adapter->link_speed) { case 8U: incval = 1342177280U; shift = 21U; goto ldv_55541; case 32U: incval = 1073741824U; shift = 24U; goto ldv_55541; case 128U: ; default: incval = 1717986918U; shift = 28U; goto ldv_55541; } ldv_55541: ; switch ((unsigned int )hw->mac.type) { case 3U: ixgbe_write_reg(hw, 35860U, incval); goto ldv_55546; case 2U: incval = incval >> 7; shift = shift - 7U; ixgbe_write_reg(hw, 35860U, incval | 16777216U); goto ldv_55546; default: ; return; } ldv_55546: __var = 0U; *((u32 volatile *)(& adapter->base_incval)) = incval; __asm__ volatile ("mfence": : : "memory"); ldv_spin_lock(); memset((void *)(& adapter->cc), 0, 24UL); adapter->cc.read = & ixgbe_ptp_read; adapter->cc.mask = 0xffffffffffffffffULL; adapter->cc.shift = shift; adapter->cc.mult = 1U; spin_unlock_irqrestore(& adapter->tmreg_lock, flags); return; } } void ixgbe_ptp_reset(struct 
ixgbe_adapter *adapter ) { struct ixgbe_hw *hw ; unsigned long flags ; ktime_t tmp ; { hw = & adapter->hw; ixgbe_write_reg(hw, 35852U, 0U); ixgbe_write_reg(hw, 35856U, 0U); ixgbe_read_reg(hw, 8U); ixgbe_ptp_set_timestamp_mode(adapter, & adapter->tstamp_config); ixgbe_ptp_start_cyclecounter(adapter); ldv_spin_lock(); tmp = ktime_get_real(); timecounter_init(& adapter->tc, (struct cyclecounter const *)(& adapter->cc), (u64 )tmp.tv64); spin_unlock_irqrestore(& adapter->tmreg_lock, flags); ixgbe_ptp_setup_sdp(adapter); return; } } static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter ) { struct net_device *netdev ; long err ; bool tmp ; int tmp___0 ; bool tmp___1 ; { netdev = adapter->netdev; tmp = IS_ERR_OR_NULL((void const *)adapter->ptp_clock); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } switch ((unsigned int )adapter->hw.mac.type) { case 3U: snprintf((char *)(& adapter->ptp_caps.name), 16UL, "%s", (char *)(& netdev->name)); adapter->ptp_caps.owner = & __this_module; adapter->ptp_caps.max_adj = 250000000; adapter->ptp_caps.n_alarm = 0; adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 1; adapter->ptp_caps.adjfreq = & ixgbe_ptp_adjfreq; adapter->ptp_caps.adjtime = & ixgbe_ptp_adjtime; adapter->ptp_caps.gettime64 = & ixgbe_ptp_gettime; adapter->ptp_caps.settime64 = & ixgbe_ptp_settime; adapter->ptp_caps.enable = & ixgbe_ptp_feature_enable; goto ldv_55562; case 2U: snprintf((char *)(& adapter->ptp_caps.name), 16UL, "%s", (char *)(& netdev->name)); adapter->ptp_caps.owner = & __this_module; adapter->ptp_caps.max_adj = 250000000; adapter->ptp_caps.n_alarm = 0; adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = & ixgbe_ptp_adjfreq; adapter->ptp_caps.adjtime = & ixgbe_ptp_adjtime; adapter->ptp_caps.gettime64 = & ixgbe_ptp_gettime; adapter->ptp_caps.settime64 = & ixgbe_ptp_settime; adapter->ptp_caps.enable = & 
ixgbe_ptp_feature_enable; goto ldv_55562; default: adapter->ptp_clock = (struct ptp_clock *)0; return (-95); } ldv_55562: adapter->ptp_clock = ptp_clock_register(& adapter->ptp_caps, & (adapter->pdev)->dev); tmp___1 = IS_ERR((void const *)adapter->ptp_clock); if ((int )tmp___1) { err = PTR_ERR((void const *)adapter->ptp_clock); adapter->ptp_clock = (struct ptp_clock *)0; dev_err((struct device const *)(& (adapter->pdev)->dev), "ptp_clock_register failed\n"); return ((int )err); } else { _dev_info((struct device const *)(& (adapter->pdev)->dev), "registered PHC device on %s\n", (char *)(& netdev->name)); } adapter->tstamp_config.rx_filter = 0; adapter->tstamp_config.tx_type = 0; return (0); } } void ixgbe_ptp_init(struct ixgbe_adapter *adapter ) { struct lock_class_key __key ; int tmp ; struct lock_class_key __key___0 ; atomic_long_t __constr_expr_0 ; { spinlock_check(& adapter->tmreg_lock); __raw_spin_lock_init(& adapter->tmreg_lock.__annonCompField18.rlock, "&(&adapter->tmreg_lock)->rlock", & __key); tmp = ixgbe_ptp_create_clock(adapter); if (tmp != 0) { return; } else { } __init_work(& adapter->ptp_tx_work, 0); __constr_expr_0.counter = 137438953408L; adapter->ptp_tx_work.data = __constr_expr_0; lockdep_init_map(& adapter->ptp_tx_work.lockdep_map, "(&adapter->ptp_tx_work)", & __key___0, 0); INIT_LIST_HEAD(& adapter->ptp_tx_work.entry); adapter->ptp_tx_work.func = & ixgbe_ptp_tx_hwtstamp_work; ixgbe_ptp_reset(adapter); set_bit(8L, (unsigned long volatile *)(& adapter->state)); return; } } void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter ) { int tmp ; { tmp = test_and_clear_bit(8L, (unsigned long volatile *)(& adapter->state)); if (tmp == 0) { return; } else { } ixgbe_write_reg(& adapter->hw, 35872U, 0U); ldv_cancel_work_sync_611(& adapter->ptp_tx_work); if ((unsigned long )adapter->ptp_tx_skb != (unsigned long )((struct sk_buff *)0)) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = (struct sk_buff *)0; clear_bit_unlock(9L, (unsigned long 
volatile *)(& adapter->state)); } else { } return; } } void ixgbe_ptp_stop(struct ixgbe_adapter *adapter ) { { ixgbe_ptp_suspend(adapter); if ((unsigned long )adapter->ptp_clock != (unsigned long )((struct ptp_clock *)0)) { ptp_clock_unregister(adapter->ptp_clock); adapter->ptp_clock = (struct ptp_clock *)0; _dev_info((struct device const *)(& (adapter->pdev)->dev), "removed PHC on %s\n", (char *)(& (adapter->netdev)->name)); } else { } return; } } void work_init_9(void) { { ldv_work_9_0 = 0; ldv_work_9_1 = 0; ldv_work_9_2 = 0; ldv_work_9_3 = 0; return; } } void call_and_disable_all_9(int state ) { { if (ldv_work_9_0 == state) { call_and_disable_work_9(ldv_work_struct_9_0); } else { } if (ldv_work_9_1 == state) { call_and_disable_work_9(ldv_work_struct_9_1); } else { } if (ldv_work_9_2 == state) { call_and_disable_work_9(ldv_work_struct_9_2); } else { } if (ldv_work_9_3 == state) { call_and_disable_work_9(ldv_work_struct_9_3); } else { } return; } } void invoke_work_9(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_9_0 == 2 || ldv_work_9_0 == 3) { ldv_work_9_0 = 4; ixgbe_ptp_tx_hwtstamp_work(ldv_work_struct_9_0); ldv_work_9_0 = 1; } else { } goto ldv_55589; case 1: ; if (ldv_work_9_1 == 2 || ldv_work_9_1 == 3) { ldv_work_9_1 = 4; ixgbe_ptp_tx_hwtstamp_work(ldv_work_struct_9_0); ldv_work_9_1 = 1; } else { } goto ldv_55589; case 2: ; if (ldv_work_9_2 == 2 || ldv_work_9_2 == 3) { ldv_work_9_2 = 4; ixgbe_ptp_tx_hwtstamp_work(ldv_work_struct_9_0); ldv_work_9_2 = 1; } else { } goto ldv_55589; case 3: ; if (ldv_work_9_3 == 2 || ldv_work_9_3 == 3) { ldv_work_9_3 = 4; ixgbe_ptp_tx_hwtstamp_work(ldv_work_struct_9_0); ldv_work_9_3 = 1; } else { } goto ldv_55589; default: ldv_stop(); } ldv_55589: ; return; } } void activate_work_9(struct work_struct *work , int state ) { { if (ldv_work_9_0 == 0) { ldv_work_struct_9_0 = work; ldv_work_9_0 = state; return; } else { } if (ldv_work_9_1 == 0) { ldv_work_struct_9_1 = work; ldv_work_9_1 = 
/* --- tail of activate_work_9() --- */
state;
return;
} else {
}
if (ldv_work_9_2 == 0) {
ldv_work_struct_9_2 = work;
ldv_work_9_2 = state;
return;
} else {
}
if (ldv_work_9_3 == 0) {
ldv_work_struct_9_3 = work;
ldv_work_9_3 = state;
return;
} else {
}
return;
}
}
/*
 * call_and_disable_work_9 - LDV workqueue model: if `work` matches a slot
 * that is pending/queued (state 2 or 3), run its handler once and mark the
 * slot idle (1).
 */
void call_and_disable_work_9(struct work_struct *work )
{
{
if ((ldv_work_9_0 == 2 || ldv_work_9_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_0) {
ixgbe_ptp_tx_hwtstamp_work(work);
ldv_work_9_0 = 1;
return;
} else {
}
if ((ldv_work_9_1 == 2 || ldv_work_9_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_1) {
ixgbe_ptp_tx_hwtstamp_work(work);
ldv_work_9_1 = 1;
return;
} else {
}
if ((ldv_work_9_2 == 2 || ldv_work_9_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_2) {
ixgbe_ptp_tx_hwtstamp_work(work);
ldv_work_9_2 = 1;
return;
} else {
}
if ((ldv_work_9_3 == 2 || ldv_work_9_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_3) {
ixgbe_ptp_tx_hwtstamp_work(work);
ldv_work_9_3 = 1;
return;
} else {
}
return;
}
}
/*
 * disable_work_9 - LDV model of cancel_work_sync(): mark a matching
 * pending/queued slot idle (1) without running the handler.
 */
void disable_work_9(struct work_struct *work )
{
{
if ((ldv_work_9_0 == 3 || ldv_work_9_0 == 2) && (unsigned long )ldv_work_struct_9_0 == (unsigned long )work) {
ldv_work_9_0 = 1;
} else {
}
if ((ldv_work_9_1 == 3 || ldv_work_9_1 == 2) && (unsigned long )ldv_work_struct_9_1 == (unsigned long )work) {
ldv_work_9_1 = 1;
} else {
}
if ((ldv_work_9_2 == 3 || ldv_work_9_2 == 2) && (unsigned long )ldv_work_struct_9_2 == (unsigned long )work) {
ldv_work_9_2 = 1;
} else {
}
if ((ldv_work_9_3 == 3 || ldv_work_9_3 == 2) && (unsigned long )ldv_work_struct_9_3 == (unsigned long )work) {
ldv_work_9_3 = 1;
} else {
}
return;
}
}
/* LDV model wrappers around the kernel error-pointer helpers. */
__inline static long PTR_ERR(void const *ptr ) { long tmp ;
{
tmp = ldv_ptr_err(ptr);
return (tmp);
}
}
__inline static bool IS_ERR(void const *ptr ) { bool tmp ;
{
tmp = ldv_is_err(ptr);
return (tmp);
}
}
__inline static bool IS_ERR_OR_NULL(void const *ptr ) { bool tmp ;
{
tmp = ldv_is_err_or_null(ptr);
return (tmp);
}
}
/* head of the spin_unlock_irqrestore() wrapper; body continues on the next line */
__inline static void
spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) { { ldv_spin_unlock(); ldv_spin_unlock_irqrestore_580(lock, flags); return; } } bool ldv_queue_work_on_583(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_584(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_585(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_586(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } } bool ldv_queue_delayed_work_on_587(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_593(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_599(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t 
flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_601(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_603(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_604(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_605(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_606(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_607(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_608(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_609(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_610(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } bool ldv_cancel_work_sync_611(struct work_struct 
*ldv_func_arg1 ) { ldv_func_ret_type___16 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_9(ldv_func_arg1); return (ldv_func_res); } } bool ldv_queue_work_on_632(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_634(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_633(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_636(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_635(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_642(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_659(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_650(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_658(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_652(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_648(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_656(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_657(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_653(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_654(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_655(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; void 
ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg , u8 *pfc_en ) ; void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg , int direction , u16 *refill ) ; void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg , u16 *max ) ; void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg , int direction , u8 *bwgid ) ; void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg , int direction , u8 *ptype ) ; void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg , int direction , u8 *map ) ; s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type , u8 *prio_tc ) ; void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw , u8 *map ) ; s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw , u8 pfc_en ) ; s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *prio_type ) ; s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type ) ; s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type ) ; s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw , u8 pfc_en , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type ) ; s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw , u8 pfc_en , u8 *prio_tc ) ; s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type , u8 *prio_tc ) ; s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type ) ; s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type , u8 *prio_tc ) ; s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw , u8 pfc_en , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type , u8 *prio_tc ) ; static s32 ixgbe_ieee_credits(__u8 *bw , __u16 *refill , __u16 *max , int max_frame ) { int min_percent ; int min_credit ; int multiplier ; int i ; int val ; int _min1 ; int _min2 ; { 
min_percent = 100; min_credit = (max_frame / 2 + 63) / 64; i = 0; goto ldv_55406; ldv_55405: ; if ((int )*(bw + (unsigned long )i) < min_percent && (unsigned int )*(bw + (unsigned long )i) != 0U) { min_percent = (int )*(bw + (unsigned long )i); } else { } i = i + 1; ldv_55406: ; if (i <= 7) { goto ldv_55405; } else { } multiplier = min_credit / min_percent + 1; i = 0; goto ldv_55413; ldv_55412: _min1 = (int )*(bw + (unsigned long )i) * multiplier; _min2 = 511; val = _min1 < _min2 ? _min1 : _min2; if (val < min_credit) { val = min_credit; } else { } *(refill + (unsigned long )i) = (__u16 )val; *(max + (unsigned long )i) = (unsigned int )*(bw + (unsigned long )i) != 0U ? (__u16 )(((int )*(bw + (unsigned long )i) * 4095) / 100) : (__u16 )min_credit; i = i + 1; ldv_55413: ; if (i <= 7) { goto ldv_55412; } else { } return (0); } } s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw , struct ixgbe_dcb_config *dcb_config , int max_frame , u8 direction ) { struct tc_bw_alloc *p ; int min_credit ; int min_multiplier ; int min_percent ; u32 credit_refill ; u32 credit_max ; u16 link_percentage ; u8 bw_percent ; u8 i ; int _min1 ; int _min2 ; { min_percent = 100; credit_refill = 0U; credit_max = 0U; link_percentage = 0U; bw_percent = 0U; if ((unsigned long )dcb_config == (unsigned long )((struct ixgbe_dcb_config *)0)) { return (-1); } else { } min_credit = (max_frame / 2 + 63) / 64; i = 0U; goto ldv_55431; ldv_55430: p = (struct tc_bw_alloc *)(& dcb_config->tc_config[(int )i].path) + (unsigned long )direction; bw_percent = dcb_config->bw_percentage[(int )direction][(int )p->bwg_id]; link_percentage = (u16 )p->bwg_percent; link_percentage = (u16 )(((int )link_percentage * (int )bw_percent) / 100); if ((unsigned int )link_percentage != 0U && (int )link_percentage < min_percent) { min_percent = (int )link_percentage; } else { } i = (u8 )((int )i + 1); ldv_55431: ; if ((unsigned int )i <= 7U) { goto ldv_55430; } else { } min_multiplier = min_credit / min_percent + 1; i = 0U; 
/* --- tail of ixgbe_dcb_calculate_tc_credits(): second pass over the 8
 * traffic classes, computing per-TC refill/max credits. --- */
goto ldv_55437;
ldv_55436:
p = (struct tc_bw_alloc *)(& dcb_config->tc_config[(int )i].path) + (unsigned long )direction;
bw_percent = dcb_config->bw_percentage[(int )direction][(int )p->bwg_id];
link_percentage = (u16 )p->bwg_percent;
/* effective link share = bwg_percent * group bandwidth percentage / 100 */
link_percentage = (u16 )(((int )link_percentage * (int )bw_percent) / 100);
if ((unsigned int )p->bwg_percent != 0U && (unsigned int )link_percentage == 0U) {
link_percentage = 1U; /* keep a minimal share for a nonzero config */
} else {
}
p->link_percent = (unsigned char )link_percentage;
/* refill credits, clamped to 511 */
_min1 = (int )link_percentage * min_multiplier;
_min2 = 511;
credit_refill = (u32 )(_min1 < _min2 ? _min1 : _min2);
p->data_credits_refill = (unsigned short )credit_refill;
credit_max = (u32 )(((int )link_percentage * 4095) / 100);
if (credit_max != 0U && (u32 )min_credit > credit_max) {
credit_max = (u32 )min_credit; /* never below one max-frame worth of credit */
} else {
}
if ((unsigned int )direction == 0U) { /* TX direction also sets descriptor credits */
if (((unsigned int )hw->mac.type == 1U && credit_max != 0U) && credit_max <= 512U) {
credit_max = 513U; /* 82598 (mac.type 1U) minimum descriptor credit */
} else {
}
dcb_config->tc_config[(int )i].desc_credits_max = (unsigned short )credit_max;
} else {
}
p->data_credits_max = (unsigned short )credit_max;
i = (u8 )((int )i + 1);
ldv_55437: ;
if ((unsigned int )i <= 7U) {
goto ldv_55436;
} else {
}
return (0);
}
}
/*
 * ixgbe_dcb_unpack_pfc - collapse per-TC PFC settings into a bitmask:
 * bit `tc` of *pfc_en is set when that traffic class has dcb_pfc nonzero.
 */
void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg , u8 *pfc_en ) { struct tc_configuration *tc_config ; int tc ;
{
tc_config = (struct tc_configuration *)(& cfg->tc_config);
*pfc_en = 0U;
tc = 0;
goto ldv_55446;
ldv_55445: ;
if ((unsigned int )(tc_config + (unsigned long )tc)->dcb_pfc != 0U) {
*pfc_en = (u8 )((int )((signed char )*pfc_en) | (int )((signed char )(1 << tc)));
} else {
}
tc = tc + 1;
ldv_55446: ;
if (tc <= 7) {
goto ldv_55445;
} else {
}
return;
}
}
/*
 * ixgbe_dcb_unpack_refill - head only (tail on the next line): copy the
 * per-TC refill credits for `direction` into the `refill` array.
 */
void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg , int direction , u16 *refill ) { struct tc_configuration *tc_config ; int tc ;
{
tc_config = (struct tc_configuration *)(& cfg->tc_config);
tc = 0;
goto ldv_55456;
ldv_55455:
*(refill + (unsigned long
)tc)->path[direction].data_credits_refill; tc = tc + 1; ldv_55456: ; if (tc <= 7) { goto ldv_55455; } else { } return; } } void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg , u16 *max ) { struct tc_configuration *tc_config ; int tc ; { tc_config = (struct tc_configuration *)(& cfg->tc_config); tc = 0; goto ldv_55465; ldv_55464: *(max + (unsigned long )tc) = (tc_config + (unsigned long )tc)->desc_credits_max; tc = tc + 1; ldv_55465: ; if (tc <= 7) { goto ldv_55464; } else { } return; } } void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg , int direction , u8 *bwgid ) { struct tc_configuration *tc_config ; int tc ; { tc_config = (struct tc_configuration *)(& cfg->tc_config); tc = 0; goto ldv_55475; ldv_55474: *(bwgid + (unsigned long )tc) = (tc_config + (unsigned long )tc)->path[direction].bwg_id; tc = tc + 1; ldv_55475: ; if (tc <= 7) { goto ldv_55474; } else { } return; } } void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg , int direction , u8 *ptype ) { struct tc_configuration *tc_config ; int tc ; { tc_config = (struct tc_configuration *)(& cfg->tc_config); tc = 0; goto ldv_55485; ldv_55484: *(ptype + (unsigned long )tc) = (u8 )(tc_config + (unsigned long )tc)->path[direction].prio_type; tc = tc + 1; ldv_55485: ; if (tc <= 7) { goto ldv_55484; } else { } return; } } u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg , int direction , u8 up___0 ) { struct tc_configuration *tc_config ; u8 prio_mask ; u8 tc ; { tc_config = (struct tc_configuration *)(& cfg->tc_config); prio_mask = (u8 )(1 << (int )up___0); tc = cfg->num_tcs.pg_tcs; if ((unsigned int )tc == 0U) { return (0U); } else { } tc = (u8 )((int )tc - 1); goto ldv_55497; ldv_55496: ; if ((unsigned int )((int )(tc_config + (unsigned long )tc)->path[direction].up_to_tc_bitmap & (int )prio_mask) != 0U) { goto ldv_55495; } else { } tc = (u8 )((int )tc - 1); ldv_55497: ; if ((unsigned int )tc != 0U) { goto ldv_55496; } else { } ldv_55495: ; return (tc); } } void ixgbe_dcb_unpack_map(struct 
ixgbe_dcb_config *cfg , int direction , u8 *map ) { u8 up___0 ; { up___0 = 0U; goto ldv_55505; ldv_55504: *(map + (unsigned long )up___0) = ixgbe_dcb_get_tc_from_up(cfg, direction, (int )up___0); up___0 = (u8 )((int )up___0 + 1); ldv_55505: ; if ((unsigned int )up___0 <= 7U) { goto ldv_55504; } else { } return; } } s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw , struct ixgbe_dcb_config *dcb_config ) { u8 pfc_en ; u8 ptype[8U] ; u8 bwgid[8U] ; u8 prio_tc[8U] ; u16 refill[8U] ; u16 max[8U] ; s32 tmp ; s32 tmp___0 ; { ixgbe_dcb_unpack_pfc(dcb_config, & pfc_en); ixgbe_dcb_unpack_refill(dcb_config, 0, (u16 *)(& refill)); ixgbe_dcb_unpack_max(dcb_config, (u16 *)(& max)); ixgbe_dcb_unpack_bwgid(dcb_config, 0, (u8 *)(& bwgid)); ixgbe_dcb_unpack_prio(dcb_config, 0, (u8 *)(& ptype)); ixgbe_dcb_unpack_map(dcb_config, 0, (u8 *)(& prio_tc)); switch ((unsigned int )hw->mac.type) { case 1U: tmp = ixgbe_dcb_hw_config_82598(hw, (int )pfc_en, (u16 *)(& refill), (u16 *)(& max), (u8 *)(& bwgid), (u8 *)(& ptype)); return (tmp); case 2U: ; case 3U: ; case 4U: ; case 5U: tmp___0 = ixgbe_dcb_hw_config_82599(hw, (int )pfc_en, (u16 *)(& refill), (u16 *)(& max), (u8 *)(& bwgid), (u8 *)(& ptype), (u8 *)(& prio_tc)); return (tmp___0); default: ; goto ldv_55523; } ldv_55523: ; return (0); } } s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw , u8 pfc_en , u8 *prio_tc ) { s32 tmp ; s32 tmp___0 ; { switch ((unsigned int )hw->mac.type) { case 1U: tmp = ixgbe_dcb_config_pfc_82598(hw, (int )pfc_en); return (tmp); case 2U: ; case 3U: ; case 4U: ; case 5U: tmp___0 = ixgbe_dcb_config_pfc_82599(hw, (int )pfc_en, prio_tc); return (tmp___0); default: ; goto ldv_55535; } ldv_55535: ; return (-22); } } s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw , struct ieee_ets *ets , int max_frame ) { __u16 refill[8U] ; __u16 max[8U] ; __u8 prio_type[8U] ; int i ; __u8 bwg_id[8U] ; s32 tmp ; { bwg_id[0] = 0U; bwg_id[1] = 1U; bwg_id[2] = 2U; bwg_id[3] = 3U; bwg_id[4] = 4U; bwg_id[5] = 5U; bwg_id[6] = 6U; bwg_id[7] = 7U; i = 0; 
goto ldv_55551; ldv_55550: ; switch ((int )ets->tc_tsa[i]) { case 0: prio_type[i] = 2U; goto ldv_55547; case 2: prio_type[i] = 0U; goto ldv_55547; default: ; return (-22); } ldv_55547: i = i + 1; ldv_55551: ; if (i <= 7) { goto ldv_55550; } else { } ixgbe_ieee_credits((__u8 *)(& ets->tc_tx_bw), (__u16 *)(& refill), (__u16 *)(& max), max_frame); tmp = ixgbe_dcb_hw_ets_config(hw, (u16 *)(& refill), (u16 *)(& max), (u8 *)(& bwg_id), (u8 *)(& prio_type), (u8 *)(& ets->prio_tc)); return (tmp); } } s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type , u8 *prio_tc ) { { switch ((unsigned int )hw->mac.type) { case 1U: ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, prio_type); goto ldv_55562; case 2U: ; case 3U: ; case 4U: ; case 5U: ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); goto ldv_55562; default: ; goto ldv_55562; } ldv_55562: ; return (0); } } static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw , u8 *map ) { u32 reg ; u32 i ; { reg = ixgbe_read_reg(hw, 12320U); i = 0U; goto ldv_55575; ldv_55574: *(map + (unsigned long )i) = (unsigned int )((u8 )(reg >> (int )(i * 3U))) & 7U; i = i + 1U; ldv_55575: ; if (i <= 7U) { goto ldv_55574; } else { } return; } } void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw , u8 *map ) { { switch ((unsigned int )hw->mac.type) { case 2U: ; case 3U: ; case 4U: ; case 5U: ixgbe_dcb_read_rtrup2tc_82599(hw, map); goto ldv_55585; default: ; goto ldv_55585; } ldv_55585: ; return; } } bool ldv_queue_work_on_632(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; 
/* LDV (Linux Driver Verification) model stubs, generated — not driver logic.
 * Workqueue wrappers forward to the real API and notify the verifier
 * (activate_work_9 / call_and_disable_all_9); allocation/skb wrappers check the
 * gfp flags (ldv_check_alloc_flags) and return a nondeterministic pointer
 * (ldv_undef_ptr) instead of allocating. */
bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } }
bool ldv_queue_delayed_work_on_633(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
bool ldv_queue_work_on_634(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } }
void ldv_flush_workqueue_635(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } }
bool ldv_queue_delayed_work_on_636(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
void *ldv_kmem_cache_alloc_642(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } }
int ldv_pskb_expand_head_648(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } }
struct sk_buff *ldv_skb_clone_650(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv_skb_copy_652(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv___netdev_alloc_skb_653(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv___netdev_alloc_skb_654(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv___netdev_alloc_skb_655(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
int ldv_pskb_expand_head_656(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } }
int ldv_pskb_expand_head_657(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } }
struct sk_buff *ldv_skb_clone_658(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
void *ldv_kmem_cache_alloc_659(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } }
/* Forward declarations for the next translation unit merged into this file. */
__inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_679(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_681(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool
ldv_queue_delayed_work_on_680(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_683(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_682(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_689(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_706(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_697(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_705(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_699(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_695(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_703(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_704(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_700(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_701(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_702(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *prio_type ) { u32 reg ; u32 credit_refill ; u32 credit_max ; u8 i ; u32 tmp ; { reg = 0U; credit_refill = 0U; credit_max = 0U; i = 0U; tmp = ixgbe_read_reg(hw, 20640U); reg = tmp | 2147483648U; ixgbe_write_reg(hw, 20640U, reg); reg = ixgbe_read_reg(hw, 15616U); reg = reg & 4294967231U; reg = reg | 2U; reg = reg | 4U; ixgbe_write_reg(hw, 15616U, reg); i = 0U; goto ldv_55374; 
ldv_55373: credit_refill = (u32 )*(refill + (unsigned long )i); credit_max = (u32 )*(max + (unsigned long )i); reg = (credit_max << 12) | credit_refill; if ((unsigned int )*(prio_type + (unsigned long )i) == 2U) { reg = reg | 2147483648U; } else { } ixgbe_write_reg(hw, (u32 )(((int )i + 3848) * 4), reg); i = (u8 )((int )i + 1); ldv_55374: ; if ((unsigned int )i <= 7U) { goto ldv_55373; } else { } reg = ixgbe_read_reg(hw, 12032U); reg = reg; reg = reg | 16U; reg = reg | 64U; ixgbe_write_reg(hw, 12032U, reg); reg = ixgbe_read_reg(hw, 12288U); reg = reg & 4294967293U; ixgbe_write_reg(hw, 12288U, reg); return (0); } } s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type ) { u32 reg ; u32 max_credits ; u8 i ; { reg = ixgbe_read_reg(hw, 32576U); reg = reg & 4294967231U; reg = reg | 524288U; reg = reg | 262144U; ixgbe_write_reg(hw, 32576U, reg); i = 0U; goto ldv_55387; ldv_55386: max_credits = (u32 )*(max + (unsigned long )i); reg = max_credits << 12; reg = (u32 )*(refill + (unsigned long )i) | reg; reg = ((unsigned int )*(bwg_id + (unsigned long )i) << 9) | reg; if ((unsigned int )*(prio_type + (unsigned long )i) == 1U) { reg = reg | 1073741824U; } else { } if ((unsigned int )*(prio_type + (unsigned long )i) == 2U) { reg = reg | 2147483648U; } else { } ixgbe_write_reg(hw, (u32 )((int )i * 64 + 24620), reg); i = (u8 )((int )i + 1); ldv_55387: ; if ((unsigned int )i <= 7U) { goto ldv_55386; } else { } return (0); } } s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type ) { u32 reg ; u8 i ; { reg = ixgbe_read_reg(hw, 52480U); reg = reg & 4294967231U; reg = reg | 288U; ixgbe_write_reg(hw, 52480U, reg); i = 0U; goto ldv_55399; ldv_55398: reg = (u32 )*(refill + (unsigned long )i); reg = ((unsigned int )*(max + (unsigned long )i) << 12) | reg; reg = ((unsigned int )*(bwg_id + (unsigned long )i) << 9) | reg; if ((unsigned int )*(prio_type + (unsigned 
/* (tail of ixgbe_dcb_config_tx_data_arbiter_82598, split mid-cast "(unsigned long)") */
long )i) == 1U) { reg = reg | 1073741824U; } else { } if ((unsigned int )*(prio_type + (unsigned long )i) == 2U) { reg = reg | 2147483648U; } else { } ixgbe_write_reg(hw, (u32 )(((int )i + 13128) * 4), reg); i = (u8 )((int )i + 1); ldv_55399: ; if ((unsigned int )i <= 7U) { goto ldv_55398; } else { } reg = ixgbe_read_reg(hw, 32256U); reg = reg | 4U; ixgbe_write_reg(hw, 32256U, reg); return (0); } }
/* 82598 priority flow control: per enabled TC program low/high water marks
 * (fc.low_water/high_water << 10, with the enable bit 31); disabled TCs get
 * their threshold registers zeroed. Then write pause_time into the four
 * refresh/timer registers (value * 65537 replicates it into both 16-bit
 * halves) and half of pause_time into offset 12960. */
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw , u8 pfc_en ) { u32 fcrtl ; u32 reg ; u8 i ; { reg = ixgbe_read_reg(hw, 15616U); reg = reg & 4294967287U; reg = reg | 16U; ixgbe_write_reg(hw, 15616U, reg); reg = ixgbe_read_reg(hw, 20608U); reg = reg & 4294918143U; if ((unsigned int )pfc_en != 0U) { reg = reg | 16384U; } else { } ixgbe_write_reg(hw, 20608U, reg); i = 0U; goto ldv_55410; ldv_55409: ; if ((((int )pfc_en >> (int )i) & 1) == 0) { ixgbe_write_reg(hw, (u32 )(((int )i + 1604) * 8), 0U); ixgbe_write_reg(hw, (u32 )(((int )i + 1612) * 8), 0U); goto ldv_55408; } else { } fcrtl = (hw->fc.low_water[(int )i] << 10) | 2147483648U; reg = (hw->fc.high_water[(int )i] << 10) | 2147483648U; ixgbe_write_reg(hw, (u32 )(((int )i + 1604) * 8), fcrtl); ixgbe_write_reg(hw, (u32 )(((int )i + 1612) * 8), reg); ldv_55408: i = (u8 )((int )i + 1); ldv_55410: ; if ((unsigned int )i <= 7U) { goto ldv_55409; } else { } reg = (u32 )((int )hw->fc.pause_time * 65537); i = 0U; goto ldv_55413; ldv_55412: ixgbe_write_reg(hw, (u32 )(((int )i + 3200) * 4), reg); i = (u8 )((int )i + 1); ldv_55413: ; if ((unsigned int )i <= 3U) { goto ldv_55412; } else { } ixgbe_write_reg(hw, 12960U, (unsigned int )hw->fc.pause_time / 2U); return (0); } }
/* 82598 per-TC statistics mapping: first loop walks register pairs in steps of
 * two (i <= 14) assigning group j (j * 16843009 == j * 0x01010101 replicates j
 * into all four bytes); second loop maps eight Tx stat registers, split across
 * two register ranges at i == 8. */
static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw ) { u32 reg ; u8 i ; u8 j ; { reg = 0U; i = 0U; j = 0U; i = 0U; j = 0U; goto ldv_55422; ldv_55421: reg = ixgbe_read_reg(hw, (u32 )(((int )i + 2240) * 4)); reg = (u32 )((int )j * 16843009) | reg; ixgbe_write_reg(hw, (u32 )(((int )i + 2240) * 4), reg); reg = ixgbe_read_reg(hw, (u32 )(((int )i + 2241) * 4)); reg = (u32 )((int )j * 16843009) | reg; ixgbe_write_reg(hw, (u32 )(((int )i + 2241) * 4), reg); i = (unsigned int )i + 2U; j = (u8 )((int )j + 1); ldv_55422: ; if ((unsigned int )i <= 14U && (unsigned int )j <= 7U) { goto ldv_55421; } else { } i = 0U; goto ldv_55425; ldv_55424: reg = ixgbe_read_reg(hw, (u32 )((unsigned int )i <= 7U ? ((int )i + 7360) * 4 : ((int )i + 8576) * 4)); reg = (u32 )((int )i * 16843009) | reg; ixgbe_write_reg(hw, (u32 )((unsigned int )i <= 7U ? ((int )i + 7360) * 4 : ((int )i + 8576) * 4), reg); i = (u8 )((int )i + 1); ldv_55425: ; if ((unsigned int )i <= 7U) { goto ldv_55424; } else { } return (0); } }
/* Full 82598 DCB bring-up: arbiters, PFC, then statistics mapping. Always 0. */
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw , u8 pfc_en , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type ) { { ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_pfc_82598(hw, (int )pfc_en); ixgbe_dcb_config_tc_stats_82598(hw); return (0); } }
/* LDV model stubs for this translation unit (see comment on the earlier stub group). */
bool ldv_queue_work_on_679(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } }
bool ldv_queue_delayed_work_on_680(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
bool ldv_queue_work_on_681(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp =
/* LDV model stubs (generated): workqueue wrappers notify the verifier;
 * allocation/skb wrappers validate gfp flags and return nondeterministic
 * pointers instead of allocating. */
queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } }
void ldv_flush_workqueue_682(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } }
bool ldv_queue_delayed_work_on_683(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
void *ldv_kmem_cache_alloc_689(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } }
int ldv_pskb_expand_head_695(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } }
struct sk_buff *ldv_skb_clone_697(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv_skb_copy_699(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv___netdev_alloc_skb_700(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv___netdev_alloc_skb_701(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv___netdev_alloc_skb_702(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
int ldv_pskb_expand_head_703(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } }
int ldv_pskb_expand_head_704(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } }
struct sk_buff *ldv_skb_clone_705(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
void *ldv_kmem_cache_alloc_706(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } }
/* Forward declarations for the next merged translation unit (82599 DCB code). */
__inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_726(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_728(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_727(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_730(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_729(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_736(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_753(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_744(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_752(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_746(struct sk_buff
/* (continuation of forward declarations, split mid-parameter-list) */
const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_742(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_750(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_751(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_747(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_748(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_749(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ;
/* 82599 Rx packet-buffer arbiter: disable arbiter (write 70 to offset 9264),
 * pack the eight 3-bit prio_tc fields into the UP-to-TC register (offset 12320,
 * same one read back by ixgbe_dcb_read_rtrup2tc_82599), program per-TC credit
 * registers, then re-enable (write 6). */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type , u8 *prio_tc ) { u32 reg ; u32 credit_refill ; u32 credit_max ; u8 i ; { reg = 0U; credit_refill = 0U; credit_max = 0U; i = 0U; reg = 70U; ixgbe_write_reg(hw, 9264U, reg); reg = 0U; i = 0U; goto ldv_55381; ldv_55380: reg = (u32 )((int )*(prio_tc + (unsigned long )i) << (int )i * 3) | reg; i = (u8 )((int )i + 1); ldv_55381: ; if ((unsigned int )i <= 7U) { goto ldv_55380; } else { } ixgbe_write_reg(hw, 12320U, reg); i = 0U; goto ldv_55384; ldv_55383: credit_refill = (u32 )*(refill + (unsigned long )i); credit_max = (u32 )*(max + (unsigned long )i); reg = (credit_max << 12) | credit_refill; reg = ((unsigned int )*(bwg_id + (unsigned long )i) << 9) | reg; if ((unsigned int )*(prio_type + (unsigned long )i) == 2U) { reg = reg | 2147483648U; } else { } ixgbe_write_reg(hw, (u32 )(((int )i + 2128) * 4), reg); i = (u8 )((int )i + 1); ldv_55384: ; if ((unsigned int )i <= 7U) { goto ldv_55383; } else { } reg = 6U; ixgbe_write_reg(hw, 9264U, reg); return (0); } }
/* 82599 Tx descriptor arbiter. First loop: the u8 counter wraps to 0x80 after
 * 128 iterations and "(signed char)i >= 0" then fails — i.e. it intentionally
 * runs 128 times, selecting each Tx queue (offset 18692) and zeroing its
 * per-queue credits (offset 18696). (Loop body continues on the next chunk line.) */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type ) { u32 reg ; u32 max_credits ; u8 i ; { i = 0U; goto ldv_55397; ldv_55396: ixgbe_write_reg(hw, 18692U, (u32 )i); ixgbe_write_reg(hw, 18696U, 0U); i = (u8 )((int )i + 1); ldv_55397: ; if ((int )((signed char )i) >= 0) { goto ldv_55396; } else { } i = 0U; goto ldv_55400; ldv_55399: max_credits = (u32 )*(max + (unsigned long )i); reg = max_credits << 12; reg = (u32 )*(refill + (unsigned long )i) | reg; reg = ((unsigned int )*(bwg_id + (unsigned long )i) << 9) | reg; if ((unsigned int )*(prio_type + (unsigned long )i) == 1U) { reg = reg | 1073741824U; } else { } if ((unsigned int )*(prio_type + (unsigned long )i) == 2U) { reg = reg | 2147483648U; } else { } ixgbe_write_reg(hw, (u32 )(((int )i + 4676) * 4), reg); i = (u8 )((int )i + 1); ldv_55400: ; if ((unsigned int )i <= 7U) { goto ldv_55399; } else { } reg = 17U; ixgbe_write_reg(hw, 18688U, reg); return (0); } }
/* 82599 Tx data arbiter: disable (16777568 to offset 52480), pack prio_tc into
 * the Tx UP-to-TC register (offset 51200), program per-TC credits, re-enable
 * (16777504). */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type , u8 *prio_tc ) { u32 reg ; u8 i ; { reg = 16777568U; ixgbe_write_reg(hw, 52480U, reg); reg = 0U; i = 0U; goto ldv_55413; ldv_55412: reg = (u32 )((int )*(prio_tc + (unsigned long )i) << (int )i * 3) | reg; i = (u8 )((int )i + 1); ldv_55413: ; if ((unsigned int )i <= 7U) { goto ldv_55412; } else { } ixgbe_write_reg(hw, 51200U, reg); i = 0U; goto ldv_55416; ldv_55415: reg = (u32 )*(refill + (unsigned long )i); reg = ((unsigned int )*(max + (unsigned long )i) << 12) | reg; reg = ((unsigned int )*(bwg_id + (unsigned long )i) << 9) | reg; if ((unsigned int )*(prio_type + (unsigned long )i) == 1U) { reg = reg | 1073741824U; } else { } if ((unsigned int )*(prio_type + (unsigned long )i) == 2U) { reg = reg | 2147483648U; } else { } ixgbe_write_reg(hw, (u32 )(((int )i + 13128) * 4), reg); i = (u8 )((int )i + 1); ldv_55416: ; if ((unsigned int )i <= 7U) { goto ldv_55415; } else { } reg = 16777504U; ixgbe_write_reg(hw, 52480U, reg); return (0); } }
/* 82599 PFC configuration; body continues on the next chunk line. */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw , u8 pfc_en , u8 *prio_tc ) { u32 i ; u32 j ; u32
/* (body of ixgbe_dcb_config_pfc_82599, split mid-declaration "u32 fcrtl")
 * For each priority level up to the highest TC referenced by prio_tc: if any
 * user priority mapped to it has PFC enabled, program low/high water marks
 * (fc.low_water/high_water << 10 with enable bit 31); otherwise disable the low
 * threshold and derive the high threshold from the Rx packet-buffer size
 * register minus 32. Remaining levels up to 7 are zeroed, then pause_time is
 * replicated (value * 65537 fills both 16-bit halves) into four timer
 * registers and half of it written to offset 12960. */
fcrtl ; u32 reg ; u8 max_tc ; int enabled ; u32 tmp ; { max_tc = 0U; ixgbe_write_reg(hw, 15616U, 16U); reg = ixgbe_read_reg(hw, 17044U); reg = reg | 2U; reg = reg & 4294963203U; if ((unsigned int )hw->mac.type == 3U) { reg = (u32 )((int )pfc_en << 4) | reg; } else { } if ((unsigned int )pfc_en != 0U) { reg = reg | 4U; } else { } ixgbe_write_reg(hw, 17044U, reg); i = 0U; goto ldv_55429; ldv_55428: ; if ((int )*(prio_tc + (unsigned long )i) > (int )max_tc) { max_tc = *(prio_tc + (unsigned long )i); } else { } i = i + 1U; ldv_55429: ; if (i <= 7U) { goto ldv_55428; } else { } i = 0U; goto ldv_55436; ldv_55435: enabled = 0; j = 0U; goto ldv_55434; ldv_55433: ; if ((u32 )*(prio_tc + (unsigned long )j) == i && ((int )pfc_en >> (int )j) & 1) { enabled = 1; goto ldv_55432; } else { } j = j + 1U; ldv_55434: ; if (j <= 7U) { goto ldv_55433; } else { } ldv_55432: ; if (enabled != 0) { reg = (hw->fc.high_water[i] << 10) | 2147483648U; fcrtl = (hw->fc.low_water[i] << 10) | 2147483648U; ixgbe_write_reg(hw, (i + 3208U) * 4U, fcrtl); } else { tmp = ixgbe_read_reg(hw, (i + 3840U) * 4U); reg = tmp - 32U; ixgbe_write_reg(hw, (i + 3208U) * 4U, 0U); } ixgbe_write_reg(hw, (i + 3224U) * 4U, reg); i = i + 1U; ldv_55436: ; if ((u32 )max_tc >= i) { goto ldv_55435; } else { } goto ldv_55439; ldv_55438: ixgbe_write_reg(hw, (i + 3208U) * 4U, 0U); ixgbe_write_reg(hw, (i + 3224U) * 4U, 0U); i = i + 1U; ldv_55439: ; if (i <= 7U) { goto ldv_55438; } else { } reg = (u32 )((int )hw->fc.pause_time * 65537); i = 0U; goto ldv_55442; ldv_55441: ixgbe_write_reg(hw, (i + 3200U) * 4U, reg); i = i + 1U; ldv_55442: ; if (i <= 3U) { goto ldv_55441; } else { } ixgbe_write_reg(hw, 12960U, (unsigned int )hw->fc.pause_time / 2U); return (0); } }
/* 82599 per-TC statistics mapping: 32 Rx stat registers grouped four per TC
 * (i/4 replicated into all four bytes via * 16843009 == * 0x01010101), then 32
 * Tx stat registers with an uneven TC grouping (8,8,4,4,2,2,2,2).
 * (Body continues on the next chunk line.) */
static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw ) { u32 reg ; u8 i ; { reg = 0U; i = 0U; i = 0U; goto ldv_55450; ldv_55449: reg = (u32 )((int )((unsigned int )i / 4U) * 16843009); ixgbe_write_reg(hw, (u32 )(((int )i + 2240) * 4), reg); i = (u8 )((int )i + 1); ldv_55450: ; if ((unsigned int )i <= 31U) { goto ldv_55449; } else { } i = 0U; goto ldv_55453; ldv_55452: ; if ((unsigned int )i <= 7U) { reg = 0U; } else if ((unsigned int )i <= 15U) { reg = 16843009U; } else if ((unsigned int )i <= 19U) { reg = 33686018U; } else if ((unsigned int )i <= 23U) { reg = 50529027U; } else if ((unsigned int )i <= 25U) { reg = 67372036U; } else if ((unsigned int )i <= 27U) { reg = 84215045U; } else if ((unsigned int )i <= 29U) { reg = 101058054U; } else { reg = 117901063U; } ixgbe_write_reg(hw, (u32 )(((int )i + 8576) * 4), reg); i = (u8 )((int )i + 1); ldv_55453: ; if ((unsigned int )i <= 31U) { goto ldv_55452; } else { } return (0); } }
/* Full 82599 DCB bring-up: arbiters, PFC, then statistics mapping. Always 0. */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw , u8 pfc_en , u16 *refill , u16 *max , u8 *bwg_id , u8 *prio_type , u8 *prio_tc ) { { ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); ixgbe_dcb_config_pfc_82599(hw, (int )pfc_en, prio_tc); ixgbe_dcb_config_tc_stats_82599(hw); return (0); } }
/* LDV model stubs for this translation unit. */
bool ldv_queue_work_on_726(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } }
bool ldv_queue_delayed_work_on_727(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
bool ldv_queue_work_on_728(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct
/* LDV model stubs (generated); see comment on the earlier stub groups. */
*ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } }
void ldv_flush_workqueue_729(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } }
bool ldv_queue_delayed_work_on_730(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
void *ldv_kmem_cache_alloc_736(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } }
int ldv_pskb_expand_head_742(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } }
struct sk_buff *ldv_skb_clone_744(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv_skb_copy_746(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv___netdev_alloc_skb_747(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv___netdev_alloc_skb_748(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
struct sk_buff *ldv___netdev_alloc_skb_749(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
int ldv_pskb_expand_head_750(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } }
int ldv_pskb_expand_head_751(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } }
struct sk_buff *ldv_skb_clone_752(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } }
void *ldv_kmem_cache_alloc_753(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } }
/* Forward declarations for the next merged translation unit (dcbnl glue). */
__inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_773(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_775(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_774(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_777(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_776(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_783(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_800(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; __inline static void *kzalloc(size_t size , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_791(struct sk_buff
*ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_799(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_793(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_789(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_797(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_798(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_794(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_795(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_796(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; extern u8 dcb_getapp(struct net_device * , struct dcb_app * ) ; extern int dcb_ieee_setapp(struct net_device * , struct dcb_app * ) ; extern int dcb_ieee_delapp(struct net_device * , struct dcb_app * ) ; static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter , int tc_max ) { struct ixgbe_dcb_config *scfg ; struct ixgbe_dcb_config *dcfg ; struct tc_configuration *src ; struct tc_configuration *dst ; int i ; int j ; int tx ; int rx ; int changes ; struct dcb_app app ; u8 up___0 ; u8 tmp ; { scfg = & adapter->temp_dcb_cfg; dcfg = & adapter->dcb_cfg; src = (struct tc_configuration *)0; dst = (struct tc_configuration *)0; tx = 0; rx = 1; changes = 0; app.selector = 0U; app.priority = (unsigned char)0; app.protocol = 35078U; tmp = dcb_getapp(adapter->netdev, & app); up___0 = tmp; if ((unsigned int )up___0 != 0U && (((int )up___0 >> (int )adapter->fcoe.up) & 1) == 0) { changes = changes | 16; } else { } i = 1; goto ldv_55465; ldv_55464: src = (struct tc_configuration *)(& scfg->tc_config) + ((unsigned long )i + 0xffffffffffffffffUL); dst = (struct 
/* ixgbe_copy_dcb_cfg() continued: per-traffic-class compare-and-copy of the
 * tx (path[0]) and rx (path[1]) parameters — prio_type, bwg_id, bwg_percent,
 * up_to_tc_bitmap — setting change bit 8 for tx-side and bit 4 for rx-side
 * differences (bitmap changes additionally OR in bits for app/up remap: 26/22).
 * Then the bandwidth-group percentage loop (CIL index i runs 11..18, j = i-11)
 * and the per-TC PFC loop (i runs 1..8, j = i-1). */
tc_configuration *)(& dcfg->tc_config) + ((unsigned long )i + 0xffffffffffffffffUL); if ((unsigned int )dst->path[tx].prio_type != (unsigned int )src->path[tx].prio_type) { dst->path[tx].prio_type = src->path[tx].prio_type; changes = changes | 8; } else { } if ((int )dst->path[tx].bwg_id != (int )src->path[tx].bwg_id) { dst->path[tx].bwg_id = src->path[tx].bwg_id; changes = changes | 8; } else { } if ((int )dst->path[tx].bwg_percent != (int )src->path[tx].bwg_percent) { dst->path[tx].bwg_percent = src->path[tx].bwg_percent; changes = changes | 8; } else { } if ((int )dst->path[tx].up_to_tc_bitmap != (int )src->path[tx].up_to_tc_bitmap) { dst->path[tx].up_to_tc_bitmap = src->path[tx].up_to_tc_bitmap; changes = changes | 26; } else { } if ((unsigned int )dst->path[rx].prio_type != (unsigned int )src->path[rx].prio_type) { dst->path[rx].prio_type = src->path[rx].prio_type; changes = changes | 4; } else { } if ((int )dst->path[rx].bwg_id != (int )src->path[rx].bwg_id) { dst->path[rx].bwg_id = src->path[rx].bwg_id; changes = changes | 4; } else { } if ((int )dst->path[rx].bwg_percent != (int )src->path[rx].bwg_percent) { dst->path[rx].bwg_percent = src->path[rx].bwg_percent; changes = changes | 4; } else { } if ((int )dst->path[rx].up_to_tc_bitmap != (int )src->path[rx].up_to_tc_bitmap) { dst->path[rx].up_to_tc_bitmap = src->path[rx].up_to_tc_bitmap; changes = changes | 22; } else { } i = i + 1; ldv_55465: ; if (tc_max + 1 > i) { goto ldv_55464; } else { } i = 11; goto ldv_55468; ldv_55467: j = i + -11; if ((int )dcfg->bw_percentage[tx][j] != (int )scfg->bw_percentage[tx][j]) { dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j]; changes = changes | 8; } else { } if ((int )dcfg->bw_percentage[rx][j] != (int )scfg->bw_percentage[rx][j]) { dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j]; changes = changes | 4; } else { } i = i + 1; ldv_55468: ; if (i <= 18) { goto ldv_55467; } else { } i = 1; goto ldv_55471; ldv_55470: j = i + -1; if ((unsigned int 
/* End of ixgbe_copy_dcb_cfg (PFC compare sets change bit 2; returns the
 * accumulated bitmask). ixgbe_dcbnl_get_state() reports whether adapter flag
 * bit 4096U (DCB enabled — presumably IXGBE_FLAG_DCB_ENABLED, confirm) is set.
 * ixgbe_dcbnl_set_state() rejects the call unless dcbx_cap bit 4 (CEE mode)
 * is set, returns 0 if the state already matches, otherwise reconfigures the
 * TC count via ixgbe_setup_tc(). */
)dcfg->tc_config[j].dcb_pfc != (unsigned int )scfg->tc_config[j].dcb_pfc) { dcfg->tc_config[j].dcb_pfc = scfg->tc_config[j].dcb_pfc; changes = changes | 2; } else { } i = i + 1; ldv_55471: ; if (i <= 8) { goto ldv_55470; } else { } if ((int )dcfg->pfc_mode_enable != (int )scfg->pfc_mode_enable) { dcfg->pfc_mode_enable = scfg->pfc_mode_enable; changes = changes | 2; } else { } return (changes); } } static u8 ixgbe_dcbnl_get_state(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; return ((adapter->flags & 4096U) != 0U); } } static u8 ixgbe_dcbnl_set_state(struct net_device *netdev , u8 state ) { struct ixgbe_adapter *adapter ; void *tmp ; int tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; if (((int )adapter->dcbx_cap & 4) == 0) { return (1U); } else { } if (((unsigned int )state != 0U) ^ ((adapter->flags & 4096U) == 0U)) { return (0U); } else { } tmp___0 = ixgbe_setup_tc(netdev, (unsigned int )state != 0U ? 
/* ixgbe_dcbnl_get_perm_hw_addr(): fills perm_addr (memset to 0xff, 32 bytes)
 * with the permanent MAC, then for MAC types 2U/3U/4U appends the SAN MAC.
 * ixgbe_dcbnl_set_pg_tc_cfg_tx() / set_pg_bwg_cfg_tx() stage CEE priority-group
 * parameters into temp_dcb_cfg path[0] (tx); 255U (0xff) means "leave field
 * unchanged". */
(int )adapter->dcb_cfg.num_tcs.pg_tcs : 0); return (tmp___0 != 0); } } static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev , u8 *perm_addr ) { struct ixgbe_adapter *adapter ; void *tmp ; int i ; int j ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; memset((void *)perm_addr, 255, 32UL); i = 0; goto ldv_55490; ldv_55489: *(perm_addr + (unsigned long )i) = adapter->hw.mac.perm_addr[i]; i = i + 1; ldv_55490: ; if ((int )netdev->addr_len > i) { goto ldv_55489; } else { } switch ((unsigned int )adapter->hw.mac.type) { case 2U: ; case 3U: ; case 4U: j = 0; goto ldv_55496; ldv_55495: *(perm_addr + (unsigned long )i) = adapter->hw.mac.san_addr[j]; j = j + 1; i = i + 1; ldv_55496: ; if ((int )netdev->addr_len > j) { goto ldv_55495; } else { } goto ldv_55498; default: ; goto ldv_55498; } ldv_55498: ; return; } } static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev , int tc , u8 prio , u8 bwg_id , u8 bw_pct , u8 up_map ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; if ((unsigned int )prio != 255U) { adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = (enum strict_prio_type )prio; } else { } if ((unsigned int )bwg_id != 255U) { adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; } else { } if ((unsigned int )bw_pct != 255U) { adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = bw_pct; } else { } if ((unsigned int )up_map != 255U) { adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = up_map; } else { } return; } } static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev , int bwg_id , u8 bw_pct ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; return; } } static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev 
/* Rx-side mirrors of the setters above: set_pg_tc_cfg_rx / set_pg_bwg_cfg_rx
 * stage into temp_dcb_cfg path[1]; get_pg_tc_cfg_tx / get_pg_bwg_cfg_tx read
 * the committed dcb_cfg path[0] values back out through the u8* out-params. */
, int tc , u8 prio , u8 bwg_id , u8 bw_pct , u8 up_map ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; if ((unsigned int )prio != 255U) { adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = (enum strict_prio_type )prio; } else { } if ((unsigned int )bwg_id != 255U) { adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; } else { } if ((unsigned int )bw_pct != 255U) { adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = bw_pct; } else { } if ((unsigned int )up_map != 255U) { adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = up_map; } else { } return; } } static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev , int bwg_id , u8 bw_pct ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; return; } } static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev , int tc , u8 *prio , u8 *bwg_id , u8 *bw_pct , u8 *up_map ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; *prio = (u8 )adapter->dcb_cfg.tc_config[tc].path[0].prio_type; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; return; } } static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev , int bwg_id , u8 *bw_pct ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; return; } } static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev , int tc , u8 *prio , u8 *bwg_id , u8 *bw_pct , u8 *up_map ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp 
/* get_pg_tc_cfg_rx / get_pg_bwg_cfg_rx read back the rx (path[1]) values.
 * set_pfc_cfg stages a per-priority PFC setting and flips
 * temp_dcb_cfg.pfc_mode_enable when it differs from the committed config;
 * get_pfc_cfg reads the committed value. ixgbe_dcbnl_devreset() spins on
 * test_and_set_bit(1L, &adapter->state) (usleep_range between attempts —
 * presumably an __IXGBE_RESETTING-style guard, confirm), then if the netdev
 * is running does ndo_stop, rebuilds the interrupt scheme, ndo_open, and
 * clears the bit. */
= netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; *prio = (u8 )adapter->dcb_cfg.tc_config[tc].path[1].prio_type; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; return; } } static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev , int bwg_id , u8 *bw_pct ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; return; } } static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev , int priority , u8 setting ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = (enum dcb_pfc_type )setting; if ((unsigned int )adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != (unsigned int )adapter->dcb_cfg.tc_config[priority].dcb_pfc) { adapter->temp_dcb_cfg.pfc_mode_enable = 1; } else { } return; } } static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev , int priority , u8 *setting ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; *setting = (u8 )adapter->dcb_cfg.tc_config[priority].dcb_pfc; return; } } static void ixgbe_dcbnl_devreset(struct net_device *dev ) { struct ixgbe_adapter *adapter ; void *tmp ; int tmp___0 ; bool tmp___1 ; bool tmp___2 ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; goto ldv_55577; ldv_55576: usleep_range(1000UL, 2000UL); ldv_55577: tmp___0 = test_and_set_bit(1L, (unsigned long volatile *)(& adapter->state)); if (tmp___0 != 0) { goto ldv_55576; } else { } tmp___1 = netif_running((struct net_device const *)dev); if ((int )tmp___1) { 
/* devreset tail, then ixgbe_dcbnl_set_all(): commits the staged CEE config.
 * It requires dcbx_cap bit 4, folds ixgbe_copy_dcb_cfg()'s change mask into
 * adapter->dcb_set_bitmap, and bails with 1U when nothing changed. For
 * bandwidth changes (bits 12) it recomputes max_frame from mtu+18 (raised to
 * at least 3072 when netdev feature bit 2^31 is set — presumably the FCoE
 * feature, confirm) before programming ETS credits. */
(*((dev->netdev_ops)->ndo_stop))(dev); } else { } ixgbe_clear_interrupt_scheme(adapter); ixgbe_init_interrupt_scheme(adapter); tmp___2 = netif_running((struct net_device const *)dev); if ((int )tmp___2) { (*((dev->netdev_ops)->ndo_open))(dev); } else { } clear_bit(1L, (unsigned long volatile *)(& adapter->state)); return; } } static u8 ixgbe_dcbnl_set_all(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_dcb_config *dcb_cfg ; struct ixgbe_hw *hw ; int ret ; int i ; int tmp___0 ; u16 refill[8U] ; u16 max[8U] ; u8 bwg_id[8U] ; u8 prio_type[8U] ; u8 prio_tc[8U] ; int max_frame ; int _max1 ; int _max2 ; u8 pfc_en ; u8 prio_tc___0[8U] ; struct dcb_app app ; u8 up___0 ; u8 tmp___1 ; int tmp___2 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; dcb_cfg = & adapter->dcb_cfg; hw = & adapter->hw; ret = 1; if (((int )adapter->dcbx_cap & 4) == 0) { return (1U); } else { } tmp___0 = ixgbe_copy_dcb_cfg(adapter, 8); adapter->dcb_set_bitmap = (u8 )((int )((signed char )adapter->dcb_set_bitmap) | (int )((signed char )tmp___0)); if ((unsigned int )adapter->dcb_set_bitmap == 0U) { return (1U); } else { } if (((int )adapter->dcb_set_bitmap & 12) != 0) { max_frame = (int )((adapter->netdev)->mtu + 18U); if (((adapter->netdev)->features & 2147483648ULL) != 0ULL) { _max1 = max_frame; _max2 = 3072; max_frame = _max1 > _max2 ? 
/* set_all body: programs ETS credits/refill/max/bwgid/prio maps into hardware
 * and mirrors prio->tc into the netdev (netdev_set_prio_tc_map). Change bit 2
 * drives PFC: either ixgbe_dcb_hw_pfc_config() or plain link flow control via
 * hw->mac.ops.fc_enable, plus ixgbe_set_rx_drop_en(); ret=2 signals a PFC-only
 * update. Change bit 16 re-reads the FCoE app priority (dcb_getapp, protocol
 * 35078U) and resets the device; '(u8)ffs(up) + 255U' is the CIL spelling of
 * ffs(up) - 1. Then ixgbe_dcbnl_getcap(): a switch mapping DCB capability ids
 * to constants (dcbx_cap for id 9). */
_max1 : _max2; } else { } ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, 0); ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, 1); ixgbe_dcb_unpack_refill(dcb_cfg, 0, (u16 *)(& refill)); ixgbe_dcb_unpack_max(dcb_cfg, (u16 *)(& max)); ixgbe_dcb_unpack_bwgid(dcb_cfg, 0, (u8 *)(& bwg_id)); ixgbe_dcb_unpack_prio(dcb_cfg, 0, (u8 *)(& prio_type)); ixgbe_dcb_unpack_map(dcb_cfg, 0, (u8 *)(& prio_tc)); ixgbe_dcb_hw_ets_config(hw, (u16 *)(& refill), (u16 *)(& max), (u8 *)(& bwg_id), (u8 *)(& prio_type), (u8 *)(& prio_tc)); i = 0; goto ldv_55597; ldv_55596: netdev_set_prio_tc_map(netdev, (int )((u8 )i), (int )prio_tc[i]); i = i + 1; ldv_55597: ; if (i <= 7) { goto ldv_55596; } else { } ret = 0; } else { } if (((int )adapter->dcb_set_bitmap & 2) != 0) { if ((int )dcb_cfg->pfc_mode_enable) { ixgbe_dcb_unpack_map(dcb_cfg, 0, (u8 *)(& prio_tc___0)); ixgbe_dcb_unpack_pfc(dcb_cfg, & pfc_en); ixgbe_dcb_hw_pfc_config(hw, (int )pfc_en, (u8 *)(& prio_tc___0)); } else { (*(hw->mac.ops.fc_enable))(hw); } ixgbe_set_rx_drop_en(adapter); ret = 2; } else { } if (((int )adapter->dcb_set_bitmap & 16) != 0) { app.selector = 0U; app.priority = (unsigned char)0; app.protocol = 35078U; tmp___1 = dcb_getapp(netdev, & app); up___0 = tmp___1; tmp___2 = ffs((int )up___0); adapter->fcoe.up = (unsigned int )((u8 )tmp___2) + 255U; ixgbe_dcbnl_devreset(netdev); ret = 0; } else { } adapter->dcb_set_bitmap = 0U; return ((u8 )ret); } } static u8 ixgbe_dcbnl_getcap(struct net_device *netdev , int capid , u8 *cap ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; switch (capid) { case 2: *cap = 1U; goto ldv_55610; case 3: *cap = 1U; goto ldv_55610; case 4: *cap = 0U; goto ldv_55610; case 5: *cap = 128U; goto ldv_55610; case 6: *cap = 128U; goto ldv_55610; case 7: *cap = 1U; goto ldv_55610; case 8: *cap = 0U; goto ldv_55610; case 9: *cap = adapter->dcbx_cap; goto ldv_55610; default: *cap = 0U; goto 
/* getcap tail; getnumtcs reports pg_tcs/pfc_tcs only when DCB (flag 4096U) is
 * on, else -EINVAL (-22); setnumtcs is unsupported (-22 always); getpfcstate /
 * setpfcstate read the committed pfc_mode_enable and stage the new one;
 * getapp requires CEE mode (dcbx_cap bit 4) and delegates to dcb_getapp();
 * ieee_getets starts: copies cached ixgbe_ieee_ets into the caller's struct. */
ldv_55610; } ldv_55610: ; return (0U); } } static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev , int tcid , u8 *num ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; if ((adapter->flags & 4096U) != 0U) { switch (tcid) { case 2: *num = adapter->dcb_cfg.num_tcs.pg_tcs; goto ldv_55626; case 3: *num = adapter->dcb_cfg.num_tcs.pfc_tcs; goto ldv_55626; default: ; return (-22); } ldv_55626: ; } else { return (-22); } return (0); } } static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev , int tcid , u8 num ) { { return (-22); } } static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; return ((u8 )adapter->dcb_cfg.pfc_mode_enable); } } static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev , u8 state ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; adapter->temp_dcb_cfg.pfc_mode_enable = (unsigned int )state != 0U; return; } } static int ixgbe_dcbnl_getapp(struct net_device *netdev , u8 idtype , u16 id ) { struct ixgbe_adapter *adapter ; void *tmp ; struct dcb_app app ; u8 tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; app.selector = idtype; app.priority = (unsigned char)0; app.protocol = id; if (((int )adapter->dcbx_cap & 4) == 0) { return (-22); } else { } tmp___0 = dcb_getapp(netdev, & app); return ((int )tmp___0); } } static int ixgbe_dcbnl_ieee_getets(struct net_device *dev , struct ieee_ets *ets ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ieee_ets *my_ets ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; my_ets = adapter->ixgbe_ieee_ets; ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; if ((unsigned long 
/* ieee_getets tail (NULL cache => just ets_cap). ieee_setets: requires IEEE
 * mode (dcbx_cap bit 8); lazily kzalloc(59UL)s the ETS cache, seeding prio_tc
 * with 8U ("no TC") and reading the hardware RTRUP2TC map; computes max_tc and
 * whether the prio->tc map changed, caches the new ETS, validates max_tc
 * against pg_tcs, and either re-runs ixgbe_setup_tc (TC count changed),
 * devresets (map changed), or just reprograms ETS hardware. */
)my_ets == (unsigned long )((struct ieee_ets *)0)) { return (0); } else { } ets->cbs = my_ets->cbs; memcpy((void *)(& ets->tc_tx_bw), (void const *)(& my_ets->tc_tx_bw), 8UL); memcpy((void *)(& ets->tc_rx_bw), (void const *)(& my_ets->tc_rx_bw), 8UL); memcpy((void *)(& ets->tc_tsa), (void const *)(& my_ets->tc_tsa), 8UL); memcpy((void *)(& ets->prio_tc), (void const *)(& my_ets->prio_tc), 8UL); return (0); } } static int ixgbe_dcbnl_ieee_setets(struct net_device *dev , struct ieee_ets *ets ) { struct ixgbe_adapter *adapter ; void *tmp ; int max_frame ; int i ; int err ; __u8 max_tc ; __u8 map_chg ; void *tmp___0 ; int tmp___1 ; s32 tmp___2 ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; max_frame = (int )(dev->mtu + 18U); max_tc = 0U; map_chg = 0U; if (((int )adapter->dcbx_cap & 8) == 0) { return (-22); } else { } if ((unsigned long )adapter->ixgbe_ieee_ets == (unsigned long )((struct ieee_ets *)0)) { tmp___0 = kzalloc(59UL, 208U); adapter->ixgbe_ieee_ets = (struct ieee_ets *)tmp___0; if ((unsigned long )adapter->ixgbe_ieee_ets == (unsigned long )((struct ieee_ets *)0)) { return (-12); } else { } i = 0; goto ldv_55667; ldv_55666: (adapter->ixgbe_ieee_ets)->prio_tc[i] = 8U; i = i + 1; ldv_55667: ; if (i <= 7) { goto ldv_55666; } else { } ixgbe_dcb_read_rtrup2tc(& adapter->hw, (u8 *)(& (adapter->ixgbe_ieee_ets)->prio_tc)); } else { } i = 0; goto ldv_55670; ldv_55669: ; if ((int )ets->prio_tc[i] > (int )max_tc) { max_tc = ets->prio_tc[i]; } else { } if ((int )ets->prio_tc[i] != (int )(adapter->ixgbe_ieee_ets)->prio_tc[i]) { map_chg = 1U; } else { } i = i + 1; ldv_55670: ; if (i <= 7) { goto ldv_55669; } else { } memcpy((void *)adapter->ixgbe_ieee_ets, (void const *)ets, 59UL); if ((unsigned int )max_tc != 0U) { max_tc = (__u8 )((int )max_tc + 1); } else { } if ((int )adapter->dcb_cfg.num_tcs.pg_tcs < (int )max_tc) { return (-22); } else { } tmp___1 = netdev_get_num_tc(dev); if ((int )max_tc != tmp___1) { err = 
/* ieee_setets tail; ieee_getpfc: reports pfc_cap and, when the cached
 * ixgbe_ieee_pfc exists, copies pfc_en/mbc/delay plus per-TC pause counters
 * (pxoffrxc -> requests, pxofftxc -> indications). ieee_setpfc: IEEE mode
 * only; lazily kzalloc(136UL)s the PFC cache, caches the new config, then
 * programs PFC (ixgbe_dcb_hw_pfc_config) or falls back to link flow control,
 * and refreshes rx drop enable. */
ixgbe_setup_tc(dev, (int )max_tc); if (err != 0) { return (err); } else { } } else if ((unsigned int )map_chg != 0U) { ixgbe_dcbnl_devreset(dev); } else { } tmp___2 = ixgbe_dcb_hw_ets(& adapter->hw, ets, max_frame); return (tmp___2); } } static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev , struct ieee_pfc *pfc ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ieee_pfc *my_pfc ; int i ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; my_pfc = adapter->ixgbe_ieee_pfc; pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; if ((unsigned long )my_pfc == (unsigned long )((struct ieee_pfc *)0)) { return (0); } else { } pfc->pfc_en = my_pfc->pfc_en; pfc->mbc = my_pfc->mbc; pfc->delay = my_pfc->delay; i = 0; goto ldv_55680; ldv_55679: pfc->requests[i] = adapter->stats.pxoffrxc[i]; pfc->indications[i] = adapter->stats.pxofftxc[i]; i = i + 1; ldv_55680: ; if (i <= 7) { goto ldv_55679; } else { } return (0); } } static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev , struct ieee_pfc *pfc ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; u8 *prio_tc ; int err ; void *tmp___0 ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; if (((int )adapter->dcbx_cap & 8) == 0) { return (-22); } else { } if ((unsigned long )adapter->ixgbe_ieee_pfc == (unsigned long )((struct ieee_pfc *)0)) { tmp___0 = kzalloc(136UL, 208U); adapter->ixgbe_ieee_pfc = (struct ieee_pfc *)tmp___0; if ((unsigned long )adapter->ixgbe_ieee_pfc == (unsigned long )((struct ieee_pfc *)0)) { return (-12); } else { } } else { } prio_tc = (u8 *)(& (adapter->ixgbe_ieee_ets)->prio_tc); memcpy((void *)adapter->ixgbe_ieee_pfc, (void const *)pfc, 136UL); if ((unsigned int )pfc->pfc_en != 0U) { err = ixgbe_dcb_hw_pfc_config(hw, (int )pfc->pfc_en, prio_tc); } else { err = (*(hw->mac.ops.fc_enable))(hw); } ixgbe_set_rx_drop_en(adapter); return (err); } } static int 
/* ieee_setapp: IEEE mode only; registers the app via dcb_ieee_setapp(). For
 * selector 1U / protocol 35078U (FCoE) it adopts the new priority and
 * devresets unless the current fcoe.up is already in the app mask. For
 * selector 1U / protocol 0U (default priority) it records default_up and
 * pushes a VLAN/QoS update (ixgbe_set_vmvir) to every VF that has no explicit
 * pf_qos. ieee_delapp starts below with the inverse bookkeeping. */
ixgbe_dcbnl_ieee_setapp(struct net_device *dev , struct dcb_app *app ) { struct ixgbe_adapter *adapter ; void *tmp ; int err ; u8 app_mask ; u8 tmp___0 ; int vf ; struct vf_data_storage *vfinfo ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; if (((int )adapter->dcbx_cap & 8) == 0) { return (-22); } else { } err = dcb_ieee_setapp(dev, app); if (err != 0) { return (err); } else { } if ((unsigned int )app->selector == 1U && (unsigned int )app->protocol == 35078U) { tmp___0 = dcb_ieee_getapp_mask(dev, app); app_mask = tmp___0; if (((int )app_mask >> (int )adapter->fcoe.up) & 1) { return (0); } else { } adapter->fcoe.up = app->priority; ixgbe_dcbnl_devreset(dev); } else { } if ((unsigned int )app->selector == 1U && (unsigned int )app->protocol == 0U) { adapter->default_up = app->priority; vf = 0; goto ldv_55700; ldv_55699: vfinfo = adapter->vfinfo + (unsigned long )vf; if ((unsigned int )vfinfo->pf_qos == 0U) { ixgbe_set_vmvir(adapter, (int )vfinfo->pf_vlan, (int )app->priority, (u32 )vf); } else { } vf = vf + 1; ldv_55700: ; if ((unsigned int )vf < adapter->num_vfs) { goto ldv_55699; } else { } } else { } return (0); } } static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev , struct dcb_app *app ) { struct ixgbe_adapter *adapter ; void *tmp ; int err ; u8 app_mask ; u8 tmp___0 ; int tmp___1 ; int vf ; unsigned long app_mask___0 ; u8 tmp___2 ; int qos ; unsigned long tmp___3 ; int tmp___4 ; struct vf_data_storage *vfinfo ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; if (((int )adapter->dcbx_cap & 8) == 0) { return (-22); } else { } err = dcb_ieee_delapp(dev, app); if ((err == 0 && (unsigned int )app->selector == 1U) && (unsigned int )app->protocol == 35078U) { tmp___0 = dcb_ieee_getapp_mask(dev, app); app_mask = tmp___0; if (((int )app_mask >> (int )adapter->fcoe.up) & 1) { return (0); } else { } if ((unsigned int )app_mask != 0U) { tmp___1 = ffs((int )app_mask); 
/* ieee_delapp tail: re-derives fcoe.up from the remaining app mask
 * ('(u8)ffs(mask) + 255U' == ffs(mask) - 1) or falls back to 3U, then
 * devresets; for a deleted default-priority app it picks the lowest remaining
 * priority (find_first_bit) and re-pushes VF VLAN/QoS. getdcbx returns
 * adapter->dcbx_cap; setdcbx begins with CIL's field-by-field zero
 * initialization of local ieee_ets/ieee_pfc structs. */
adapter->fcoe.up = (unsigned int )((u8 )tmp___1) + 255U; } else { adapter->fcoe.up = 3U; } ixgbe_dcbnl_devreset(dev); } else { } if (((unsigned int )app->selector == 1U && (unsigned int )app->protocol == 0U) && (int )adapter->default_up == (int )app->priority) { tmp___2 = dcb_ieee_getapp_mask(dev, app); app_mask___0 = (unsigned long )tmp___2; if (app_mask___0 != 0UL) { tmp___3 = find_first_bit((unsigned long const *)(& app_mask___0), 8UL); tmp___4 = (int )tmp___3; } else { tmp___4 = 0; } qos = tmp___4; adapter->default_up = (u8 )qos; vf = 0; goto ldv_55714; ldv_55713: vfinfo = adapter->vfinfo + (unsigned long )vf; if ((unsigned int )vfinfo->pf_qos == 0U) { ixgbe_set_vmvir(adapter, (int )vfinfo->pf_vlan, (int )((u16 )qos), (u32 )vf); } else { } vf = vf + 1; ldv_55714: ; if ((unsigned int )vf < adapter->num_vfs) { goto ldv_55713; } else { } } else { } return (err); } } static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev ) { struct ixgbe_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; return (adapter->dcbx_cap); } } static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev , u8 mode ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ieee_ets ets ; struct ieee_pfc pfc ; int err ; u8 mask ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct ixgbe_adapter *)tmp; ets.willing = 0U; ets.ets_cap = (unsigned char)0; ets.cbs = (unsigned char)0; ets.tc_tx_bw[0] = (unsigned char)0; ets.tc_tx_bw[1] = (unsigned char)0; ets.tc_tx_bw[2] = (unsigned char)0; ets.tc_tx_bw[3] = (unsigned char)0; ets.tc_tx_bw[4] = (unsigned char)0; ets.tc_tx_bw[5] = (unsigned char)0; ets.tc_tx_bw[6] = (unsigned char)0; ets.tc_tx_bw[7] = (unsigned char)0; ets.tc_rx_bw[0] = (unsigned char)0; ets.tc_rx_bw[1] = (unsigned char)0; ets.tc_rx_bw[2] = (unsigned char)0; ets.tc_rx_bw[3] = (unsigned char)0; ets.tc_rx_bw[4] = (unsigned char)0; ets.tc_rx_bw[5] = (unsigned char)0; ets.tc_rx_bw[6] = (unsigned char)0; 
/* setdcbx zero-initialization continues (tc_tsa, prio_tc, recommendation
 * arrays, and the pfc struct) — all CIL expansion of '= {0}'. */
ets.tc_rx_bw[7] = (unsigned char)0; ets.tc_tsa[0] = (unsigned char)0; ets.tc_tsa[1] = (unsigned char)0; ets.tc_tsa[2] = (unsigned char)0; ets.tc_tsa[3] = (unsigned char)0; ets.tc_tsa[4] = (unsigned char)0; ets.tc_tsa[5] = (unsigned char)0; ets.tc_tsa[6] = (unsigned char)0; ets.tc_tsa[7] = (unsigned char)0; ets.prio_tc[0] = (unsigned char)0; ets.prio_tc[1] = (unsigned char)0; ets.prio_tc[2] = (unsigned char)0; ets.prio_tc[3] = (unsigned char)0; ets.prio_tc[4] = (unsigned char)0; ets.prio_tc[5] = (unsigned char)0; ets.prio_tc[6] = (unsigned char)0; ets.prio_tc[7] = (unsigned char)0; ets.tc_reco_bw[0] = (unsigned char)0; ets.tc_reco_bw[1] = (unsigned char)0; ets.tc_reco_bw[2] = (unsigned char)0; ets.tc_reco_bw[3] = (unsigned char)0; ets.tc_reco_bw[4] = (unsigned char)0; ets.tc_reco_bw[5] = (unsigned char)0; ets.tc_reco_bw[6] = (unsigned char)0; ets.tc_reco_bw[7] = (unsigned char)0; ets.tc_reco_tsa[0] = (unsigned char)0; ets.tc_reco_tsa[1] = (unsigned char)0; ets.tc_reco_tsa[2] = (unsigned char)0; ets.tc_reco_tsa[3] = (unsigned char)0; ets.tc_reco_tsa[4] = (unsigned char)0; ets.tc_reco_tsa[5] = (unsigned char)0; ets.tc_reco_tsa[6] = (unsigned char)0; ets.tc_reco_tsa[7] = (unsigned char)0; ets.reco_prio_tc[0] = (unsigned char)0; ets.reco_prio_tc[1] = (unsigned char)0; ets.reco_prio_tc[2] = (unsigned char)0; ets.reco_prio_tc[3] = (unsigned char)0; ets.reco_prio_tc[4] = (unsigned char)0; ets.reco_prio_tc[5] = (unsigned char)0; ets.reco_prio_tc[6] = (unsigned char)0; ets.reco_prio_tc[7] = (unsigned char)0; pfc.pfc_cap = 0U; pfc.pfc_en = (unsigned char)0; pfc.mbc = (unsigned char)0; pfc.delay = (unsigned short)0; pfc.requests[0] = 0ULL; pfc.requests[1] = 0ULL; pfc.requests[2] = 0ULL; pfc.requests[3] = 0ULL; pfc.requests[4] = 0ULL; pfc.requests[5] = 0ULL; pfc.requests[6] = 0ULL; pfc.requests[7] = 0ULL; pfc.indications[0] = 0ULL; pfc.indications[1] = 0ULL; pfc.indications[2] = 0ULL; pfc.indications[3] = 0ULL; pfc.indications[4] = 0ULL; pfc.indications[5] = 0ULL; 
/* setdcbx logic: rejects LLD_MANAGED (bit 2), CEE+IEEE together (8 and 4), or
 * missing HOST bit (bit 1); no-op when unchanged; otherwise installs the mode
 * and re-applies config — IEEE via blank setets/setpfc, CEE via a forced
 * dcb_set_bitmap (mask 30U) + set_all, else tears DCB down (ixgbe_setup_tc 0).
 * Then the dcbnl_ops vtable wiring these handlers, and the LDV group-object
 * allocator ldv_initialize_dcbnl_rtnl_ops_13() which ldv_init_zalloc()s the
 * netdev/dcb_app/ieee_pfc/ieee_ets objects the harness passes to them. */
pfc.indications[6] = 0ULL; pfc.indications[7] = 0ULL; err = 0; if ((((int )mode & 2) != 0 || (((int )mode & 8) != 0 && ((int )mode & 4) != 0)) || ((int )mode & 1) == 0) { return (1U); } else { } if ((int )adapter->dcbx_cap == (int )mode) { return (0U); } else { } adapter->dcbx_cap = mode; ets.ets_cap = 8U; pfc.pfc_cap = 8U; if (((int )mode & 8) != 0) { ixgbe_dcbnl_ieee_setets(dev, & ets); ixgbe_dcbnl_ieee_setpfc(dev, & pfc); } else if (((int )mode & 4) != 0) { mask = 30U; adapter->dcb_set_bitmap = (u8 )((int )adapter->dcb_set_bitmap | (int )mask); ixgbe_dcbnl_set_all(dev); } else { ixgbe_dcbnl_ieee_setets(dev, & ets); ixgbe_dcbnl_ieee_setpfc(dev, & pfc); err = ixgbe_setup_tc(dev, 0); } return (err != 0); } } struct dcbnl_rtnl_ops const dcbnl_ops = {& ixgbe_dcbnl_ieee_getets, & ixgbe_dcbnl_ieee_setets, 0, 0, 0, 0, 0, & ixgbe_dcbnl_ieee_getpfc, & ixgbe_dcbnl_ieee_setpfc, 0, & ixgbe_dcbnl_ieee_setapp, & ixgbe_dcbnl_ieee_delapp, 0, 0, & ixgbe_dcbnl_get_state, & ixgbe_dcbnl_set_state, & ixgbe_dcbnl_get_perm_hw_addr, & ixgbe_dcbnl_set_pg_tc_cfg_tx, & ixgbe_dcbnl_set_pg_bwg_cfg_tx, & ixgbe_dcbnl_set_pg_tc_cfg_rx, & ixgbe_dcbnl_set_pg_bwg_cfg_rx, & ixgbe_dcbnl_get_pg_tc_cfg_tx, & ixgbe_dcbnl_get_pg_bwg_cfg_tx, & ixgbe_dcbnl_get_pg_tc_cfg_rx, & ixgbe_dcbnl_get_pg_bwg_cfg_rx, & ixgbe_dcbnl_set_pfc_cfg, & ixgbe_dcbnl_get_pfc_cfg, & ixgbe_dcbnl_set_all, & ixgbe_dcbnl_getcap, & ixgbe_dcbnl_getnumtcs, & ixgbe_dcbnl_setnumtcs, & ixgbe_dcbnl_getpfcstate, & ixgbe_dcbnl_setpfcstate, 0, 0, 0, 0, 0, & ixgbe_dcbnl_getapp, 0, 0, & ixgbe_dcbnl_getdcbx, & ixgbe_dcbnl_setdcbx, 0, 0, 0, 0}; void ldv_initialize_dcbnl_rtnl_ops_13(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; { tmp = ldv_init_zalloc(3008UL); dcbnl_ops_group0 = (struct net_device *)tmp; tmp___0 = ldv_init_zalloc(4UL); dcbnl_ops_group1 = (struct dcb_app *)tmp___0; tmp___1 = ldv_init_zalloc(136UL); dcbnl_ops_group2 = (struct ieee_pfc *)tmp___1; tmp___2 = ldv_init_zalloc(59UL); dcbnl_ops_group3 = (struct 
/* ldv_main_exported_13(): the LDV environment model's driver for dcbnl_ops.
 * It allocates/zeroes one ldvarg for every handler parameter, then uses
 * __VERIFIER_nondet_int() to pick an arbitrary op each invocation, so the
 * model checker explores every callback in every order. ldv_state_variable_13
 * stays 1 throughout (the ops struct has no register/unregister transition). */
ieee_ets *)tmp___2; return; } } void ldv_main_exported_13(void) { u8 *ldvarg356 ; void *tmp ; u8 *ldvarg355 ; void *tmp___0 ; u8 ldvarg366 ; int ldvarg371 ; int ldvarg385 ; u8 ldvarg388 ; int ldvarg373 ; u8 ldvarg353 ; int ldvarg368 ; int ldvarg391 ; u8 *ldvarg359 ; void *tmp___1 ; u8 *ldvarg382 ; void *tmp___2 ; u8 *ldvarg378 ; void *tmp___3 ; u8 ldvarg367 ; u8 *ldvarg384 ; void *tmp___4 ; int ldvarg358 ; int ldvarg375 ; int ldvarg354 ; u8 ldvarg361 ; u8 *ldvarg370 ; void *tmp___5 ; u8 ldvarg374 ; u8 *ldvarg386 ; void *tmp___6 ; u8 *ldvarg387 ; void *tmp___7 ; int ldvarg377 ; u8 *ldvarg376 ; void *tmp___8 ; int ldvarg364 ; u8 ldvarg360 ; u8 ldvarg365 ; u8 ldvarg392 ; int ldvarg379 ; u8 ldvarg362 ; u8 *ldvarg383 ; void *tmp___9 ; u8 ldvarg369 ; u8 *ldvarg363 ; void *tmp___10 ; u8 ldvarg380 ; u8 ldvarg393 ; int ldvarg381 ; u8 *ldvarg357 ; void *tmp___11 ; u16 ldvarg395 ; u8 ldvarg389 ; u8 ldvarg390 ; int ldvarg394 ; u8 ldvarg396 ; u8 *ldvarg372 ; void *tmp___12 ; int tmp___13 ; { tmp = ldv_init_zalloc(1UL); ldvarg356 = (u8 *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg355 = (u8 *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg359 = (u8 *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg382 = (u8 *)tmp___2; tmp___3 = ldv_init_zalloc(1UL); ldvarg378 = (u8 *)tmp___3; tmp___4 = ldv_init_zalloc(1UL); ldvarg384 = (u8 *)tmp___4; tmp___5 = ldv_init_zalloc(1UL); ldvarg370 = (u8 *)tmp___5; tmp___6 = ldv_init_zalloc(1UL); ldvarg386 = (u8 *)tmp___6; tmp___7 = ldv_init_zalloc(1UL); ldvarg387 = (u8 *)tmp___7; tmp___8 = ldv_init_zalloc(1UL); ldvarg376 = (u8 *)tmp___8; tmp___9 = ldv_init_zalloc(1UL); ldvarg383 = (u8 *)tmp___9; tmp___10 = ldv_init_zalloc(1UL); ldvarg363 = (u8 *)tmp___10; tmp___11 = ldv_init_zalloc(1UL); ldvarg357 = (u8 *)tmp___11; tmp___12 = ldv_init_zalloc(1UL); ldvarg372 = (u8 *)tmp___12; ldv_memset((void *)(& ldvarg366), 0, 1UL); ldv_memset((void *)(& ldvarg371), 0, 4UL); ldv_memset((void *)(& ldvarg385), 0, 4UL); ldv_memset((void *)(& ldvarg388), 0, 1UL); 
/* Scalar-argument zeroing continues, then the nondeterministic dispatch switch
 * begins: cases 0..4 call getapp, ieee_getets, ieee_setets, set_pfc_cfg and
 * set_pg_tc_cfg_rx on the pre-allocated group objects. */
ldv_memset((void *)(& ldvarg373), 0, 4UL); ldv_memset((void *)(& ldvarg353), 0, 1UL); ldv_memset((void *)(& ldvarg368), 0, 4UL); ldv_memset((void *)(& ldvarg391), 0, 4UL); ldv_memset((void *)(& ldvarg367), 0, 1UL); ldv_memset((void *)(& ldvarg358), 0, 4UL); ldv_memset((void *)(& ldvarg375), 0, 4UL); ldv_memset((void *)(& ldvarg354), 0, 4UL); ldv_memset((void *)(& ldvarg361), 0, 1UL); ldv_memset((void *)(& ldvarg374), 0, 1UL); ldv_memset((void *)(& ldvarg377), 0, 4UL); ldv_memset((void *)(& ldvarg364), 0, 4UL); ldv_memset((void *)(& ldvarg360), 0, 1UL); ldv_memset((void *)(& ldvarg365), 0, 1UL); ldv_memset((void *)(& ldvarg392), 0, 1UL); ldv_memset((void *)(& ldvarg379), 0, 4UL); ldv_memset((void *)(& ldvarg362), 0, 1UL); ldv_memset((void *)(& ldvarg369), 0, 1UL); ldv_memset((void *)(& ldvarg380), 0, 1UL); ldv_memset((void *)(& ldvarg393), 0, 1UL); ldv_memset((void *)(& ldvarg381), 0, 4UL); ldv_memset((void *)(& ldvarg395), 0, 2UL); ldv_memset((void *)(& ldvarg389), 0, 1UL); ldv_memset((void *)(& ldvarg390), 0, 1UL); ldv_memset((void *)(& ldvarg394), 0, 4UL); ldv_memset((void *)(& ldvarg396), 0, 1UL); tmp___13 = __VERIFIER_nondet_int(); switch (tmp___13) { case 0: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_getapp(dcbnl_ops_group0, (int )ldvarg396, (int )ldvarg395); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 1: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_ieee_getets(dcbnl_ops_group0, dcbnl_ops_group3); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 2: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_ieee_setets(dcbnl_ops_group0, dcbnl_ops_group3); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 3: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_set_pfc_cfg(dcbnl_ops_group0, ldvarg394, (int )ldvarg393); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 4: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_set_pg_tc_cfg_rx(dcbnl_ops_group0, ldvarg391, (int )ldvarg390, (int )ldvarg389, (int )ldvarg392, (int 
/* Dispatch cases 5..16: perm_hw_addr, PG getters/setters, pfcstate, dcbx,
 * numtcs, ieee_delapp/setapp — each guarded by the (always-1) state variable. */
)ldvarg388); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 5: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_get_perm_hw_addr(dcbnl_ops_group0, ldvarg387); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 6: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_get_pg_tc_cfg_tx(dcbnl_ops_group0, ldvarg385, ldvarg384, ldvarg383, ldvarg386, ldvarg382); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 7: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_set_pg_bwg_cfg_tx(dcbnl_ops_group0, ldvarg381, (int )ldvarg380); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 8: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_getpfcstate(dcbnl_ops_group0); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 9: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_get_pfc_cfg(dcbnl_ops_group0, ldvarg379, ldvarg378); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 10: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_get_pg_bwg_cfg_tx(dcbnl_ops_group0, ldvarg377, ldvarg376); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 11: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_getdcbx(dcbnl_ops_group0); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 12: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_setnumtcs(dcbnl_ops_group0, ldvarg375, (int )ldvarg374); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 13: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_get_pg_bwg_cfg_rx(dcbnl_ops_group0, ldvarg373, ldvarg372); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 14: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_ieee_delapp(dcbnl_ops_group0, dcbnl_ops_group1); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 15: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_getnumtcs(dcbnl_ops_group0, ldvarg371, ldvarg370); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 16: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_ieee_setapp(dcbnl_ops_group0, dcbnl_ops_group1); ldv_state_variable_13 
/* Dispatch cases 17..27 (remaining handlers), default: ldv_stop() halts
 * exploration of undefined op indices. The tail begins ldv_queue_work_on_773. */
= 1; } else { } goto ldv_55781; case 17: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_get_state(dcbnl_ops_group0); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 18: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_set_pg_tc_cfg_tx(dcbnl_ops_group0, ldvarg368, (int )ldvarg367, (int )ldvarg366, (int )ldvarg369, (int )ldvarg365); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 19: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_ieee_setpfc(dcbnl_ops_group0, dcbnl_ops_group2); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 20: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_ieee_getpfc(dcbnl_ops_group0, dcbnl_ops_group2); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 21: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_getcap(dcbnl_ops_group0, ldvarg364, ldvarg363); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 22: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_setpfcstate(dcbnl_ops_group0, (int )ldvarg362); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 23: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_set_all(dcbnl_ops_group0); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 24: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_set_state(dcbnl_ops_group0, (int )ldvarg361); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 25: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_setdcbx(dcbnl_ops_group0, (int )ldvarg360); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 26: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_get_pg_tc_cfg_rx(dcbnl_ops_group0, ldvarg358, ldvarg357, ldvarg356, ldvarg359, ldvarg355); ldv_state_variable_13 = 1; } else { } goto ldv_55781; case 27: ; if (ldv_state_variable_13 == 1) { ixgbe_dcbnl_set_pg_bwg_cfg_rx(dcbnl_ops_group0, ldvarg354, (int )ldvarg353); ldv_state_variable_13 = 1; } else { } goto ldv_55781; default: ldv_stop(); } ldv_55781: ; return; } } bool ldv_queue_work_on_773(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , 
struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_774(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_775(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_776(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } } bool ldv_queue_delayed_work_on_777(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_783(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_789(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_791(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { 
ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_793(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_794(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_795(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_796(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_797(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_798(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_799(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_800(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; bool ldv_queue_work_on_820(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_822(int 
ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_821(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_824(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_823(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_830(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_847(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; extern void *devm_kmalloc(struct device * , size_t , gfp_t ) ; __inline static void *devm_kzalloc(struct device *dev , size_t size , gfp_t gfp ) { void *tmp ; { tmp = devm_kmalloc(dev, size, gfp | 32768U); return (tmp); } } struct sk_buff *ldv_skb_clone_838(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_846(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_840(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_836(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_844(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_845(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_841(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_842(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_843(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; extern struct device *devm_hwmon_device_register_with_groups(struct device * , char const * , void * , struct attribute_group const ** ) ; 
/*
 * ixgbe hwmon/sysfs support (original driver code passed through CIL).
 *
 *  - ixgbe_hwmon_show_location/_temp/_cautionthresh/_maxopthresh: sysfs
 *    "show" callbacks.  The container_of() idiom (__mptr cast) recovers the
 *    enclosing hwmon_attr from the device_attribute pointer; temperatures
 *    are reported in millidegrees (raw sensor value * 1000).  show_temp
 *    first refreshes readings via mac.ops.get_thermal_sensor_data.
 *  - ixgbe_add_hwmon_attr: fills the next free hwmon_attr slot — picks the
 *    show hook and "temp%u_{label,input,max,crit}" name by `type` (returns
 *    -1 for an unknown type), points it at sensor[offset], marks it
 *    read-only (mode 292U == 0444 octal, store == NULL) and appends it to
 *    the attribute array, bumping n_hwmon.
 *  - ixgbe_sysfs_init: bails out silently if the MAC has no
 *    init_thermal_sensor_thresh op or it fails; allocates the hwmon_buff
 *    with devm_kzalloc (1120 bytes; gfp 208U — NOTE(review): looks like
 *    GFP_KERNEL, confirm against the kernel's gfp encoding), registers the
 *    four attributes for each of up to 3 sensors (i <= 2U), skipping
 *    sensors whose location is 0, then registers the hwmon device; on
 *    IS_ERR the PTR_ERR value is returned.  -12 is -ENOMEM.
 */
static ssize_t ixgbe_hwmon_show_location(struct device *dev , struct device_attribute *attr , char *buf ) { struct hwmon_attr *ixgbe_attr ; struct device_attribute const *__mptr ; int tmp ; { __mptr = (struct device_attribute const *)attr; ixgbe_attr = (struct hwmon_attr *)__mptr; tmp = sprintf(buf, "loc%u\n", (int )(ixgbe_attr->sensor)->location); return ((ssize_t )tmp); } } static ssize_t ixgbe_hwmon_show_temp(struct device *dev , struct device_attribute *attr , char *buf ) { struct hwmon_attr *ixgbe_attr ; struct device_attribute const *__mptr ; unsigned int value ; int tmp ; { __mptr = (struct device_attribute const *)attr; ixgbe_attr = (struct hwmon_attr *)__mptr; (*((ixgbe_attr->hw)->mac.ops.get_thermal_sensor_data))(ixgbe_attr->hw); value = (unsigned int )(ixgbe_attr->sensor)->temp; value = value * 1000U; tmp = sprintf(buf, "%u\n", value); return ((ssize_t )tmp); } } static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev , struct device_attribute *attr , char *buf ) { struct hwmon_attr *ixgbe_attr ; struct device_attribute const *__mptr ; unsigned int value ; int tmp ; { __mptr = (struct device_attribute const *)attr; ixgbe_attr = (struct hwmon_attr *)__mptr; value = (unsigned int )(ixgbe_attr->sensor)->caution_thresh; value = value * 1000U; tmp = sprintf(buf, "%u\n", value); return ((ssize_t )tmp); } } static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev , struct device_attribute *attr , char *buf ) { struct hwmon_attr *ixgbe_attr ; struct device_attribute const *__mptr ; unsigned int value ; int tmp ; { __mptr = (struct device_attribute const *)attr; ixgbe_attr = (struct hwmon_attr *)__mptr; value = (unsigned int )(ixgbe_attr->sensor)->max_op_thresh; value = value * 1000U; tmp = sprintf(buf, "%u\n", value); return ((ssize_t )tmp); } } static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter , unsigned int offset , int type ) { int rc ; unsigned int n_attr ; struct hwmon_attr *ixgbe_attr ; struct lock_class_key __key ; { n_attr =
(adapter->ixgbe_hwmon_buff)->n_hwmon; ixgbe_attr = (struct hwmon_attr *)(& (adapter->ixgbe_hwmon_buff)->hwmon_list) + (unsigned long )n_attr; switch (type) { case 0: ixgbe_attr->dev_attr.show = & ixgbe_hwmon_show_location; snprintf((char *)(& ixgbe_attr->name), 12UL, "temp%u_label", offset + 1U); goto ldv_55396; case 1: ixgbe_attr->dev_attr.show = & ixgbe_hwmon_show_temp; snprintf((char *)(& ixgbe_attr->name), 12UL, "temp%u_input", offset + 1U); goto ldv_55396; case 2: ixgbe_attr->dev_attr.show = & ixgbe_hwmon_show_cautionthresh; snprintf((char *)(& ixgbe_attr->name), 12UL, "temp%u_max", offset + 1U); goto ldv_55396; case 3: ixgbe_attr->dev_attr.show = & ixgbe_hwmon_show_maxopthresh; snprintf((char *)(& ixgbe_attr->name), 12UL, "temp%u_crit", offset + 1U); goto ldv_55396; default: rc = -1; return (rc); } ldv_55396: ixgbe_attr->sensor = (struct ixgbe_thermal_diode_data *)(& adapter->hw.mac.thermal_sensor_data.sensor) + (unsigned long )offset; ixgbe_attr->hw = & adapter->hw; ixgbe_attr->dev_attr.store = (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0; ixgbe_attr->dev_attr.attr.mode = 292U; ixgbe_attr->dev_attr.attr.name = (char const *)(& ixgbe_attr->name); ixgbe_attr->dev_attr.attr.key = & __key; (adapter->ixgbe_hwmon_buff)->attrs[n_attr] = & ixgbe_attr->dev_attr.attr; (adapter->ixgbe_hwmon_buff)->n_hwmon = (adapter->ixgbe_hwmon_buff)->n_hwmon + 1U; return (0); } } static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter ) { { return; } } void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter ) { { ixgbe_sysfs_del_adapter(adapter); return; } } int ixgbe_sysfs_init(struct ixgbe_adapter *adapter ) { struct hwmon_buff *ixgbe_hwmon ; struct device *hwmon_dev ; unsigned int i ; int rc ; s32 tmp ; void *tmp___0 ; long tmp___1 ; bool tmp___2 ; { rc = 0; if ((unsigned long )adapter->hw.mac.ops.init_thermal_sensor_thresh == (unsigned long )((s32 (*)(struct ixgbe_hw * ))0)) { goto exit; } else { } tmp =
(*(adapter->hw.mac.ops.init_thermal_sensor_thresh))(& adapter->hw); if (tmp != 0) { goto exit; } else { } tmp___0 = devm_kzalloc(& (adapter->pdev)->dev, 1120UL, 208U); ixgbe_hwmon = (struct hwmon_buff *)tmp___0; if ((unsigned long )ixgbe_hwmon == (unsigned long )((struct hwmon_buff *)0)) { rc = -12; goto exit; } else { } adapter->ixgbe_hwmon_buff = ixgbe_hwmon; i = 0U; goto ldv_55418; ldv_55417: ; if ((unsigned int )adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0U) { goto ldv_55416; } else { } rc = ixgbe_add_hwmon_attr(adapter, i, 2); if (rc != 0) { goto exit; } else { } rc = ixgbe_add_hwmon_attr(adapter, i, 0); if (rc != 0) { goto exit; } else { } rc = ixgbe_add_hwmon_attr(adapter, i, 1); if (rc != 0) { goto exit; } else { } rc = ixgbe_add_hwmon_attr(adapter, i, 3); if (rc != 0) { goto exit; } else { } ldv_55416: i = i + 1U; ldv_55418: ; if (i <= 2U) { goto ldv_55417; } else { } ixgbe_hwmon->groups[0] = (struct attribute_group const *)(& ixgbe_hwmon->group); ixgbe_hwmon->group.attrs = (struct attribute **)(& ixgbe_hwmon->attrs); hwmon_dev = devm_hwmon_device_register_with_groups(& (adapter->pdev)->dev, "ixgbe", (void *)ixgbe_hwmon, (struct attribute_group const **)(& ixgbe_hwmon->groups)); tmp___2 = IS_ERR((void const *)hwmon_dev); if ((int )tmp___2) { tmp___1 = PTR_ERR((void const *)hwmon_dev); rc = (int )tmp___1; } else { } exit: ; return (rc); } } bool ldv_queue_work_on_820(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_821(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3,
/* Generated LDV wrappers (continued): workqueue shims record activations in
 * the verifier model; allocator stubs return nondeterministic pointers after
 * checking gfp flags.  The declarations further down bring in the libc/kernel
 * helpers and the debugfs API used by the ixgbe_debugfs code that follows. */
ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_822(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_823(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } } bool ldv_queue_delayed_work_on_824(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_830(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_836(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_838(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_840(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_841(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff
*ldv___netdev_alloc_skb_842(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_843(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_844(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_845(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_846(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_847(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern char *kasprintf(gfp_t , char const * , ...) ; extern int sscanf(char const * , char const * , ...)
; extern size_t strlen(char const * ) ; extern int strncmp(char const * , char const * , __kernel_size_t ) ; bool ldv_queue_work_on_867(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_869(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_868(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_871(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_870(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_877(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_894(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; extern int simple_open(struct inode * , struct file * ) ; extern ssize_t simple_read_from_buffer(void * , size_t , loff_t * , void const * , size_t ) ; extern ssize_t simple_write_to_buffer(void * , size_t , loff_t * , void const * , size_t ) ; extern struct dentry *debugfs_create_file(char const * , umode_t , struct dentry * , void * , struct file_operations const * ) ; extern struct dentry *debugfs_create_dir(char const * , struct dentry * ) ; extern void debugfs_remove_recursive(struct dentry * ) ; struct sk_buff *ldv_skb_clone_885(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_893(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_887(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_883(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_891(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_892(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 ,
/*
 * ixgbe debugfs support (original driver code passed through CIL).
 *
 *  - ixgbe_dbg_reg_ops_read/_write: back the per-adapter "reg_ops" file.
 *    write accepts "write <reg> <val>" or "read <reg>" (hex, parsed with
 *    sscanf "%x"); commands are copied into the static 256-byte
 *    ixgbe_dbg_reg_ops_buf (at most 255 bytes, then NUL-terminated at
 *    buf[len]).  read echoes "<netdev name>: <last command>" via kasprintf.
 *    -28 is -ENOSPC, -12 is -ENOMEM.  The static command buffers are shared
 *    by all adapters — NOTE(review): concurrent writers would race; confirm
 *    whether that matches the upstream driver's (known) limitation.
 *  - ixgbe_dbg_netdev_ops_read/_write: same pattern for "netdev_ops"; the
 *    only command is "tx_timeout", which invokes ndo_tx_timeout directly.
 *  - ixgbe_dbg_adapter_init/_exit: create/remove the per-PCI-device debugfs
 *    directory and its two files (mode 384 == 0600 octal, owner rw).
 *  - ixgbe_dbg_init/_exit: create/remove the driver-wide debugfs root.
 *  - ldv_file_operations_11/12: generated harness constructors that allocate
 *    dummy inode/file objects for the two fops state machines.
 */
int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_888(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_889(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_890(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; static struct dentry *ixgbe_dbg_root ; static char ixgbe_dbg_reg_ops_buf[256U] = { '\000'}; static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp , char *buffer , size_t count , loff_t *ppos ) { struct ixgbe_adapter *adapter ; char *buf ; int len ; size_t tmp ; size_t tmp___0 ; ssize_t tmp___1 ; { adapter = (struct ixgbe_adapter *)filp->private_data; if (*ppos != 0LL) { return (0L); } else { } buf = kasprintf(208U, "%s: %s\n", (char *)(& (adapter->netdev)->name), (char *)(& ixgbe_dbg_reg_ops_buf)); if ((unsigned long )buf == (unsigned long )((char *)0)) { return (-12L); } else { } tmp = strlen((char const *)buf); if (tmp > count) { kfree((void const *)buf); return (-28L); } else { } tmp___0 = strlen((char const *)buf); tmp___1 = simple_read_from_buffer((void *)buffer, count, ppos, (void const *)buf, tmp___0); len = (int )tmp___1; kfree((void const *)buf); return ((ssize_t )len); } } static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp , char const *buffer , size_t count , loff_t *ppos ) { struct ixgbe_adapter *adapter ; int len ; ssize_t tmp ; u32 reg ; u32 value ; int cnt ; u32 reg___0 ; u32 value___0 ; int cnt___0 ; int tmp___0 ; int tmp___1 ; { adapter = (struct ixgbe_adapter *)filp->private_data; if (*ppos != 0LL) { return (0L); } else { } if (count > 255UL) { return (-28L); } else { } tmp = simple_write_to_buffer((void *)(& ixgbe_dbg_reg_ops_buf), 255UL, ppos, (void const *)buffer, count); len = (int )tmp; if (len < 0) { return ((ssize_t )len); } else { } ixgbe_dbg_reg_ops_buf[len] = 0; tmp___1 = strncmp((char const *)(& ixgbe_dbg_reg_ops_buf), "write",
5UL); if (tmp___1 == 0) { cnt = sscanf((char const *)(& ixgbe_dbg_reg_ops_buf) + 5U, "%x %x", & reg, & value); if (cnt == 2) { ixgbe_write_reg(& adapter->hw, reg, value); value = ixgbe_read_reg(& adapter->hw, reg); _dev_info((struct device const *)(& (adapter->pdev)->dev), "write: 0x%08x = 0x%08x\n", reg, value); } else { _dev_info((struct device const *)(& (adapter->pdev)->dev), "write \n"); } } else { tmp___0 = strncmp((char const *)(& ixgbe_dbg_reg_ops_buf), "read", 4UL); if (tmp___0 == 0) { cnt___0 = sscanf((char const *)(& ixgbe_dbg_reg_ops_buf) + 4U, "%x", & reg___0); if (cnt___0 == 1) { value___0 = ixgbe_read_reg(& adapter->hw, reg___0); _dev_info((struct device const *)(& (adapter->pdev)->dev), "read 0x%08x = 0x%08x\n", reg___0, value___0); } else { _dev_info((struct device const *)(& (adapter->pdev)->dev), "read \n"); } } else { _dev_info((struct device const *)(& (adapter->pdev)->dev), "Unknown command %s\n", (char *)(& ixgbe_dbg_reg_ops_buf)); _dev_info((struct device const *)(& (adapter->pdev)->dev), "Available commands:\n"); _dev_info((struct device const *)(& (adapter->pdev)->dev), " read \n"); _dev_info((struct device const *)(& (adapter->pdev)->dev), " write \n"); } } return ((ssize_t )count); } } static struct file_operations const ixgbe_dbg_reg_ops_fops = {& __this_module, 0, & ixgbe_dbg_reg_ops_read, & ixgbe_dbg_reg_ops_write, 0, 0, 0, 0, 0, 0, 0, 0, & simple_open, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static char ixgbe_dbg_netdev_ops_buf[256U] = { '\000'}; static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp , char *buffer , size_t count , loff_t *ppos ) { struct ixgbe_adapter *adapter ; char *buf ; int len ; size_t tmp ; size_t tmp___0 ; ssize_t tmp___1 ; { adapter = (struct ixgbe_adapter *)filp->private_data; if (*ppos != 0LL) { return (0L); } else { } buf = kasprintf(208U, "%s: %s\n", (char *)(& (adapter->netdev)->name), (char *)(& ixgbe_dbg_netdev_ops_buf)); if ((unsigned long )buf == (unsigned long )((char *)0)) { return
(-12L); } else { } tmp = strlen((char const *)buf); if (tmp > count) { kfree((void const *)buf); return (-28L); } else { } tmp___0 = strlen((char const *)buf); tmp___1 = simple_read_from_buffer((void *)buffer, count, ppos, (void const *)buf, tmp___0); len = (int )tmp___1; kfree((void const *)buf); return ((ssize_t )len); } } static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp , char const *buffer , size_t count , loff_t *ppos ) { struct ixgbe_adapter *adapter ; int len ; ssize_t tmp ; int tmp___0 ; { adapter = (struct ixgbe_adapter *)filp->private_data; if (*ppos != 0LL) { return (0L); } else { } if (count > 255UL) { return (-28L); } else { } tmp = simple_write_to_buffer((void *)(& ixgbe_dbg_netdev_ops_buf), 255UL, ppos, (void const *)buffer, count); len = (int )tmp; if (len < 0) { return ((ssize_t )len); } else { } ixgbe_dbg_netdev_ops_buf[len] = 0; tmp___0 = strncmp((char const *)(& ixgbe_dbg_netdev_ops_buf), "tx_timeout", 10UL); if (tmp___0 == 0) { (*(((adapter->netdev)->netdev_ops)->ndo_tx_timeout))(adapter->netdev); _dev_info((struct device const *)(& (adapter->pdev)->dev), "tx_timeout called\n"); } else { _dev_info((struct device const *)(& (adapter->pdev)->dev), "Unknown command: %s\n", (char *)(& ixgbe_dbg_netdev_ops_buf)); _dev_info((struct device const *)(& (adapter->pdev)->dev), "Available commands:\n"); _dev_info((struct device const *)(& (adapter->pdev)->dev), " tx_timeout\n"); } return ((ssize_t )count); } } static struct file_operations const ixgbe_dbg_netdev_ops_fops = {& __this_module, 0, & ixgbe_dbg_netdev_ops_read, & ixgbe_dbg_netdev_ops_write, 0, 0, 0, 0, 0, 0, 0, 0, & simple_open, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter ) { char const *name ; char const *tmp ; struct dentry *pfile ; { tmp = pci_name((struct pci_dev const *)adapter->pdev); name = tmp; adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root); if ((unsigned long )adapter->ixgbe_dbg_adapter !=
(unsigned long )((struct dentry *)0)) { pfile = debugfs_create_file("reg_ops", 384, adapter->ixgbe_dbg_adapter, (void *)adapter, & ixgbe_dbg_reg_ops_fops); if ((unsigned long )pfile == (unsigned long )((struct dentry *)0)) { dev_err((struct device const *)(& (adapter->pdev)->dev), "debugfs reg_ops for %s failed\n", name); } else { } pfile = debugfs_create_file("netdev_ops", 384, adapter->ixgbe_dbg_adapter, (void *)adapter, & ixgbe_dbg_netdev_ops_fops); if ((unsigned long )pfile == (unsigned long )((struct dentry *)0)) { dev_err((struct device const *)(& (adapter->pdev)->dev), "debugfs netdev_ops for %s failed\n", name); } else { } } else { dev_err((struct device const *)(& (adapter->pdev)->dev), "debugfs entry for %s failed\n", name); } return; } } void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter ) { { debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); adapter->ixgbe_dbg_adapter = (struct dentry *)0; return; } } void ixgbe_dbg_init(void) { { ixgbe_dbg_root = debugfs_create_dir((char const *)(& ixgbe_driver_name), (struct dentry *)0); if ((unsigned long )ixgbe_dbg_root == (unsigned long )((struct dentry *)0)) { printk("\vixgbe: init of debugfs failed\n"); } else { } return; } } void ixgbe_dbg_exit(void) { { debugfs_remove_recursive(ixgbe_dbg_root); return; } } int ldv_retval_0 ; extern int ldv_release_12(void) ; extern int ldv_release_11(void) ; int ldv_retval_7 ; void ldv_file_operations_12(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); ixgbe_dbg_reg_ops_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); ixgbe_dbg_reg_ops_fops_group2 = (struct file *)tmp___0; return; } } void ldv_file_operations_11(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); ixgbe_dbg_netdev_ops_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); ixgbe_dbg_netdev_ops_fops_group2 = (struct file *)tmp___0; return; } } void ldv_main_exported_11(void) { size_t ldvarg347 ; size_t ldvarg350 ; char *ldvarg348 ;
/*
 * Generated state machines for the two debugfs file_operations tables.
 * ldv_main_exported_11 drives ixgbe_dbg_netdev_ops_fops and
 * ldv_main_exported_12 drives ixgbe_dbg_reg_ops_fops: state 1 = file closed,
 * a successful simple_open() moves to state 2 and increments ref_cnt, read is
 * only reachable in state 2, release returns to state 1.  Arguments are
 * zero-initialized dummies; __VERIFIER_nondet_int() picks which callback the
 * verifier explores next.
 */
void *tmp ; char *ldvarg351 ; void *tmp___0 ; loff_t *ldvarg346 ; void *tmp___1 ; loff_t *ldvarg349 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(1UL); ldvarg348 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg351 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(8UL); ldvarg346 = (loff_t *)tmp___1; tmp___2 = ldv_init_zalloc(8UL); ldvarg349 = (loff_t *)tmp___2; ldv_memset((void *)(& ldvarg347), 0, 8UL); ldv_memset((void *)(& ldvarg350), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_11 == 1) { ldv_retval_0 = simple_open(ixgbe_dbg_netdev_ops_fops_group1, ixgbe_dbg_netdev_ops_fops_group2); if (ldv_retval_0 == 0) { ldv_state_variable_11 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_55550; case 1: ; if (ldv_state_variable_11 == 1) { ixgbe_dbg_netdev_ops_write(ixgbe_dbg_netdev_ops_fops_group2, (char const *)ldvarg351, ldvarg350, ldvarg349); ldv_state_variable_11 = 1; } else { } if (ldv_state_variable_11 == 2) { ixgbe_dbg_netdev_ops_write(ixgbe_dbg_netdev_ops_fops_group2, (char const *)ldvarg351, ldvarg350, ldvarg349); ldv_state_variable_11 = 2; } else { } goto ldv_55550; case 2: ; if (ldv_state_variable_11 == 2) { ixgbe_dbg_netdev_ops_read(ixgbe_dbg_netdev_ops_fops_group2, ldvarg348, ldvarg347, ldvarg346); ldv_state_variable_11 = 2; } else { } goto ldv_55550; case 3: ; if (ldv_state_variable_11 == 2) { ldv_release_11(); ldv_state_variable_11 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_55550; default: ldv_stop(); } ldv_55550: ; return; } } void ldv_main_exported_12(void) { size_t ldvarg437 ; loff_t *ldvarg436 ; void *tmp ; size_t ldvarg440 ; char *ldvarg438 ; void *tmp___0 ; loff_t *ldvarg439 ; void *tmp___1 ; char *ldvarg441 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(8UL); ldvarg436 = (loff_t *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg438 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(8UL); ldvarg439 = (loff_t *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg441 = (char *)tmp___2;
ldv_memset((void *)(& ldvarg437), 0, 8UL); ldv_memset((void *)(& ldvarg440), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_12 == 1) { ldv_retval_7 = simple_open(ixgbe_dbg_reg_ops_fops_group1, ixgbe_dbg_reg_ops_fops_group2); if (ldv_retval_7 == 0) { ldv_state_variable_12 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_55565; case 1: ; if (ldv_state_variable_12 == 1) { ixgbe_dbg_reg_ops_write(ixgbe_dbg_reg_ops_fops_group2, (char const *)ldvarg441, ldvarg440, ldvarg439); ldv_state_variable_12 = 1; } else { } if (ldv_state_variable_12 == 2) { ixgbe_dbg_reg_ops_write(ixgbe_dbg_reg_ops_fops_group2, (char const *)ldvarg441, ldvarg440, ldvarg439); ldv_state_variable_12 = 2; } else { } goto ldv_55565; case 2: ; if (ldv_state_variable_12 == 2) { ixgbe_dbg_reg_ops_read(ixgbe_dbg_reg_ops_fops_group2, ldvarg438, ldvarg437, ldvarg436); ldv_state_variable_12 = 2; } else { } goto ldv_55565; case 3: ; if (ldv_state_variable_12 == 2) { ldv_release_12(); ldv_state_variable_12 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_55565; default: ldv_stop(); } ldv_55565: ; return; } } bool ldv_queue_work_on_867(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_868(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_869(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) {
ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_870(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } } bool ldv_queue_delayed_work_on_871(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_877(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_883(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_885(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_887(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_888(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_889(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_890(struct 
net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_891(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_892(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_893(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_894(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static int atomic_dec_and_test(atomic_t *v ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0; sete %1": "+m" (v->counter), "=qm" (c): : "memory"); return ((int )((signed char )c) != 0); } } extern void _raw_spin_lock_bh(raw_spinlock_t * ) ; extern void _raw_spin_unlock_bh(raw_spinlock_t * ) ; __inline static void ldv_spin_lock_bh_905(spinlock_t *lock ) { { _raw_spin_lock_bh(& lock->__annonCompField18.rlock); return; } } __inline static void spin_lock_bh(spinlock_t *lock ) ; __inline static void ldv_spin_unlock_bh_909(spinlock_t *lock ) { { _raw_spin_unlock_bh(& lock->__annonCompField18.rlock); return; } } __inline static void spin_unlock_bh(spinlock_t *lock ) ; bool ldv_queue_work_on_914(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_916(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , 
/* Forward declarations of further LDV model wrappers used later in this translation unit. */
struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_915(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_918(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_917(struct workqueue_struct *ldv_func_arg1 ) ; extern void *__alloc_percpu(size_t , size_t ) ; extern void free_percpu(void * ) ; void *ldv_kmem_cache_alloc_924(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_941(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; __inline static void *kzalloc(size_t size , gfp_t flags ) ; extern int pci_bus_read_config_byte(struct pci_bus * , unsigned int , int , u8 * ) ;
/* Thin wrapper: read one byte of PCI config space via the device's bus accessor. */
__inline static int pci_read_config_byte(struct pci_dev const *dev , int where , u8 *val ) { int tmp ; { tmp = pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); return (tmp); } }
/* Expanded sg_page(): BUG (inline ud2 + infinite goto loop, the CIL rendering of BUG_ON)
 * when sg_magic is wrong or the entry is a chain link; otherwise mask off the low
 * two flag bits of page_link to recover the struct page pointer. */
__inline static struct page *sg_page(struct scatterlist *sg ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (123), "i" (12UL)); ldv_26012: ; goto ldv_26012; } else { } tmp___0 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (124), "i" (12UL)); ldv_26013: ; goto ldv_26013; } else { } return ((struct page *)(sg->page_link & 0xfffffffffffffffcUL)); } }
/* sg_virt(): kernel virtual address of the scatterlist entry (page address + offset). */
__inline static void *sg_virt(struct scatterlist *sg ) { struct page *tmp ; void *tmp___0 ; { tmp = sg_page(sg); tmp___0 = lowmem_page_address((struct page const *)tmp); return (tmp___0 + (unsigned long )sg->offset); } } extern struct scatterlist *sg_next(struct scatterlist * ) ; extern struct dma_pool *dma_pool_create(char const * , struct device * , size_t , size_t , size_t ) ; extern void dma_pool_destroy(struct dma_pool * ) ; void *ldv_dma_pool_alloc_942(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) ; extern void dma_pool_free(struct dma_pool * , void * , dma_addr_t ) ; extern void debug_dma_map_sg(struct device * , struct scatterlist * , int , int , int ) ; extern void debug_dma_unmap_sg(struct device * , struct scatterlist * , int , int ) ;
/* Expanded dma_map_single_attrs(): validates the DMA direction (BUG on invalid),
 * translates the virtual address to a page + offset (the -24189255811072L constant is
 * the CIL-evaluated vmemmap base), maps it through the device's dma_map_ops, and
 * reports the mapping to the DMA debug facility. */
__inline static dma_addr_t dma_map_single_attrs___0(struct device *dev , void *ptr , size_t size , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; dma_addr_t addr ; int tmp___0 ; long tmp___1 ; unsigned long tmp___2 ; unsigned long tmp___3 ; { tmp = get_dma_ops(dev); ops = tmp; kmemcheck_mark_initialized(ptr, (unsigned int )size); tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (19), "i" (12UL)); ldv_26790: ; goto ldv_26790; } else { } tmp___2 = __phys_addr((unsigned long )ptr); addr = (*(ops->map_page))(dev, (struct page *)-24189255811072L + (tmp___2 >> 12), (unsigned long )ptr & 4095UL, size, dir, attrs); tmp___3 = __phys_addr((unsigned long )ptr); debug_dma_map_page(dev, (struct page *)-24189255811072L + (tmp___3 >> 12), (unsigned long )ptr & 4095UL, size, (int )dir, addr, 1); return (addr); } }
/* Expanded dma_unmap_single_attrs(): direction check, optional unmap_page op, DMA-debug unmap. */
__inline static void dma_unmap_single_attrs___0(struct device *dev , dma_addr_t addr , size_t size , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (36), "i" (12UL)); ldv_26799: ; goto ldv_26799; } else { } if ((unsigned long )ops->unmap_page != (unsigned long )((void (*)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_page))(dev, addr, size, dir, attrs); } else { } debug_dma_unmap_page(dev, addr, size, (int )dir, 1); return; } }
/* Expanded dma_map_sg_attrs(): walks the scatterlist (the goto ldv_268xx labels are the
 * CIL rendering of for_each_sg), marks each buffer initialized for kmemcheck, validates
 * the direction, maps via ops->map_sg (BUG if it returns negative), and records the
 * mapping with debug_dma_map_sg(). Returns the number of mapped entries. */
__inline static int dma_map_sg_attrs(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int i ; int ents ; struct scatterlist *s ; void *tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; { tmp = get_dma_ops(dev); ops = tmp; i = 0; s = sg; goto ldv_26812; ldv_26811: tmp___0 = sg_virt(s); kmemcheck_mark_initialized(tmp___0, s->length); i = i + 1; s = sg_next(s); ldv_26812: ; if (i < nents) { goto ldv_26811; } else { } tmp___1 = valid_dma_direction((int )dir); tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (56), "i" (12UL)); ldv_26814: ; goto ldv_26814; } else { } ents = (*(ops->map_sg))(dev, sg, nents, dir, attrs); tmp___3 = ldv__builtin_expect(ents < 0, 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (58), "i" (12UL)); ldv_26815: ; goto ldv_26815; } else { } debug_dma_map_sg(dev, sg, nents, ents, (int )dir); return (ents); } }
/* Expanded dma_unmap_sg_attrs(): direction check, DMA-debug notification, optional unmap_sg op. */
__inline static void dma_unmap_sg_attrs(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (70), "i" (12UL)); ldv_26824: ; goto ldv_26824; } else { } debug_dma_unmap_sg(dev, sg, nents, (int )dir); if ((unsigned long )ops->unmap_sg != (unsigned long )((void (*)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_sg))(dev, sg, nents, dir, attrs); } else { } return; } }
struct sk_buff *ldv_skb_clone_932(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_940(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_934(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_930(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_938(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_939(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ;
/* skb header-offset helpers: transport/network header fields are stored as offsets
 * from skb->head; "reset" points them at skb->data, "set" adds a caller offset. */
__inline static void skb_reset_transport_header(struct sk_buff *skb ) { { skb->transport_header = (int )((__u16 )((long )skb->data)) - (int )((__u16 )((long )skb->head)); return; } } __inline static void skb_set_transport_header(struct sk_buff *skb , int const offset ) { { skb_reset_transport_header(skb); skb->transport_header = (int )skb->transport_header + (int )((__u16 )offset); return; } }
__inline static void skb_reset_network_header(struct sk_buff *skb ) { { skb->network_header = (int )((__u16 )((long )skb->data)) - (int )((__u16 )((long )skb->head)); return; } } __inline static void skb_set_network_header(struct sk_buff *skb , int const offset ) { { skb_reset_network_header(skb); skb->network_header = (int )skb->network_header + (int )((__u16 )offset); return; } }
/* skb_mac_header(): pointer to the L2 header (head + stored mac_header offset). */
__inline static unsigned char *skb_mac_header(struct sk_buff const *skb ) { { return ((unsigned char *)skb->head + (unsigned long )skb->mac_header); } } struct sk_buff *ldv___netdev_alloc_skb_935(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_936(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_937(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ;
/* __skb_linearize(): pull all paged data into the linear area; 0 on success, -12 (-ENOMEM) on failure. */
__inline static int __skb_linearize(struct sk_buff *skb ) { unsigned char *tmp ; { tmp = __pskb_pull_tail(skb, (int )skb->data_len); return ((unsigned long )tmp != (unsigned long )((unsigned char *)0U) ? 0 : -12); } }
/* skb_linearize(): only does work when the skb actually has paged (nonlinear) data. */
__inline static int skb_linearize(struct sk_buff *skb ) { int tmp___0 ; int tmp___1 ; bool tmp___2 ; { tmp___2 = skb_is_nonlinear((struct sk_buff const *)skb); if ((int )tmp___2) { tmp___0 = __skb_linearize(skb); tmp___1 = tmp___0; } else { tmp___1 = 0; } return (tmp___1); } } __inline static struct ethhdr *eth_hdr(struct sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_mac_header(skb); return ((struct ethhdr *)tmp); } } extern void netdev_features_change(struct net_device * ) ;
/* ntoh24(): assemble a 24-bit big-endian field (used for the FC frame F_CTL field). */
__inline static u32 ntoh24(u8 const *p ) { { return ((u32 )((((int )*p << 16) | ((int )*(p + 1UL) << 8)) | (int )*(p + 2UL))); } }
/* Reset one DDP (direct data placement) context to its idle state. err=1 marks it unused. */
__inline static void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp ) { { ddp->len = 0; ddp->err = 1U; ddp->udl = (u64 *)0ULL; ddp->udp = 0ULL; ddp->sgl = (struct scatterlist *)0; ddp->sgc = 0U; return; } }
/* ixgbe_fcoe_ddp_put(): release the DDP context for exchange id @xid. If the context
 * completed without error it first invalidates the hardware context — X550 (mac.type 4U)
 * uses per-xid register offsets, older macs use the shared FCBUFF/FCDMARW/FCFLT registers
 * under fcoe->lock — then waits briefly if the buffer-valid bit is still set, unmaps the
 * scatterlist, frees the UDL from its dma_pool, and clears the context.
 * Returns the byte length that was placed (0 when nothing to release). */
int ixgbe_fcoe_ddp_put(struct net_device *netdev , u16 xid ) { int len ; struct ixgbe_fcoe *fcoe ; struct ixgbe_adapter *adapter ; struct ixgbe_fcoe_ddp *ddp ; struct ixgbe_hw *hw ; u32 fcbuff ; void *tmp ; { if ((unsigned long )netdev == (unsigned long )((struct net_device *)0)) { return (0); } else { } if ((unsigned int )xid > 511U) { return (0); } else { } tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; fcoe = & adapter->fcoe; ddp = (struct ixgbe_fcoe_ddp *)(& fcoe->ddp) + (unsigned long )xid; if ((unsigned long )ddp->udl == (unsigned long )((u64 *)0ULL)) { return (0); } else { } hw = & adapter->hw; len = ddp->len; if (ddp->err == 0U) { goto skip_ddpinv; } else { } if ((unsigned int )hw->mac.type == 4U) { ixgbe_write_reg(hw, (u32 )(((int )xid + 10240) * 16), 0U); ixgbe_write_reg(hw, (u32 )((int )xid * 16 + 163852), (unsigned int )xid | 16384U); ixgbe_write_reg(hw, (u32 )((int )xid * 16 + 131080), 0U); ixgbe_write_reg(hw, (u32 )((int )xid * 16 + 131084), (unsigned int )xid | 16384U); ixgbe_write_reg(hw, (u32 )((int )xid * 16 + 131084), (unsigned int )xid | 32768U); fcbuff = ixgbe_read_reg(hw, (u32 )((int )xid * 16 + 131080)); } else { spin_lock_bh(& fcoe->lock); ixgbe_write_reg(hw, 20744U, 0U); ixgbe_write_reg(hw, 20752U, (unsigned int )xid | 16384U); ixgbe_write_reg(hw, 9240U, 0U); ixgbe_write_reg(hw, 9248U, (unsigned int )xid | 16384U); ixgbe_write_reg(hw, 9248U, (unsigned int )xid | 32768U); fcbuff = ixgbe_read_reg(hw, 9240U); spin_unlock_bh(& fcoe->lock); } if ((int )fcbuff & 1) { usleep_range(100UL, 150UL); } else { } skip_ddpinv: ; if ((unsigned long )ddp->sgl != (unsigned long )((struct scatterlist *)0)) { dma_unmap_sg_attrs(& (adapter->pdev)->dev, ddp->sgl, (int )ddp->sgc, 2, (struct dma_attrs *)0); } else { } if ((unsigned long )ddp->pool != (unsigned long )((struct dma_pool *)0)) { dma_pool_free(ddp->pool, (void *)ddp->udl, ddp->udp); ddp->pool = (struct dma_pool *)0; } else { } ixgbe_fcoe_clear_ddp(ddp); return (len); } }
/* ixgbe_fcoe_ddp_setup(): program a DDP context for @xid over scatterlist @sgl.
 * Validates inputs and adapter state, then (continued on the following lines) maps the
 * scatterlist for DMA, builds the user descriptor list from a per-cpu dma_pool, and
 * writes the context into hardware. target_mode selects FC target (write) placement. */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev , u16 xid , struct scatterlist *sgl , unsigned int sgc , int target_mode ) { struct ixgbe_adapter *adapter ; struct ixgbe_hw *hw ; struct ixgbe_fcoe *fcoe ; struct ixgbe_fcoe_ddp *ddp ; struct ixgbe_fcoe_ddp_pool *ddp_pool ; struct scatterlist *sg ; unsigned int i ; unsigned int j ; unsigned int dmacount ; unsigned int len ; unsigned int bufflen ; unsigned int firstoff ; unsigned int lastsize ; unsigned int thisoff ; unsigned int thislen ; u32 fcbuff ; u32 fcdmarw ; u32 fcfltrw ; u32 fcrxctl ; dma_addr_t addr ; void *tmp ; int tmp___0 ; int tmp___1 ; void const *__vpp_verify ; unsigned long __ptr ; int pscr_ret__ ; void const *__vpp_verify___0 ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___2 ; void *tmp___3 ; unsigned int _min1 ; unsigned int _min2 ; int tmp___4 ; { bufflen = 4096U; firstoff = 0U; thisoff = 0U; thislen = 0U; addr = 0ULL; if ((unsigned long )netdev == (unsigned long )((struct net_device *)0) || (unsigned long )sgl == (unsigned long )((struct scatterlist *)0)) { return (0); } else { } tmp =
/* Continuation of ixgbe_fcoe_ddp_setup(): reject out-of-range xids, a downed/resetting
 * adapter (state bits 1 and 2), an xid whose context already holds a scatterlist, and
 * a missing per-cpu ddp_pool. */
netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; if ((unsigned int )xid > 511U) { if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "xid=0x%x out-of-range\n", (int )xid); } else { } return (0); } else { } tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 != 0) { return (0); } else { tmp___1 = constant_test_bit(1L, (unsigned long const volatile *)(& adapter->state)); if (tmp___1 != 0) { return (0); } else { } } fcoe = & adapter->fcoe; ddp = (struct ixgbe_fcoe_ddp *)(& fcoe->ddp) + (unsigned long )xid; if ((unsigned long )ddp->sgl != (unsigned long )((struct scatterlist *)0)) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "xid 0x%x w/ non-null sgl=%p nents=%d\n", (int )xid, ddp->sgl, ddp->sgc); } else { } return (0); } else { } ixgbe_fcoe_clear_ddp(ddp); if ((unsigned long )fcoe->ddp_pool == (unsigned long )((struct ixgbe_fcoe_ddp_pool *)0)) { if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "No ddp_pool resources allocated\n"); } else { } return (0); } else { }
/* CIL expansion of get_cpu_ptr(fcoe->ddp_pool): disable preemption
 * (__preempt_count_add(1)) and read the current CPU number from the gs-segment percpu
 * area; the nested switch on 4UL picks the 4-byte mov variant. The matching
 * __preempt_count_sub(1) appears on the success path and at out_noddp. */
__vpp_verify = (void const *)0; __asm__ ("": "=r" (__ptr): "0" (fcoe->ddp_pool)); __preempt_count_add(1); __asm__ volatile ("": : : "memory"); __vpp_verify___0 = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_61764; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_61764; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_61764; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_61764; default: __bad_percpu_size(); } ldv_61764: pscr_ret__ = pfo_ret__; goto ldv_61770; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_61774; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_61774; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_61774; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_61774; default: __bad_percpu_size(); } ldv_61774: pscr_ret__ = pfo_ret_____0; goto ldv_61770; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_61783; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_61783; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_61783; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_61783; default: __bad_percpu_size(); } ldv_61783: pscr_ret__ = pfo_ret_____1; goto ldv_61770; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_61792; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_61792; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_61792; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_61792; default: __bad_percpu_size(); } ldv_61792: pscr_ret__ = pfo_ret_____2; goto ldv_61770; default: __bad_size_call_parameter(); goto ldv_61770; } ldv_61770: ddp_pool = (struct ixgbe_fcoe_ddp_pool *)(__per_cpu_offset[pscr_ret__] + __ptr); if ((unsigned long )ddp_pool->pool == (unsigned long )((struct dma_pool *)0)) { if ((int )adapter->msg_enable & 1) { netdev_warn((struct net_device const *)adapter->netdev, "xid=0x%x no ddp pool for fcoe\n", (int )xid); } else { } goto out_noddp; } else { } tmp___2 = dma_map_sg_attrs(& (adapter->pdev)->dev, sgl, (int )sgc, 2, (struct dma_attrs *)0); dmacount = (unsigned int )tmp___2; if (dmacount == 0U) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "xid 0x%x DMA map error\n", (int )xid); } else { } goto out_noddp; } else { }
/* Allocate the user descriptor list (UDL) from the per-cpu dma_pool, then walk the
 * mapped scatterlist splitting each DMA segment into 4 KiB (bufflen) buffer entries.
 * Constraints enforced by the loop: at most 256 entries (j > 255U bails), only the
 * first buffer may start at a nonzero offset, and only the last may be short. */
tmp___3 = ldv_dma_pool_alloc_942(ddp_pool->pool, 32U, & ddp->udp); ddp->udl = (u64 *)tmp___3; if ((unsigned long )ddp->udl == (unsigned long )((u64 *)0ULL)) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "failed allocated ddp context\n"); } else { } goto out_noddp_unmap; } else { } ddp->pool = ddp_pool->pool; ddp->sgl = sgl; ddp->sgc = sgc; j = 0U; i = 0U; sg = sgl; goto ldv_61813; ldv_61812: addr = sg->dma_address; len = sg->dma_length; goto ldv_61810; ldv_61809: ; if (j > 255U) { ddp_pool->noddp = ddp_pool->noddp + 1ULL; goto out_noddp_free; } else { } thisoff = (bufflen - 1U) & (unsigned int )addr; _min1 = bufflen - thisoff; _min2 = len; thislen = _min1 < _min2 ? _min1 : _min2; if (j != 0U && thisoff != 0U) { goto out_noddp_free; } else { } if ((dmacount - 1U != i || thislen != len) && thislen + thisoff != bufflen) { goto out_noddp_free; } else { } *(ddp->udl + (unsigned long )j) = addr - (dma_addr_t )thisoff; if (j == 0U) { firstoff = thisoff; } else { } len = len - thislen; addr = (dma_addr_t )thislen + addr; j = j + 1U; ldv_61810: ; if (len != 0U) { goto ldv_61809; } else { } i = i + 1U; sg = sg_next(sg); ldv_61813: ; if (i < dmacount) { goto ldv_61812; } else { } lastsize = thisoff + thislen; if (lastsize == bufflen) { if (j > 255U) { ddp_pool->noddp_ext_buff = ddp_pool->noddp_ext_buff + 1ULL; goto out_noddp_free; } else { } *(ddp->udl + (unsigned long )j) = fcoe->extra_ddp_buffer_dma; j = j + 1U; lastsize = 1U; } else { } __asm__ volatile ("": : : "memory"); __preempt_count_sub(1);
/* Compose the FCBUFF (buffer count/offset/valid), FCDMARW (xid + last buffer size) and
 * FCFLT (filter) register values, enable FC write support on first target-mode use,
 * and program the context — per-xid registers on X550, shared registers under
 * fcoe->lock on older hardware. Returns 1 on success. */
fcbuff = 0U; fcbuff = ((j << 8) & 65535U) | fcbuff; fcbuff = (firstoff << 16) | fcbuff; if (target_mode != 0) { fcbuff = fcbuff | 128U; } else { } fcbuff = fcbuff | 1U; fcdmarw = (u32 )xid; fcdmarw = fcdmarw | 16384U; fcdmarw = (lastsize << 16) | fcdmarw; fcfltrw = (u32 )xid; fcfltrw = fcfltrw | 16384U; hw = & adapter->hw; if (target_mode != 0) { tmp___4 = constant_test_bit(1L, (unsigned long const volatile *)(& fcoe->mode)); if (tmp___4 == 0) { set_bit(1L, (unsigned long volatile *)(& fcoe->mode)); fcrxctl = ixgbe_read_reg(hw, 20736U); fcrxctl = fcrxctl | 8U; ixgbe_write_reg(hw, 20736U, fcrxctl); } else { } } else { } if ((unsigned int )hw->mac.type == 4U) { ixgbe_write_reg(hw, (u32 )(((int )xid + 8192) * 16), (u32 )ddp->udp); ixgbe_write_reg(hw, (u32 )((int )xid * 16 + 131076), (u32 )(ddp->udp >> 32)); ixgbe_write_reg(hw, (u32 )((int )xid * 16 + 131080), fcbuff); ixgbe_write_reg(hw, (u32 )((int )xid * 16 + 131084), fcdmarw); ixgbe_write_reg(hw, (u32 )(((int )xid + 10240) * 16), 1U); ixgbe_write_reg(hw, (u32 )((int )xid * 16 + 163844), 0U); ixgbe_write_reg(hw, (u32 )((int )xid * 16 + 163852), fcfltrw); } else { spin_lock_bh(& fcoe->lock); ixgbe_write_reg(hw, 9232U, (u32 )ddp->udp); ixgbe_write_reg(hw, 9236U, (u32 )(ddp->udp >> 32)); ixgbe_write_reg(hw, 9240U, fcbuff); ixgbe_write_reg(hw, 9248U, fcdmarw); ixgbe_write_reg(hw, 20952U, 0U); ixgbe_write_reg(hw, 20744U, 1U); ixgbe_write_reg(hw, 20752U, fcfltrw); spin_unlock_bh(& fcoe->lock); } return (1); out_noddp_free: dma_pool_free(ddp->pool, (void *)ddp->udl, ddp->udp); ixgbe_fcoe_clear_ddp(ddp); out_noddp_unmap: dma_unmap_sg_attrs(& (adapter->pdev)->dev, sgl, (int )sgc, 2, (struct dma_attrs *)0); out_noddp: __asm__ volatile ("": : : "memory"); __preempt_count_sub(1); return (0); } }
/* Public entry points: initiator-mode (read) and target-mode DDP setup. */
int ixgbe_fcoe_ddp_get(struct net_device *netdev , u16 xid , struct scatterlist *sgl , unsigned int sgc ) { int tmp ; { tmp = ixgbe_fcoe_ddp_setup(netdev, (int )xid, sgl, sgc, 0); return (tmp); } } int ixgbe_fcoe_ddp_target(struct net_device *netdev , u16 xid , struct scatterlist *sgl , unsigned int sgc ) { int tmp ; { tmp = ixgbe_fcoe_ddp_setup(netdev, (int )xid, sgl, sgc, 1); return (tmp); } }
/* ixgbe_fcoe_ddp(): receive-path check whether a frame was direct-placed (continues below). */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter , union ixgbe_adv_rx_desc *rx_desc , struct sk_buff *skb ) { int rc ; struct ixgbe_fcoe *fcoe ; struct ixgbe_fcoe_ddp *ddp ; struct fc_frame_header
*fh ; struct fcoe_crc_eof *crc ; __le32 fcerr ; __le32 tmp ; __le32 ddp_err ; int ddp_max ; u32 fctl ; u16 xid ; struct ethhdr *tmp___0 ; __u16 tmp___1 ; __u16 tmp___2 ; __le32 tmp___3 ; unsigned char *tmp___4 ; { rc = -22; tmp = ixgbe_test_staterr(rx_desc, 7340032U); fcerr = tmp; if (fcerr == 1048576U) { skb->ip_summed = 0U; } else { skb->ip_summed = 1U; } tmp___0 = eth_hdr((struct sk_buff const *)skb); if ((unsigned int )tmp___0->h_proto == 129U) { fh = (struct fc_frame_header *)skb->data + 18U; } else { fh = (struct fc_frame_header *)skb->data + 14U; } fctl = ntoh24((u8 const *)(& fh->fh_f_ctl)); if ((fctl & 8388608U) != 0U) { tmp___1 = __fswab16((int )fh->fh_ox_id); xid = tmp___1; } else { tmp___2 = __fswab16((int )fh->fh_rx_id); xid = tmp___2; } ddp_max = 512; if ((unsigned int )adapter->hw.mac.type == 4U) { ddp_max = 2048; } else { } if ((int )xid >= ddp_max) { return (-22); } else { } fcoe = & adapter->fcoe; ddp = (struct ixgbe_fcoe_ddp *)(& fcoe->ddp) + (unsigned long )xid; if ((unsigned long )ddp->udl == (unsigned long )((u64 *)0ULL)) { return (-22); } else { } ddp_err = ixgbe_test_staterr(rx_desc, 2154823680U); if (ddp_err != 0U) { return (-22); } else { } tmp___3 = ixgbe_test_staterr(rx_desc, 48U); switch (tmp___3) { case 48U: ddp->len = (int )rx_desc->wb.lower.hi_dword.rss; rc = 0; goto ldv_61843; case 32U: dma_unmap_sg_attrs(& (adapter->pdev)->dev, ddp->sgl, (int )ddp->sgc, 2, (struct dma_attrs *)0); ddp->err = ddp_err; ddp->sgl = (struct scatterlist *)0; ddp->sgc = 0U; case 16U: ddp->len = (int )rx_desc->wb.lower.hi_dword.rss; if (ddp->len != 0) { rc = ddp->len; } else { } goto ldv_61843; case 0U: ; default: ; goto ldv_61843; } ldv_61843: ; if ((unsigned int )fh->fh_r_ctl == 1U && (fctl & 524288U) != 0U) { skb_linearize(skb); tmp___4 = skb_put(skb, 8U); crc = (struct fcoe_crc_eof *)tmp___4; crc->fcoe_eof = 66U; } else { } return (rc); } } int ixgbe_fso(struct ixgbe_ring *tx_ring , struct ixgbe_tx_buffer *first , u8 *hdr_len ) { struct sk_buff *skb ; 
/* Continuation of ixgbe_fso(): validate that a GSO skb is SKB_GSO_FCOE, position the
 * network/transport headers around the FCoE encapsulation (network header at mac_len,
 * transport 14 bytes further), translate the FC SOF byte (0x2d/0x2e/0x35/0x36) and the
 * EOF byte read from the last 4 bytes of the frame into the FCOE_SOF_EOF context bits,
 * then build the advanced TX context descriptor (vlan_macip_lens / mss_l4len_idx) and
 * emit it via ixgbe_tx_ctxtdesc(). Returns 0 on success, -22 (-EINVAL) on a bad
 * gso type, SOF, or EOF. */
struct fc_frame_header *fh ; u32 vlan_macip_lens ; u32 fcoe_sof_eof ; u32 mss_l4len_idx ; u8 sof ; u8 eof ; unsigned char *tmp ; bool tmp___0 ; unsigned char *tmp___1 ; unsigned char *tmp___2 ; bool tmp___3 ; unsigned char *tmp___4 ; int tmp___5 ; unsigned char *tmp___6 ; unsigned char *tmp___7 ; bool tmp___8 ; unsigned char *tmp___9 ; int tmp___10 ; int tmp___11 ; { skb = first->skb; fcoe_sof_eof = 0U; tmp___0 = skb_is_gso((struct sk_buff const *)skb); if ((int )tmp___0) { tmp___1 = skb_end_pointer((struct sk_buff const *)skb); if ((unsigned int )((struct skb_shared_info *)tmp___1)->gso_type != 32U) { tmp = skb_end_pointer((struct sk_buff const *)skb); dev_err((struct device const *)tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", (int )((struct skb_shared_info *)tmp)->gso_type); return (-22); } else { } } else { } skb_set_network_header(skb, (int const )skb->mac_len); skb_set_transport_header(skb, (int const )((unsigned int )skb->mac_len + 14U)); tmp___2 = skb_network_header((struct sk_buff const *)skb); sof = ((struct fcoe_hdr *)tmp___2)->fcoe_sof; switch ((int )sof) { case 45: fcoe_sof_eof = 32768U; goto ldv_61861; case 46: fcoe_sof_eof = 36864U; goto ldv_61861; case 53: ; goto ldv_61861; case 54: fcoe_sof_eof = 4096U; goto ldv_61861; default: dev_warn((struct device const *)tx_ring->dev, "unknown sof = 0x%x\n", (int )sof); return (-22); } ldv_61861: skb_copy_bits((struct sk_buff const *)skb, (int )(skb->len - 4U), (void *)(& eof), 1); switch ((int )eof) { case 65: fcoe_sof_eof = fcoe_sof_eof; goto ldv_61867; case 66: tmp___3 = skb_is_gso((struct sk_buff const *)skb); if ((int )tmp___3) { fcoe_sof_eof = fcoe_sof_eof | 16384U; } else { fcoe_sof_eof = fcoe_sof_eof | 1024U; } goto ldv_61867; case 73: fcoe_sof_eof = fcoe_sof_eof | 2048U; goto ldv_61867; case 80: fcoe_sof_eof = fcoe_sof_eof | 3072U; goto ldv_61867; default: dev_warn((struct device const *)tx_ring->dev, "unknown eof = 0x%x\n", (int )eof); return (-22); } ldv_61867: tmp___4 = skb_transport_header((struct sk_buff const *)skb); fh = (struct fc_frame_header *)tmp___4; if (((int )fh->fh_f_ctl[2] & 8) != 0) { fcoe_sof_eof = fcoe_sof_eof | 8192U; } else { } *hdr_len = 8U; tmp___8 = skb_is_gso((struct sk_buff const *)skb); if ((int )tmp___8) { tmp___5 = skb_transport_offset((struct sk_buff const *)skb); *hdr_len = (unsigned int )((int )*hdr_len + (int )((u8 )tmp___5)) + 24U; tmp___6 = skb_end_pointer((struct sk_buff const *)skb); tmp___7 = skb_end_pointer((struct sk_buff const *)skb); first->gso_segs = (unsigned short )((((skb->len - (unsigned int )*hdr_len) + (unsigned int )((struct skb_shared_info *)tmp___6)->gso_size) - 1U) / (unsigned int )((struct skb_shared_info *)tmp___7)->gso_size); first->bytecount = first->bytecount + (unsigned int )(((int )first->gso_segs + -1) * (int )*hdr_len); first->tx_flags = first->tx_flags | 2U; } else { } first->tx_flags = first->tx_flags | 136U; tmp___9 = skb_end_pointer((struct sk_buff const *)skb); mss_l4len_idx = (u32 )((int )((struct skb_shared_info *)tmp___9)->gso_size << 16); tmp___10 = skb_transport_offset((struct sk_buff const *)skb); vlan_macip_lens = (u32 )tmp___10 + 24U; tmp___11 = skb_transport_offset((struct sk_buff const *)skb); vlan_macip_lens = (u32 )((tmp___11 + -4) << 9) | vlan_macip_lens; vlan_macip_lens = (first->tx_flags & 4294901760U) | vlan_macip_lens; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, 32768U, mss_l4len_idx); return (0); } }
/* Destroy the dma_pool belonging to @cpu's slot of the percpu ddp_pool (the inline asm
 * is the CIL expansion of per_cpu_ptr()). Safe to call when the pool is already NULL. */
static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe , unsigned int cpu ) { struct ixgbe_fcoe_ddp_pool *ddp_pool ; void const *__vpp_verify ; unsigned long __ptr ; { __vpp_verify = (void const *)0; __asm__ ("": "=r" (__ptr): "0" (fcoe->ddp_pool)); ddp_pool = (struct ixgbe_fcoe_ddp_pool *)(__per_cpu_offset[cpu] + __ptr); if ((unsigned long )ddp_pool->pool != (unsigned long )((struct dma_pool *)0)) { dma_pool_destroy(ddp_pool->pool); } else { } ddp_pool->pool = (struct dma_pool *)0; return; } } static int
/* ixgbe_fcoe_dma_pool_alloc(): create a per-cpu dma_pool ("ixgbe_fcoe_ddp_<cpu>",
 * 2 KiB blocks, 16-byte aligned, 4 KiB boundary) and install it into @cpu's slot of
 * the percpu ddp_pool, zeroing its no-DDP counters. Returns 0 or -12 (-ENOMEM). */
ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe , struct device *dev , unsigned int cpu ) { struct ixgbe_fcoe_ddp_pool *ddp_pool ; struct dma_pool *pool ; char pool_name[32U] ; void const *__vpp_verify ; unsigned long __ptr ; { snprintf((char *)(& pool_name), 32UL, "ixgbe_fcoe_ddp_%u", cpu); pool = dma_pool_create((char const *)(& pool_name), dev, 2048UL, 16UL, 4096UL); if ((unsigned long )pool == (unsigned long )((struct dma_pool *)0)) { return (-12); } else { } __vpp_verify = (void const *)0; __asm__ ("": "=r" (__ptr): "0" (fcoe->ddp_pool)); ddp_pool = (struct ixgbe_fcoe_ddp_pool *)(__per_cpu_offset[cpu] + __ptr); ddp_pool->pool = pool; ddp_pool->noddp = 0ULL; ddp_pool->noddp_ext_buff = 0ULL; return (0); } }
/* ixgbe_configure_fcoe(): program the FCoE ethertype filters (ETQF/ETQS), and when the
 * FCoE-enabled flag (0x200000) is set, fill the FCRETA redirection table — 8 entries,
 * or 32 on X550 which also packs a second queue into bits 16..22 — mapping hash buckets
 * to the FCoE RX ring register indices, then enable FCRECTL and the FIP filter. */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter ) { struct ixgbe_ring_feature *fcoe ; struct ixgbe_hw *hw ; int i ; int fcoe_q ; int fcoe_i ; int fcoe_q_h ; int fcreta_size ; u32 etqf ; int fcoe_i_h ; { fcoe = (struct ixgbe_ring_feature *)(& adapter->ring_feature) + 4UL; hw = & adapter->hw; fcoe_q_h = 0; if (((adapter->netdev)->features & 536870912ULL) == 0ULL) { return; } else { } etqf = 2281736454U; if ((adapter->flags & 8388608U) != 0U) { etqf = etqf | 67108864U; etqf = (u32 )((int )adapter->ring_feature[1].offset << 20) | etqf; } else { } ixgbe_write_reg(hw, 20784U, etqf); ixgbe_write_reg(hw, 60424U, 0U); if ((adapter->flags & 2097152U) == 0U) { return; } else { } fcreta_size = 8; if ((unsigned int )adapter->hw.mac.type == 4U) { fcreta_size = 32; } else { } i = 0; goto ldv_61906; ldv_61905: ; if ((unsigned int )adapter->hw.mac.type == 4U) { fcoe_i_h = (int )fcoe->offset + (i + fcreta_size) % (int )fcoe->indices; fcoe_q_h = (int )(adapter->rx_ring[fcoe_i_h])->reg_idx; fcoe_q_h = (fcoe_q_h << 16) & 8323072; } else { } fcoe_i = (int )fcoe->offset + i % (int )fcoe->indices; fcoe_i = fcoe_i & 127; fcoe_q = (int )(adapter->rx_ring[fcoe_i])->reg_idx; fcoe_q = fcoe_q | fcoe_q_h; ixgbe_write_reg(hw, (u32 )((i + 15172) * 4), (u32 )fcoe_q); i = i + 1; ldv_61906: ; if (i < fcreta_size) { goto ldv_61905; } else { } ixgbe_write_reg(hw, 60672U, 1U); etqf = 2147518740U; if ((adapter->flags & 8388608U) != 0U) { etqf = etqf | 67108864U; etqf = (u32 )((int )adapter->ring_feature[1].offset << 20) | etqf; } else { } ixgbe_write_reg(hw, 20792U, etqf); fcoe_q = (int )(adapter->rx_ring[(int )fcoe->offset])->reg_idx; ixgbe_write_reg(hw, 60432U, (unsigned int )(fcoe_q << 16) | 2147483648U); ixgbe_write_reg(hw, 20736U, 128U); return; } }
/* ixgbe_free_fcoe_ddp_resources(): tear down every DDP context (512, or 2048 on X550),
 * destroy each possible CPU's dma_pool, and release the extra DDP bounce buffer
 * (unmap + kfree + clear). No-op when the percpu ddp_pool was never allocated. */
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter ) { struct ixgbe_fcoe *fcoe ; int cpu ; int i ; int ddp_max ; unsigned int tmp ; { fcoe = & adapter->fcoe; if ((unsigned long )fcoe->ddp_pool == (unsigned long )((struct ixgbe_fcoe_ddp_pool *)0)) { return; } else { } ddp_max = 512; if ((unsigned int )adapter->hw.mac.type == 4U) { ddp_max = 2048; } else { } i = 0; goto ldv_61916; ldv_61915: ixgbe_fcoe_ddp_put(adapter->netdev, (int )((u16 )i)); i = i + 1; ldv_61916: ; if (i < ddp_max) { goto ldv_61915; } else { } cpu = -1; goto ldv_61919; ldv_61918: ixgbe_fcoe_dma_pool_free(fcoe, (unsigned int )cpu); ldv_61919: tmp = cpumask_next(cpu, cpu_possible_mask); cpu = (int )tmp; if (cpu < nr_cpu_ids) { goto ldv_61918; } else { } dma_unmap_single_attrs___0(& (adapter->pdev)->dev, fcoe->extra_ddp_buffer_dma, 4096UL, 2, (struct dma_attrs *)0); kfree((void const *)fcoe->extra_ddp_buffer); fcoe->extra_ddp_buffer = (void *)0; fcoe->extra_ddp_buffer_dma = 0ULL; return; } }
/* ixgbe_setup_fcoe_ddp_resources(): allocate and DMA-map the 4 KiB extra DDP buffer
 * (body continues on the following lines with per-cpu pool creation). */
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter ) { struct ixgbe_fcoe *fcoe ; struct device *dev ; void *buffer ; dma_addr_t dma ; unsigned int cpu ; int tmp ; int err ; int tmp___0 ; { fcoe = & adapter->fcoe; dev = & (adapter->pdev)->dev; if ((unsigned long )fcoe->ddp_pool == (unsigned long )((struct ixgbe_fcoe_ddp_pool *)0)) { return (0); } else { } buffer = kzalloc(4096UL, 32U); if ((unsigned long )buffer == (unsigned long )((void *)0)) { return (-12); } else { } dma = dma_map_single_attrs___0(dev, buffer,
4096UL, 2, (struct dma_attrs *)0); tmp = dma_mapping_error(dev, dma); if (tmp != 0) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "failed to map extra DDP buffer\n"); } else { } kfree((void const *)buffer); return (-12); } else { } fcoe->extra_ddp_buffer = buffer; fcoe->extra_ddp_buffer_dma = dma; cpu = 4294967295U; goto ldv_61930; ldv_61931: tmp___0 = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); err = tmp___0; if (err == 0) { goto ldv_61930; } else { } if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "failed to alloc DDP pool on cpu:%d\n", cpu); } else { } ixgbe_free_fcoe_ddp_resources(adapter); return (-12); ldv_61930: cpu = cpumask_next((int )cpu, cpu_possible_mask); if ((unsigned int )nr_cpu_ids > cpu) { goto ldv_61931; } else { } return (0); } } static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter ) { struct ixgbe_fcoe *fcoe ; void *tmp ; { fcoe = & adapter->fcoe; if ((adapter->flags & 1048576U) == 0U) { return (-22); } else { } tmp = __alloc_percpu(24UL, 8UL); fcoe->ddp_pool = (struct ixgbe_fcoe_ddp_pool *)tmp; if ((unsigned long )fcoe->ddp_pool == (unsigned long )((struct ixgbe_fcoe_ddp_pool *)0)) { if ((int )adapter->msg_enable & 1) { netdev_err((struct net_device const *)adapter->netdev, "failed to allocate percpu DDP resources\n"); } else { } return (-12); } else { } (adapter->netdev)->fcoe_ddp_xid = 511U; if ((unsigned int )adapter->hw.mac.type == 4U) { (adapter->netdev)->fcoe_ddp_xid = 2047U; } else { } return (0); } } static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter ) { struct ixgbe_fcoe *fcoe ; { fcoe = & adapter->fcoe; (adapter->netdev)->fcoe_ddp_xid = 0U; if ((unsigned long )fcoe->ddp_pool == (unsigned long )((struct ixgbe_fcoe_ddp_pool *)0)) { return; } else { } free_percpu((void *)fcoe->ddp_pool); fcoe->ddp_pool = (struct ixgbe_fcoe_ddp_pool *)0; return; } } int ixgbe_fcoe_enable(struct net_device *netdev ) { struct ixgbe_adapter 
/* Continuation of ixgbe_fcoe_enable: bump fcoe.refcnt unconditionally, then
 * require flag 1048576U set and flag 2097152U (the FCoE-enabled bit, judging by
 * how it is set below) clear, else -22 (-EINVAL). If the device is running,
 * call ndo_stop; enable DDP; set flag 2097152U and netdev feature bit
 * 2147483648ULL (bit 31 -- presumably an FCoE NETIF_F_* feature, confirm);
 * signal netdev_features_change; rebuild the interrupt scheme; reopen if it
 * was running. Always returns 0 past the early checks. */
*adapter ; void *tmp ; struct ixgbe_fcoe *fcoe ; bool tmp___0 ; bool tmp___1 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; fcoe = & adapter->fcoe; atomic_inc(& fcoe->refcnt); if ((adapter->flags & 1048576U) == 0U) { return (-22); } else { } if ((adapter->flags & 2097152U) != 0U) { return (-22); } else { } if ((int )adapter->msg_enable & 1) { netdev_info((struct net_device const *)adapter->netdev, "Enabling FCoE offload features.\n"); } else { } if ((adapter->flags & 8388608U) != 0U) { if (((int )adapter->msg_enable & 2) != 0) { netdev_warn((struct net_device const *)adapter->netdev, "Enabling FCoE on PF will disable legacy VFs\n"); } else { } } else { } tmp___0 = netif_running((struct net_device const *)netdev); if ((int )tmp___0) { (*((netdev->netdev_ops)->ndo_stop))(netdev); } else { } ixgbe_fcoe_ddp_enable(adapter); adapter->flags = adapter->flags | 2097152U; netdev->features = netdev->features | 2147483648ULL; netdev_features_change(netdev); ixgbe_clear_interrupt_scheme(adapter); ixgbe_init_interrupt_scheme(adapter); tmp___1 = netif_running((struct net_device const *)netdev); if ((int )tmp___1) { (*((netdev->netdev_ops)->ndo_open))(netdev); } else { } return (0); } }
/* ixgbe_fcoe_disable - mirror of ixgbe_fcoe_enable. Only the caller that drops
 * fcoe.refcnt to zero (atomic_dec_and_test) proceeds; otherwise -22. Also -22
 * when the FCoE-enabled flag 2097152U is not set. Stops the device if running,
 * disables DDP, then clears the flag/feature bits (the masked assignment is
 * completed on the next source line). */
int ixgbe_fcoe_disable(struct net_device *netdev ) { struct ixgbe_adapter *adapter ; void *tmp ; int tmp___0 ; bool tmp___1 ; bool tmp___2 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; tmp___0 = atomic_dec_and_test(& adapter->fcoe.refcnt); if (tmp___0 == 0) { return (-22); } else { } if ((adapter->flags & 2097152U) == 0U) { return (-22); } else { } if ((int )adapter->msg_enable & 1) { netdev_info((struct net_device const *)adapter->netdev, "Disabling FCoE offload features.\n"); } else { } tmp___1 = netif_running((struct net_device const *)netdev); if ((int )tmp___1) { (*((netdev->netdev_ops)->ndo_stop))(netdev); } else { } ixgbe_fcoe_ddp_disable(adapter); adapter->flags = adapter->flags & 
/* Continuation of ixgbe_fcoe_disable: clear adapter flag bit 2097152U (mask
 * 4292870143U = ~0x200000) and netdev feature bit 31 (mask
 * 0xffffffff7fffffff), announce the feature change, rebuild the interrupt
 * scheme, reopen if it was running; returns 0. */
4292870143U; netdev->features = netdev->features & 0xffffffff7fffffffULL; netdev_features_change(netdev); ixgbe_clear_interrupt_scheme(adapter); ixgbe_init_interrupt_scheme(adapter); tmp___2 = netif_running((struct net_device const *)netdev); if ((int )tmp___2) { (*((netdev->netdev_ops)->ndo_open))(netdev); } else { } return (0); } }
/* ixgbe_fcoe_get_wwn - compose a 64-bit World Wide Name into *wwn.
 * type 0 selects mac->wwnn_prefix (node name), type 1 mac->wwpn_prefix (port
 * name); any other type leaves the 0xFFFF sentinel. Succeeds (returns 0) only
 * when the prefix is not 0xFFFF and mac->san_addr is a valid Ethernet address;
 * the WWN is prefix<<48 OR'd with the six SAN MAC bytes. Otherwise -22. */
int ixgbe_fcoe_get_wwn(struct net_device *netdev , u64 *wwn , int type ) { u16 prefix ; struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_mac_info *mac ; bool tmp___0 ; { prefix = 65535U; tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; mac = & adapter->hw.mac; switch (type) { case 0: prefix = mac->wwnn_prefix; goto ldv_61959; case 1: prefix = mac->wwpn_prefix; goto ldv_61959; default: ; goto ldv_61959; } ldv_61959: ; if ((unsigned int )prefix != 65535U) { tmp___0 = is_valid_ether_addr((u8 const *)(& mac->san_addr)); if ((int )tmp___0) { *wwn = (((((((unsigned long long )prefix << 48) | ((unsigned long long )mac->san_addr[0] << 40)) | ((unsigned long long )mac->san_addr[1] << 32)) | ((unsigned long long )mac->san_addr[2] << 24)) | ((unsigned long long )mac->san_addr[3] << 16)) | ((unsigned long long )mac->san_addr[4] << 8)) | (unsigned long long )mac->san_addr[5]; return (0); } else { } } else { } return (-22); } }
/* ixgbe_fcoe_get_hbainfo - fill the netdev_fcoe_hbainfo strings. Rejects a
 * NULL info or a MAC type other than 2U/3U (the "Intel 82599"/"Intel X540"
 * strings below suggest which is which) with -22. Reads an 8-byte serial from
 * PCI extended capability 3 (device serial number) at offset +4, byte by byte
 * in the label loop on the next source line. */
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev , struct netdev_fcoe_hbainfo *info ) { struct ixgbe_adapter *adapter ; void *tmp ; struct ixgbe_hw *hw ; int i ; int pos ; u8 buf[8U] ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct ixgbe_adapter *)tmp; hw = & adapter->hw; if ((unsigned long )info == (unsigned long )((struct netdev_fcoe_hbainfo *)0)) { return (-22); } else { } if ((unsigned int )hw->mac.type != 2U && (unsigned int )hw->mac.type != 3U) { return (-22); } else { } snprintf((char *)(& info->manufacturer), 64UL, "Intel Corporation"); pos = pci_find_ext_capability(adapter->pdev, 3); if (pos != 0) { pos = pos + 4; i = 0; goto 
/* Continuation of ixgbe_fcoe_get_hbainfo: read 8 serial bytes (loop
 * ldv_61971/ldv_61972), format them most-significant-first, or "Unknown" when
 * the capability is absent; then fill hardware/driver/firmware version strings
 * and the model ("Intel 82599" for mac.type 2U, otherwise "Intel X540").
 * Returns 0. */
ldv_61972; ldv_61971: pci_read_config_byte((struct pci_dev const *)adapter->pdev, pos + i, (u8 *)(& buf) + (unsigned long )i); i = i + 1; ldv_61972: ; if (i <= 7) { goto ldv_61971; } else { } snprintf((char *)(& info->serial_number), 64UL, "%02X%02X%02X%02X%02X%02X%02X%02X", (int )buf[7], (int )buf[6], (int )buf[5], (int )buf[4], (int )buf[3], (int )buf[2], (int )buf[1], (int )buf[0]); } else { snprintf((char *)(& info->serial_number), 64UL, "Unknown"); } snprintf((char *)(& info->hardware_version), 64UL, "Rev %d", (int )hw->revision_id); snprintf((char *)(& info->driver_version), 64UL, "%s v%s", (char *)(& ixgbe_driver_name), (char const *)(& ixgbe_driver_version)); snprintf((char *)(& info->firmware_version), 64UL, "0x%08x", ((int )adapter->eeprom_verh << 16) | (int )adapter->eeprom_verl); if ((unsigned int )hw->mac.type == 2U) { snprintf((char *)(& info->model), 256UL, "Intel 82599"); } else { snprintf((char *)(& info->model), 256UL, "Intel X540"); } snprintf((char *)(& info->model_description), 256UL, "%s", (char *)(& ixgbe_default_device_descr)); return (0); } }
/* ixgbe_fcoe_get_tc - map the FCoE user priority (fcoe.up) to its traffic
 * class via netdev_get_prio_tc_map. */
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter ) { int tmp ; { tmp = netdev_get_prio_tc_map((struct net_device const *)adapter->netdev, (u32 )adapter->fcoe.up); return ((u8 )tmp); } }
/* LDV-instrumented spinlock wrappers: record the lock state in the global
 * ldv_spin model (see ldv_spin_lock/ldv_spin_unlock below) before delegating
 * to the numbered lock primitives. */
__inline static void spin_lock_bh(spinlock_t *lock ) { { ldv_spin_lock(); ldv_spin_lock_bh_905(lock); return; } } __inline static void spin_unlock_bh(spinlock_t *lock ) { { ldv_spin_unlock(); ldv_spin_unlock_bh_909(lock); return; } }
/* LDV workqueue model wrappers: forward to the real queue_work_on /
 * queue_delayed_work_on and additionally register the work item with the
 * verifier's work model via activate_work_9(..., 2). */
bool ldv_queue_work_on_914(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_915(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { 
/* More LDV model wrappers. The workqueue ones mirror ldv_queue_work_on_914:
 * call the real kernel API, then notify the verifier's work model
 * (activate_work_9 / call_and_disable_all_9). The allocation stubs
 * (kmem_cache_alloc, pskb_expand_head, skb_clone, skb_copy,
 * __netdev_alloc_skb) do not perform a real allocation: each validates the
 * gfp flags against the spinlock model via ldv_check_alloc_flags and returns
 * an unconstrained pointer from ldv_undef_ptr(), letting the verifier explore
 * both success and failure outcomes. */
ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_916(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_9(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_917(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_9(2); return; } } bool ldv_queue_delayed_work_on_918(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_9(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_924(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_930(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_932(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_934(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_935(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { 
/* Remaining LDV allocation stubs (same pattern: check gfp flags against the
 * spinlock model, return an unconstrained value), followed by the verifier's
 * core model functions. ldv_error() is the error sink: reaching it triggers
 * __VERIFIER_error(), i.e. a property violation. The ldv_is_err/ldv_err_ptr/
 * ldv_ptr_err/ldv_is_err_or_null group models the kernel's ERR_PTR scheme
 * with 2012 as the MAX_ERRNO-style threshold: pointers above 2012 encode
 * negative error codes. */
ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_936(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_937(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_938(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_939(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_940(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_941(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_dma_pool_alloc_942(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static void ldv_error(void) { { ERROR: ; __VERIFIER_error(); } } bool ldv_is_err(void const *ptr ) { { return ((unsigned long )ptr > 2012UL); } } void *ldv_err_ptr(long error ) { { return ((void *)(2012L - error)); } } long ldv_ptr_err(void const *ptr ) { { return ((long )(2012UL - (unsigned long )ptr)); } } bool ldv_is_err_or_null(void const *ptr ) { bool tmp ; int tmp___0 ; { if ((unsigned long )ptr == (unsigned long )((void const *)0)) { tmp___0 = 1; } else { tmp = 
/* End of ldv_is_err_or_null (true for NULL or ERR_PTR-encoded pointers), then
 * the spinlock-context model that the checked property is built on:
 *  - ldv_spin: global flag, 1 while any instrumented spinlock is held;
 *  - ldv_check_alloc_flags: reaching an allocation whose gfp flags contain
 *    bit 16U (presumably a may-sleep flag such as __GFP_WAIT -- confirm
 *    against the kernel version's gfp.h) while ldv_spin != 0 is reported as
 *    a violation via ldv_error();
 *  - ldv_check_alloc_flags_and_return_some_page: same check, then returns an
 *    arbitrary page from ldv_some_page();
 *  - ldv_check_alloc_nonatomic: any call under a held spinlock is a violation;
 *  - ldv_spin_lock / ldv_spin_unlock: set/clear the flag;
 *  - ldv_spin_trylock: nondeterministic (ldv_undef_int) -- returns 0 on the
 *    "failed" branch, or sets the flag and returns 1 on the "acquired" branch. */
ldv_is_err(ptr); if ((int )tmp) { tmp___0 = 1; } else { tmp___0 = 0; } } return ((bool )tmp___0); } } int ldv_spin = 0; void ldv_check_alloc_flags(gfp_t flags ) { { if (ldv_spin != 0 && (flags & 16U) != 0U) { ldv_error(); } else { } return; } } extern struct page *ldv_some_page(void) ; struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags ) { struct page *tmp ; { if (ldv_spin != 0 && (flags & 16U) != 0U) { ldv_error(); } else { } tmp = ldv_some_page(); return (tmp); } } void ldv_check_alloc_nonatomic(void) { { if (ldv_spin != 0) { ldv_error(); } else { } return; } } void ldv_spin_lock(void) { { ldv_spin = 1; return; } } void ldv_spin_unlock(void) { { ldv_spin = 0; return; } } int ldv_spin_trylock(void) { int is_lock ; { is_lock = ldv_undef_int(); if (is_lock != 0) { return (0); } else { ldv_spin = 1; return (1); } } }